diff --git a/.gitignore b/.gitignore index 1e6e178e746fadfbfb5e0f02a692d87b8b57a302..704b2e741598a3b32fb91987b25a51edaedf8fff 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,6 @@ debug/ release/ target/ debs/ -deps/ rpms/ mac/ *.pyc diff --git a/README-CN.md b/README-CN.md index 44542747eb15fd279ae35eb437381f717811ec63..f830404af33a52bd7c10ef9b455b08db51a5442a 100644 --- a/README-CN.md +++ b/README-CN.md @@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench ### Ubuntu 18.04 及以上版本 & Debian: ```bash -sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev +sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev ``` #### 为 taos-tools 安装编译需要的软件 @@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d ```bash sudo yum install epel-release sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel +sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake ``` ### CentOS 8/Fedora/Rocky Linux ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel +sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel ``` #### 在 CentOS 上构建 taosTools 安装依赖软件 @@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash ### macOS ``` -brew install argp-standalone pkgconfig geos +brew install argp-standalone pkgconfig ``` ### 设置 golang 开发环境 diff --git a/README.md b/README.md index c81e2d309d0177cbc447e379bdbcaf2cc1adfcbb..f477a51a1ff8d1cf45acfbc41cbf2aec1b86ce1a 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t ### Ubuntu 18.04 and above or Debian ```bash -sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev +sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev ``` #### Install build dependencies for taosTools @@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d ```bash sudo yum install epel-release sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel +sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake ``` ### CentOS 8/Fedora/Rocky Linux ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel +sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel ``` #### Install build dependencies for taosTools on CentOS @@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash ### macOS ``` -brew install argp-standalone pkgconfig geos +brew install argp-standalone pkgconfig ``` ### Setup golang environment diff --git a/cmake/cmake.define b/cmake/cmake.define index f3caf49da339d0055476df8175a3041ba7ba69e2..cf7f45099414af86a6fe8bccfbbd8bb477c5b711 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.0) -set(CMAKE_VERBOSE_MAKEFILE OFF) +set(CMAKE_VERBOSE_MAKEFILE ON) set(TD_BUILD_TAOSA_INTERNAL FALSE) #set output directory @@ -115,18 +115,6 @@ ELSE () SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}") ENDIF () - IF (${BUILD_SANITIZER}) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all 
-fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") - MESSAGE(STATUS "Compile with Address Sanitizer!") - ELSEIF (${BUILD_RELEASE}) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - ELSE () - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - ENDIF () - # disable all assert IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true")) ADD_DEFINITIONS(-DDISABLE_ASSERT) @@ -168,4 +156,20 @@ ELSE () MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED") ENDIF() + # build mode + SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + + IF (${BUILD_SANITIZER}) + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") + MESSAGE(STATUS "Compile with Address Sanitizer!") + ELSEIF (${BUILD_RELEASE}) + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") + ELSE () + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + ENDIF () + ENDIF () diff --git a/cmake/cmake.platform b/cmake/cmake.platform index ba747c6134c7032239b6d1d7b87186be66257069..25b442ab724c7a38eea076d8253de743a4f93bec 100644 --- a/cmake/cmake.platform +++ b/cmake/cmake.platform @@ -172,5 +172,15 @@ ENDIF() MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR}) +set(TD_DEPS_DIR "x86") +if (TD_LINUX) + IF (TD_ARM_64 OR TD_ARM_32) + set(TD_DEPS_DIR "arm") + ELSE() + set(TD_DEPS_DIR "x86") + ENDIF() +endif() +MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR}) + MESSAGE("C Compiler: 
${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})") MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})") diff --git a/cmake/cmake.version b/cmake/cmake.version index 3d0dc80902c5ebd7daf64a91ac37f72fcf7023bf..edc51f206cfeb0ee6eb3d4c92bda64b5af3d6814 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.5.0") + SET(TD_VER_NUMBER "3.0.6.0.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/cmake/rocksdb_CMakeLists.txt.in b/cmake/rocksdb_CMakeLists.txt.in index ba4a404af690b5762985532e801592a679799716..4f75b27f490b3b81fe9f6687f34a1d8297018dd6 100644 --- a/cmake/rocksdb_CMakeLists.txt.in +++ b/cmake/rocksdb_CMakeLists.txt.in @@ -1,5 +1,6 @@ # rocksdb +IF (NOT ${TD_LINUX}) ExternalProject_Add(rocksdb GIT_REPOSITORY https://github.com/facebook/rocksdb.git GIT_TAG v8.1.1 @@ -9,3 +10,5 @@ ExternalProject_Add(rocksdb INSTALL_COMMAND "" TEST_COMMAND "" ) +ENDIF(NOT ${TD_LINUX}) + diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index fdb9f102f079350b1ac25ab91a9a99c6864694c3..1928132210fe8eb8e0168030c117632e7a6ee0c7 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -78,10 +78,18 @@ if(${BUILD_WITH_LEVELDB}) endif(${BUILD_WITH_LEVELDB}) # rocksdb +IF (NOT ${TD_LINUX}) if(${BUILD_WITH_ROCKSDB}) cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) +ELSE() +if(${BUILD_WITH_ROCKSDB}) + #cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_ROCKSDB) +endif(${BUILD_WITH_ROCKSDB}) + +ENDIF(NOT ${TD_LINUX}) # canonical-raft if(${BUILD_WITH_CRAFT}) @@ -175,8 +183,8 @@ if(${BUILD_TEST}) PUBLIC $ ) endif(${TD_DARWIN}) - - + + endif(${BUILD_TEST}) # cJson @@ -227,9 +235,14 @@ endif(${BUILD_WITH_LEVELDB}) # rocksdb # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev +IF (NOT ${TD_LINUX}) + if(${BUILD_WITH_ROCKSDB}) if(${TD_LINUX}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result") + IF ("${CMAKE_BUILD_TYPE}" STREQUAL "") + SET(CMAKE_BUILD_TYPE Release) + endif() endif(${TD_LINUX}) MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) @@ -242,7 +255,7 @@ if(${BUILD_WITH_ROCKSDB}) endif(${TD_DARWIN_ARM64}) if (${TD_WINDOWS}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819") endif(${TD_WINDOWS}) @@ -253,19 +266,19 @@ if(${BUILD_WITH_ROCKSDB}) endif(${TD_DARWIN}) if(${TD_WINDOWS}) - option(WITH_JNI "" OFF) + option(WITH_JNI "" OFF) endif(${TD_WINDOWS}) if(${TD_WINDOWS}) option(WITH_MD_LIBRARY "build with MD" OFF) set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) endif(${TD_WINDOWS}) - - + + option(WITH_FALLOCATE "" OFF) option(WITH_JEMALLOC "" OFF) option(WITH_GFLAGS "" OFF) - option(PORTABLE "" OFF) + option(PORTABLE "" ON) option(WITH_LIBURING "" OFF) option(FAIL_ON_WARNINGS OFF) @@ -273,7 +286,7 @@ if(${BUILD_WITH_ROCKSDB}) option(WITH_BENCHMARK_TOOLS "" OFF) option(WITH_TOOLS "" OFF) 
option(WITH_LIBURING "" OFF) - IF (TD_LINUX) + IF (TD_LINUX) option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) ELSE() option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) @@ -285,16 +298,17 @@ if(${BUILD_WITH_ROCKSDB}) ) endif(${BUILD_WITH_ROCKSDB}) +ENDIF(NOT ${TD_LINUX}) # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) option(ENABLE_TEST "Enable the tests" OFF) add_subdirectory(lucene EXCLUDE_FROM_ALL) target_include_directories( - lucene++ + lucene++ PUBLIC $ - ) - + ) + endif(${BUILD_WITH_LUCENE}) # NuRaft @@ -354,7 +368,7 @@ if(${BUILD_MSVCREGEX}) target_include_directories(msvcregex PRIVATE "msvcregex" ) - target_link_libraries(msvcregex + target_link_libraries(msvcregex INTERFACE Shell32 ) SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex) @@ -414,8 +428,8 @@ if(${BUILD_WITH_BDB}) IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/bdb/libdb.a" INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/bdb" ) - target_link_libraries(bdb - INTERFACE pthread + target_link_libraries(bdb + INTERFACE pthread ) endif(${BUILD_WITH_BDB}) @@ -427,12 +441,12 @@ if(${BUILD_WITH_SQLITE}) IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/sqlite/.libs/libsqlite3.a" INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/sqlite" ) - target_link_libraries(sqlite - INTERFACE m - INTERFACE pthread + target_link_libraries(sqlite + INTERFACE m + INTERFACE pthread ) if(NOT TD_WINDOWS) - target_link_libraries(sqlite + target_link_libraries(sqlite INTERFACE dl ) endif(NOT TD_WINDOWS) @@ -440,22 +454,22 @@ endif(${BUILD_WITH_SQLITE}) # addr2line if(${BUILD_ADDR2LINE}) - if(NOT ${TD_WINDOWS}) - check_include_file( "sys/types.h" HAVE_SYS_TYPES_H) - check_include_file( "sys/stat.h" HAVE_SYS_STAT_H ) - check_include_file( "inttypes.h" HAVE_INTTYPES_H ) - check_include_file( "stddef.h" HAVE_STDDEF_H ) - check_include_file( "stdlib.h" HAVE_STDLIB_H ) - check_include_file( "string.h" HAVE_STRING_H ) - check_include_file( "memory.h" HAVE_MEMORY_H ) - check_include_file( "strings.h" HAVE_STRINGS_H ) + if(NOT ${TD_WINDOWS}) + check_include_file( "sys/types.h" HAVE_SYS_TYPES_H) + check_include_file( "sys/stat.h" HAVE_SYS_STAT_H ) + check_include_file( "inttypes.h" HAVE_INTTYPES_H ) + check_include_file( "stddef.h" HAVE_STDDEF_H ) + check_include_file( "stdlib.h" HAVE_STDLIB_H ) + check_include_file( "string.h" HAVE_STRING_H ) + check_include_file( "memory.h" HAVE_MEMORY_H ) + check_include_file( "strings.h" HAVE_STRINGS_H ) check_include_file( "stdint.h" HAVE_STDINT_H ) check_include_file( "unistd.h" HAVE_UNISTD_H ) check_include_file( "sgidefs.h" HAVE_SGIDEFS_H ) check_include_file( "stdafx.h" HAVE_STDAFX_H ) - check_include_file( "elf.h" HAVE_ELF_H ) - check_include_file( "libelf.h" HAVE_LIBELF_H ) - check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H) + check_include_file( "elf.h" HAVE_ELF_H ) + check_include_file( "libelf.h" HAVE_LIBELF_H ) + check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H) check_include_file( "alloca.h" HAVE_ALLOCA_H ) check_include_file( "elfaccess.h" HAVE_ELFACCESS_H) check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H ) @@ -463,7 +477,7 @@ if(${BUILD_ADDR2LINE}) check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H) check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H ) set(VERSION 0.3.1) - set(PACKAGE_VERSION "\"${VERSION}\"") + set(PACKAGE_VERSION "\"${VERSION}\"") configure_file(libdwarf/cmake/config.h.cmake config.h) file(GLOB_RECURSE 
LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c") add_library(libdwarf STATIC ${LIBDWARF_SOURCES}) @@ -485,8 +499,16 @@ endif(${BUILD_ADDR2LINE}) # geos if(${BUILD_GEOS}) + if(${TD_LINUX}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") + IF ("${CMAKE_BUILD_TYPE}" STREQUAL "") + SET(CMAKE_BUILD_TYPE Release) + endif() + endif(${TD_LINUX}) option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF) add_subdirectory(geos EXCLUDE_FROM_ALL) + unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD target_include_directories( geos_c PUBLIC $ diff --git a/deps/arm/rocksdb_static/librocksdb.a b/deps/arm/rocksdb_static/librocksdb.a new file mode 100644 index 0000000000000000000000000000000000000000..dc7f37b0f308431c96327d7a3a61f612673e3437 Binary files /dev/null and b/deps/arm/rocksdb_static/librocksdb.a differ diff --git a/deps/arm/rocksdb_static/rocksdb/c.h b/deps/arm/rocksdb_static/rocksdb/c.h new file mode 100644 index 0000000000000000000000000000000000000000..1ba7fabefe92f674ecbd27a91bfbb57b0ed17d23 --- /dev/null +++ b/deps/arm/rocksdb_static/rocksdb/c.h @@ -0,0 +1,2844 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors. + + C bindings for rocksdb. May be useful as a stable ABI that can be + used by programs that keep rocksdb in a shared library, or for + a JNI api. + + Does not support: + . getters for the option types + . custom comparators that implement key shortening + . capturing post-write-snapshot + . custom iter, db, env, cache implementations using just the C bindings + + Some conventions: + + (1) We expose just opaque struct pointers and functions to clients. + This allows us to change internal representations without having to + recompile clients. + + (2) For simplicity, there is no equivalent to the Slice type. Instead, + the caller has to pass the pointer and length as separate + arguments. + + (3) Errors are represented by a null-terminated c string. NULL + means no error. All operations that can raise an error are passed + a "char** errptr" as the last argument. One of the following must + be true on entry: + *errptr == NULL + *errptr points to a malloc()ed null-terminated error message + On success, a leveldb routine leaves *errptr unchanged. + On failure, leveldb frees the old value of *errptr and + set *errptr to a malloc()ed error message. + + (4) Bools have the type unsigned char (0 == false; rest == true) + + (5) All of the pointer arguments must be non-NULL. 
+*/ + +#pragma once + +#ifdef _WIN32 +#ifdef ROCKSDB_DLL +#ifdef ROCKSDB_LIBRARY_EXPORTS +#define ROCKSDB_LIBRARY_API __declspec(dllexport) +#else +#define ROCKSDB_LIBRARY_API __declspec(dllimport) +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/* Exported types */ + +typedef struct rocksdb_t rocksdb_t; +typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t; +typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t; +typedef struct rocksdb_backup_engine_options_t rocksdb_backup_engine_options_t; +typedef struct rocksdb_restore_options_t rocksdb_restore_options_t; +typedef struct rocksdb_memory_allocator_t rocksdb_memory_allocator_t; +typedef struct rocksdb_lru_cache_options_t rocksdb_lru_cache_options_t; +typedef struct rocksdb_hyper_clock_cache_options_t + rocksdb_hyper_clock_cache_options_t; +typedef struct rocksdb_cache_t rocksdb_cache_t; +typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t; +typedef struct rocksdb_compactionfiltercontext_t + rocksdb_compactionfiltercontext_t; +typedef struct rocksdb_compactionfilterfactory_t + rocksdb_compactionfilterfactory_t; +typedef struct rocksdb_comparator_t rocksdb_comparator_t; +typedef struct rocksdb_dbpath_t rocksdb_dbpath_t; +typedef struct rocksdb_env_t rocksdb_env_t; +typedef struct rocksdb_fifo_compaction_options_t + rocksdb_fifo_compaction_options_t; +typedef struct rocksdb_filelock_t rocksdb_filelock_t; +typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t; +typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t; +typedef struct rocksdb_iterator_t rocksdb_iterator_t; +typedef struct rocksdb_logger_t rocksdb_logger_t; +typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t; +typedef struct rocksdb_options_t rocksdb_options_t; +typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t; +typedef struct rocksdb_block_based_table_options_t + rocksdb_block_based_table_options_t; +typedef struct rocksdb_cuckoo_table_options_t rocksdb_cuckoo_table_options_t; +typedef struct rocksdb_randomfile_t rocksdb_randomfile_t; +typedef struct rocksdb_readoptions_t rocksdb_readoptions_t; +typedef struct rocksdb_seqfile_t rocksdb_seqfile_t; +typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t; +typedef struct rocksdb_snapshot_t rocksdb_snapshot_t; +typedef struct rocksdb_writablefile_t rocksdb_writablefile_t; +typedef struct rocksdb_writebatch_t rocksdb_writebatch_t; +typedef struct rocksdb_writebatch_wi_t rocksdb_writebatch_wi_t; +typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t; +typedef struct rocksdb_universal_compaction_options_t + rocksdb_universal_compaction_options_t; +typedef struct rocksdb_livefiles_t rocksdb_livefiles_t; +typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t; +typedef struct rocksdb_column_family_metadata_t + rocksdb_column_family_metadata_t; +typedef struct rocksdb_level_metadata_t rocksdb_level_metadata_t; +typedef struct rocksdb_sst_file_metadata_t rocksdb_sst_file_metadata_t; +typedef struct rocksdb_envoptions_t rocksdb_envoptions_t; +typedef struct rocksdb_ingestexternalfileoptions_t + rocksdb_ingestexternalfileoptions_t; +typedef struct rocksdb_sstfilewriter_t rocksdb_sstfilewriter_t; +typedef struct rocksdb_ratelimiter_t rocksdb_ratelimiter_t; +typedef struct rocksdb_perfcontext_t rocksdb_perfcontext_t; +typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; 
+typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; +typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; +typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; +typedef struct rocksdb_optimistictransactiondb_t + rocksdb_optimistictransactiondb_t; +typedef struct rocksdb_optimistictransaction_options_t + rocksdb_optimistictransaction_options_t; +typedef struct rocksdb_transaction_t rocksdb_transaction_t; +typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; +typedef struct rocksdb_wal_iterator_t rocksdb_wal_iterator_t; +typedef struct rocksdb_wal_readoptions_t rocksdb_wal_readoptions_t; +typedef struct rocksdb_memory_consumers_t rocksdb_memory_consumers_t; +typedef struct rocksdb_memory_usage_t rocksdb_memory_usage_t; + +/* DB operations */ + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_with_ttl( + const rocksdb_options_t* options, const char* name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, const char* name, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* +rocksdb_backup_engine_open_opts(const rocksdb_backup_engine_options_t* options, + rocksdb_env_t* env, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( + rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup_flush( + rocksdb_backup_engine_t* be, rocksdb_t* db, + unsigned char flush_before_backup, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( + rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* +rocksdb_restore_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( + rocksdb_restore_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( + rocksdb_restore_options_t* opt, int v); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_verify_backup( + rocksdb_backup_engine_t* be, uint32_t backup_id, char** errptr); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* +rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API int64_t rocksdb_backup_engine_info_timestamp( + const rocksdb_backup_engine_info_t* info, int index); + 
+extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_backup_id( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_backup_engine_info_size( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( + rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_increase_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* ts_low, size_t ts_lowlen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + size_t* ts_lowlen, char** errptr); + +/* BackupEngineOptions */ + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_options_t* +rocksdb_backup_engine_options_create(const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_backup_dir( + rocksdb_backup_engine_options_t* options, const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_env( + rocksdb_backup_engine_options_t* options, rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_table_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_share_table_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_backup_engine_options_set_sync( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_backup_engine_options_get_sync( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_destroy_old_data( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_destroy_old_data( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_log_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_backup_log_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_backup_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_restore_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_restore_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_background_operations( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_background_operations( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options, uint64_t size); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_destroy( + rocksdb_backup_engine_options_t*); + +/* Checkpoint */ + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create( + rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir, + uint64_t log_size_for_flush, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy( + rocksdb_checkpoint_t* checkpoint); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_and_trim_history( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char* trim_ts, + size_t trim_tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* 
rocksdb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families_with_ttl( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, const int* ttls, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary_column_families( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( + const rocksdb_options_t* options, const char* name, size_t* lencf, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( + char** list, size_t len); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family(rocksdb_t* db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family_with_ttl( + rocksdb_t* db, const rocksdb_options_t* column_family_options, + const char* column_family_name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( + rocksdb_column_family_handle_t*); + +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_column_family_handle_get_id(rocksdb_column_family_handle_t* handle); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_handle_get_name( + rocksdb_column_family_handle_t* handle, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_put( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_range_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + 
rocksdb_column_family_handle_t* column_family, const char* start_key, + size_t start_key_len, const char* end_key, size_t end_key_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_write( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +/* Returns NULL if not found. A malloc()ed array otherwise. + Stores the length of the array in *vallen. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +// if values_list[i] == NULL and errs[i] == NULL, +// then we got status.IsNotFound(), which we will not return. +// all errors except status status.ok() and status.IsNotFound() are returned. +// +// errs, values_list and values_list_sizes must be num_keys in length, +// allocated by the caller. +// errs is a list of strings as opposed to the conventional one error, +// where errs[i] is the status for retrieval of keys_list[i]. +// each non-NULL errs entry is a malloc()ed, null terminated string. +// each non-NULL values_list entry is a malloc()ed array, with +// the length for each stored in values_list_sizes[i]. 
+extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** timestamp_list, + size_t* timestamp_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** timestamps_list, + size_t* timestamps_list_sizes, char** errs); + +// The MultiGet API that improves performance by batching operations +// in the read path for greater efficiency. Currently, only the block based +// table format with full filters are supported. Other table formats such +// as plain table, block based table with block based filters and +// partitioned indexes will still work, but will not get any performance +// benefits. +// +// Note that all the keys passed to this API are restricted to a single +// column family. +// +// Parameters - +// db - the RocksDB instance. +// options - ReadOptions +// column_family - ColumnFamilyHandle* that the keys belong to. All the keys +// passed to the API are restricted to a single column family +// num_keys - Number of keys to lookup +// keys_list - Pointer to C style array of keys with num_keys elements +// keys_list_sizes - Pointer to C style array of the size of corresponding key +// in key_list with num_keys elements. +// values - Pointer to C style array of PinnableSlices with num_keys elements +// statuses - Pointer to C style array of Status with num_keys elements +// sorted_input - If true, it means the input keys are already sorted by key +// order, so the MultiGet() API doesn't have to sort them +// again. If false, the keys will be copied and sorted +// internally by the API - the input array will not be +// modified +extern ROCKSDB_LIBRARY_API void rocksdb_batched_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + rocksdb_pinnableslice_t** values, char** errs, const bool sorted_input); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_wal_iterator_t* rocksdb_get_updates_since( + rocksdb_t* db, uint64_t seq_number, + const rocksdb_wal_readoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( + rocksdb_t* db, rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, size_t size, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( + rocksdb_t* db, const rocksdb_snapshot_t* snapshot); + +/* Returns NULL if property name is unknown. + Else returns a pointer to a malloc()-ed null-terminated value. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, + const char* propname); +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int(rocksdb_t* db, + const char* propname, + uint64_t* out_val); + +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( + rocksdb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, + const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt( + rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key, + size_t start_key_len, 
const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, + const char* name); + +extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush( + rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cf( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cfs( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_family, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_wal(rocksdb_t* db, + unsigned char sync, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions( + rocksdb_t* db, unsigned char force, char** errptr); + +/* Management operations */ + +extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +/* Iterator */ + +extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( + const rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, + const char* k, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*, + const char* k, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( + const rocksdb_iterator_t*, size_t* klen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( + const rocksdb_iterator_t*, size_t* vlen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_timestamp( + const rocksdb_iterator_t*, size_t* tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( + const rocksdb_iterator_t*, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_next( + rocksdb_wal_iterator_t* iter); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_wal_iter_valid( + const rocksdb_wal_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_status( + const rocksdb_wal_iterator_t* iter, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_wal_iter_get_batch( + const rocksdb_wal_iterator_t* iter, uint64_t* seq); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_get_latest_sequence_number(rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_destroy( + const rocksdb_wal_iterator_t* iter); + +/* Write batch */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create( + void); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( + const char* rep, 
size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, + const char* key, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete( + rocksdb_writebatch_t* b, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf_with_ts( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev_cf( + rocksdb_writebatch_t* b, 
rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range( + rocksdb_writebatch_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev( + rocksdb_writebatch_t* b, int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data( + rocksdb_writebatch_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate( + rocksdb_writebatch_t*, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data( + rocksdb_writebatch_t*, size_t* size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_set_save_point( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_rollback_to_save_point( + rocksdb_writebatch_t*, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_pop_save_point( + rocksdb_writebatch_t*, char** errptr); + +/* Write batch with index */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create(size_t reserved_bytes, + unsigned char overwrite_keys); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create_from(const char* rep, size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count( + rocksdb_writebatch_wi_t* b); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_merge_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_range is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range( + rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_range_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev( + rocksdb_writebatch_wi_t* b, int num_keys, + const char* const* start_keys_list, const size_t* start_keys_list_sizes, + const char* const* end_keys_list, const size_t* end_keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data( + rocksdb_writebatch_wi_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate( + rocksdb_writebatch_wi_t* b, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data( + rocksdb_writebatch_wi_t* b, size_t* size); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_set_save_point( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point( + rocksdb_writebatch_wi_t*, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + const char* key, size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, const char* key, size_t keylen, + size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_wi_t* wbwi, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator, + rocksdb_column_family_handle_t* cf); + +/* Options utils */ + +// Load the latest rocksdb options from the specified db_path. +// +// On success, num_column_families will be updated with a non-zero +// number indicating the number of column families. +// The returned db_options, column_family_names, and column_family_options +// should be released via rocksdb_load_latest_options_destroy(). +// +// On error, a non-null errptr that includes the error message will be +// returned. db_options, column_family_names, and column_family_options +// will be set to NULL. 
+extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options( + const char* db_path, rocksdb_env_t* env, bool ignore_unknown_options, + rocksdb_cache_t* cache, rocksdb_options_t** db_options, + size_t* num_column_families, char*** column_family_names, + rocksdb_options_t*** column_family_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options_destroy( + rocksdb_options_t* db_options, char** list_column_family_names, + rocksdb_options_t** list_column_family_options, size_t len); + +/* Block based table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t* +rocksdb_block_based_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy( + rocksdb_block_based_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_checksum( + rocksdb_block_based_table_options_t*, char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size( + rocksdb_block_based_table_options_t* options, size_t block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_size_deviation( + rocksdb_block_based_table_options_t* options, int block_size_deviation); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_restart_interval( + rocksdb_block_based_table_options_t* options, int block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_index_block_restart_interval( + rocksdb_block_based_table_options_t* options, + int index_block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_metadata_block_size( + rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_partition_filters( + rocksdb_block_based_table_options_t* options, + unsigned char partition_filters); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_optimize_filters_for_memory( + rocksdb_block_based_table_options_t* options, + unsigned char optimize_filters_for_memory); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_use_delta_encoding( + rocksdb_block_based_table_options_t* options, + unsigned char use_delta_encoding); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy( + rocksdb_block_based_table_options_t* options, + rocksdb_filterpolicy_t* filter_policy); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache( + rocksdb_block_based_table_options_t* options, unsigned char no_block_cache); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache( + rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_whole_key_filtering( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version( + rocksdb_block_based_table_options_t*, int); +enum { + rocksdb_block_based_table_index_type_binary_search = 0, + rocksdb_block_based_table_index_type_hash_search = 1, + rocksdb_block_based_table_index_type_two_level_index_search = 2, +}; +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +enum { + rocksdb_block_based_table_data_block_index_type_binary_search = 0, + rocksdb_block_based_table_data_block_index_type_binary_search_and_hash = 1, +}; +extern ROCKSDB_LIBRARY_API void 
+rocksdb_block_based_options_set_data_block_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_data_block_hash_ratio( + rocksdb_block_based_table_options_t* options, double v); +// rocksdb_block_based_options_set_hash_index_allow_collision() +// is removed since BlockBasedTableOptions.hash_index_allow_collision() +// is removed +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_top_level_index_and_filter( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory( + rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options); + +/* Cuckoo table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t* +rocksdb_cuckoo_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy( + rocksdb_cuckoo_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio( + rocksdb_cuckoo_table_options_t* options, double v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void +rocksdb_cuckoo_options_set_identity_as_first_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory( + rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options); + +/* Options */ +extern ROCKSDB_LIBRARY_API void rocksdb_set_options(rocksdb_t* db, int count, + const char* const keys[], + const char* const values[], + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_set_options_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count, + const char* const keys[], const char* const values[], char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create_copy( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism( + rocksdb_options_t* opt, int total_threads); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup( + rocksdb_options_t* opt, uint64_t block_cache_size_mb); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_optimize_universal_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_ingest_behind( + 
rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_ingest_behind(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter( + rocksdb_options_t*, rocksdb_compactionfilter_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory( + rocksdb_options_t*, rocksdb_compactionfilterfactory_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_compaction_readahead_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator( + rocksdb_options_t*, rocksdb_comparator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator( + rocksdb_options_t*, rocksdb_mergeoperator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level( + rocksdb_options_t* opt, const int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_create_if_missing( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_create_missing_column_families(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_create_missing_column_families(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_error_if_exists( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_paranoid_checks( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths( + rocksdb_options_t*, const rocksdb_dbpath_t** path_values, size_t num_paths); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, + rocksdb_env_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, + rocksdb_logger_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_info_log_level( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_db_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_open_files( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_file_opening_threads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size( + rocksdb_options_t* opt, uint64_t n); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_compression_options( + rocksdb_options_t*, int, int, int, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_zstd_max_train_bytes(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_zstd_max_train_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_parallel_threads(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_parallel_threads( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_compression_options_max_dict_buffer_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options(rocksdb_options_t*, int, int, + int, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes( + rocksdb_options_t*, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor( + rocksdb_options_t*, rocksdb_slicetransform_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_num_levels( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_file_num_compaction_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_level0_stop_writes_trigger( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_target_file_size_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_target_file_size_multiplier( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*, + 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_level_compaction_dynamic_level_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_max_bytes_for_level_multiplier(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier_additional( + rocksdb_options_t*, int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, + unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_stats_update_on_db_open(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt); + +/* Blob Options Settings */ +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_files( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_files( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_blob_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_min_blob_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_file_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_compression_type( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_compression_type( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_gc( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_gc( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_age_cutoff( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_age_cutoff( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_force_threshold( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_force_threshold( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_blob_compaction_readahead_size(rocksdb_options_t* opt, + uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_compaction_readahead_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_starting_level( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_file_starting_level( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_cache( + rocksdb_options_t* opt, rocksdb_cache_t* blob_cache); + +enum { + rocksdb_prepopulate_blob_disable = 0, + rocksdb_prepopulate_blob_flush_only = 1 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prepopulate_blob_cache( + rocksdb_options_t* opt, int val); + +extern ROCKSDB_LIBRARY_API int 
rocksdb_options_get_prepopulate_blob_cache( + rocksdb_options_t* opt); + +/* returns a pointer to a malloc()-ed, null terminated string */ +extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_write_buffer_number( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_min_write_buffer_number_to_merge(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_max_write_buffer_number_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_size_to_maintain(rocksdb_options_t*, + int64_t); +extern ROCKSDB_LIBRARY_API int64_t +rocksdb_options_get_max_write_buffer_size_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_pipelined_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_pipelined_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_unordered_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_unordered_write( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_subcompactions( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_max_subcompactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_log_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); 
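/*
 * Illustrative usage sketch (editorial note, not part of the upstream
 * header): the option setters declared above follow a uniform
 * create / set / destroy pattern, and every fallible call reports failure
 * through a malloc()-ed message written to its char** errptr argument.
 * rocksdb_open() and rocksdb_close() are declared earlier in this header;
 * the path "/tmp/example-db" and the tuning values are placeholders only.
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include "rocksdb/c.h"
 *
 *   int open_tuned_db(void) {
 *     char* err = NULL;
 *     rocksdb_options_t* opts = rocksdb_options_create();
 *     rocksdb_options_set_create_if_missing(opts, 1);
 *     rocksdb_options_set_write_buffer_size(opts, 64 << 20);  // 64 MiB memtable
 *     rocksdb_options_set_max_background_jobs(opts, 4);
 *
 *     rocksdb_t* db = rocksdb_open(opts, "/tmp/example-db", &err);
 *     if (err != NULL) {                   // error strings are malloc()-ed
 *       fprintf(stderr, "open failed: %s\n", err);
 *       free(err);
 *       rocksdb_options_destroy(opts);
 *       return -1;
 *     }
 *     rocksdb_close(db);
 *     rocksdb_options_destroy(opts);
 *     return 0;
 *   }
 */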
+extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_arena_block_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( + rocksdb_options_t*, const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, + const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void 
+rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_prefix_bloom_size_ratio(rocksdb_options_t*, + double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( + rocksdb_options_t*, size_t, int32_t, int32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory( + rocksdb_options_t*, uint32_t, int, double, size_t); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( + rocksdb_options_t* opt, int level); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_memtable_huge_page_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_successive_merges(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_bloom_locality(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_inplace_update_support(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_avoid_unnecessary_blocking_io(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_avoid_unnecessary_blocking_io(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_experimental_mempurge_threshold(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_experimental_mempurge_threshold(rocksdb_options_t*); + +enum { + rocksdb_tolerate_corrupted_tail_records_recovery = 0, + rocksdb_absolute_consistency_recovery = 1, + rocksdb_point_in_time_recovery = 2, + rocksdb_skip_any_corrupted_records_recovery = 3 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( + rocksdb_options_t*); + +enum { + rocksdb_no_compression = 0, + rocksdb_snappy_compression = 1, + rocksdb_zlib_compression = 2, + rocksdb_bz2_compression = 3, + rocksdb_lz4_compression = 4, + rocksdb_lz4hc_compression = 5, + rocksdb_xpress_compression = 6, + rocksdb_zstd_compression = 7 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( + rocksdb_options_t*); + +enum { + rocksdb_level_compaction = 0, + rocksdb_universal_compaction = 1, + rocksdb_fifo_compaction = 2 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_universal_compaction_options( + rocksdb_options_t*, rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options( + rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( + rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( + rocksdb_options_t* opt, 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( + rocksdb_options_t* opt, rocksdb_cache_t* cache); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_add_compact_on_deletion_collector_factory( + rocksdb_options_t*, size_t window_size, size_t num_dels_trigger); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manual_wal_flush( + rocksdb_options_t* opt, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_manual_wal_flush( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_compression( + rocksdb_options_t* opt, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_compression( + rocksdb_options_t* opt); + +/* RateLimiter */ +extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create( + int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness); +extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy( + rocksdb_ratelimiter_t*); + +/* PerfContext */ +enum { + rocksdb_uninitialized = 0, + rocksdb_disable = 1, + rocksdb_enable_count = 2, + rocksdb_enable_time_except_for_mutex = 3, + rocksdb_enable_time = 4, + rocksdb_out_of_bounds = 5 +}; + +enum { + rocksdb_user_key_comparison_count = 0, + rocksdb_block_cache_hit_count, + rocksdb_block_read_count, + rocksdb_block_read_byte, + rocksdb_block_read_time, + rocksdb_block_checksum_time, + rocksdb_block_decompress_time, + rocksdb_get_read_bytes, + rocksdb_multiget_read_bytes, + rocksdb_iter_read_bytes, + rocksdb_internal_key_skipped_count, + rocksdb_internal_delete_skipped_count, + rocksdb_internal_recent_skipped_count, + rocksdb_internal_merge_count, + rocksdb_get_snapshot_time, + rocksdb_get_from_memtable_time, + rocksdb_get_from_memtable_count, + rocksdb_get_post_process_time, + rocksdb_get_from_output_files_time, + rocksdb_seek_on_memtable_time, + rocksdb_seek_on_memtable_count, + rocksdb_next_on_memtable_count, + rocksdb_prev_on_memtable_count, + rocksdb_seek_child_seek_time, + rocksdb_seek_child_seek_count, + rocksdb_seek_min_heap_time, + rocksdb_seek_max_heap_time, + rocksdb_seek_internal_seek_time, + rocksdb_find_next_user_entry_time, + rocksdb_write_wal_time, + rocksdb_write_memtable_time, + rocksdb_write_delay_time, + rocksdb_write_pre_and_post_process_time, + rocksdb_db_mutex_lock_nanos, + rocksdb_db_condition_wait_nanos, + rocksdb_merge_operator_time_nanos, + rocksdb_read_index_block_nanos, + rocksdb_read_filter_block_nanos, + rocksdb_new_table_block_iter_nanos, + rocksdb_new_table_iterator_nanos, + rocksdb_block_seek_nanos, + rocksdb_find_table_nanos, + rocksdb_bloom_memtable_hit_count, + rocksdb_bloom_memtable_miss_count, + rocksdb_bloom_sst_hit_count, + rocksdb_bloom_sst_miss_count, + rocksdb_key_lock_wait_time, + rocksdb_key_lock_wait_count, + rocksdb_env_new_sequential_file_nanos, + rocksdb_env_new_random_access_file_nanos, + rocksdb_env_new_writable_file_nanos, + rocksdb_env_reuse_writable_file_nanos, + rocksdb_env_new_random_rw_file_nanos, + rocksdb_env_new_directory_nanos, + rocksdb_env_file_exists_nanos, + rocksdb_env_get_children_nanos, + rocksdb_env_get_children_file_attributes_nanos, + rocksdb_env_delete_file_nanos, + rocksdb_env_create_dir_nanos, + rocksdb_env_create_dir_if_missing_nanos, + rocksdb_env_delete_dir_nanos, + rocksdb_env_get_file_size_nanos, + rocksdb_env_get_file_modification_time_nanos, + rocksdb_env_rename_file_nanos, + rocksdb_env_link_file_nanos, + 
rocksdb_env_lock_file_nanos, + rocksdb_env_unlock_file_nanos, + rocksdb_env_new_logger_nanos, + rocksdb_number_async_seek, + rocksdb_blob_cache_hit_count, + rocksdb_blob_read_count, + rocksdb_blob_read_byte, + rocksdb_blob_read_time, + rocksdb_blob_checksum_time, + rocksdb_blob_decompress_time, + rocksdb_internal_range_del_reseek_count, + rocksdb_total_metric_count = 78 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_set_perf_level(int); +extern ROCKSDB_LIBRARY_API rocksdb_perfcontext_t* rocksdb_perfcontext_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_reset( + rocksdb_perfcontext_t* context); +extern ROCKSDB_LIBRARY_API char* rocksdb_perfcontext_report( + rocksdb_perfcontext_t* context, unsigned char exclude_zero_counters); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context, int metric); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_destroy( + rocksdb_perfcontext_t* context); + +/* Compaction Filter */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t* +rocksdb_compactionfilter_create( + void* state, void (*destructor)(void*), + unsigned char (*filter)(void*, int level, const char* key, + size_t key_length, const char* existing_value, + size_t value_length, char** new_value, + size_t* new_value_length, + unsigned char* value_changed), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots( + rocksdb_compactionfilter_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy( + rocksdb_compactionfilter_t*); + +/* Compaction Filter Context */ + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_full_compaction( + rocksdb_compactionfiltercontext_t* context); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_manual_compaction( + rocksdb_compactionfiltercontext_t* context); + +/* Compaction Filter Factory */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t* +rocksdb_compactionfilterfactory_create( + void* state, void (*destructor)(void*), + rocksdb_compactionfilter_t* (*create_compaction_filter)( + void*, rocksdb_compactionfiltercontext_t* context), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy( + rocksdb_compactionfilterfactory_t*); + +/* Comparator */ + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy( + rocksdb_comparator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* +rocksdb_comparator_with_ts_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + int (*compare_ts)(void*, const char* a_ts, size_t a_tslen, const char* b_ts, + size_t b_tslen), + int (*compare_without_ts)(void*, const char* a, size_t alen, + unsigned char a_has_ts, const char* b, + size_t blen, unsigned char b_has_ts), + const char* (*name)(void*), size_t timestamp_size); + +/* Filter policy */ + +extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy( + rocksdb_filterpolicy_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom(double bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom_full(double bits_per_key); 
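/*
 * Illustrative configuration sketch (editorial note, not part of the
 * upstream header): wiring the block-based table options declared above
 * together with the bloom filter policy declared here. The cache size and
 * the 10-bits-per-key setting are arbitrary example values.
 *
 *   rocksdb_options_t* opts = rocksdb_options_create();
 *   rocksdb_block_based_table_options_t* bbto =
 *       rocksdb_block_based_options_create();
 *   rocksdb_cache_t* cache = rocksdb_cache_create_lru(128 << 20);  // 128 MiB
 *
 *   rocksdb_block_based_options_set_block_cache(bbto, cache);
 *   rocksdb_block_based_options_set_filter_policy(
 *       bbto, rocksdb_filterpolicy_create_bloom(10.0));
 *   rocksdb_block_based_options_set_cache_index_and_filter_blocks(bbto, 1);
 *   rocksdb_options_set_block_based_table_factory(opts, bbto);
 *
 *   // ... open the database with `opts`, then clean up:
 *   rocksdb_block_based_options_destroy(bbto);
 *   rocksdb_cache_destroy(cache);  // the table factory keeps its own reference
 *   rocksdb_options_destroy(opts);
 *
 * The filter policy is handed off to the table options by
 * rocksdb_block_based_options_set_filter_policy(), so this sketch does not
 * destroy it separately.
 */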
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon(double bloom_equivalent_bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon_hybrid(double bloom_equivalent_bits_per_key, + int bloom_before_level); + +/* Merge Operator */ + +extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t* +rocksdb_mergeoperator_create( + void* state, void (*destructor)(void*), + char* (*full_merge)(void*, const char* key, size_t key_length, + const char* existing_value, + size_t existing_value_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + char* (*partial_merge)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + void (*delete_value)(void*, const char* value, size_t value_length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy( + rocksdb_mergeoperator_t*); + +/* Read options */ + +extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_verify_checksums(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_fill_cache( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot( + rocksdb_readoptions_t*, const rocksdb_snapshot_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_lower_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier( + rocksdb_readoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_readoptions_get_read_tier( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_tailing( + rocksdb_readoptions_t*); +// The functionality that this option controlled has been removed. 
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_managed( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size( + rocksdb_readoptions_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_prefix_same_as_start( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_prefix_same_as_start(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_pin_data( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_total_order_seek(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_max_skippable_internal_keys(rocksdb_readoptions_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_max_skippable_internal_keys(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_ignore_range_deletions( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_ignore_range_deletions(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_deadline( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_deadline(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_io_timeout( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_io_timeout(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_async_io( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_async_io( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_timestamp( + rocksdb_readoptions_t*, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iter_start_ts( + rocksdb_readoptions_t*, const char* ts, size_t tslen); + +/* Write options */ + +extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t* rocksdb_writeoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_sync( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( + rocksdb_writeoptions_t* opt, int disable); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_disable_WAL( + rocksdb_writeoptions_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_ignore_missing_column_families(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_ignore_missing_column_families( + rocksdb_writeoptions_t*); +extern 
ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_no_slowdown( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_no_slowdown( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_low_pri( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_low_pri( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_memtable_insert_hint_per_batch(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_memtable_insert_hint_per_batch( + rocksdb_writeoptions_t*); + +/* Compact range options */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t* +rocksdb_compactoptions_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_exclusive_manual_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_exclusive_manual_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_bottommost_level_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_bottommost_level_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_change_level(rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level( + rocksdb_compactoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_compactoptions_get_target_level( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_full_history_ts_low( + rocksdb_compactoptions_t*, char* ts, size_t tslen); + +/* Flush options */ + +extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* rocksdb_flushoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( + rocksdb_flushoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_flushoptions_get_wait( + rocksdb_flushoptions_t*); + +/* Memory allocator */ + +extern ROCKSDB_LIBRARY_API rocksdb_memory_allocator_t* +rocksdb_jemalloc_nodump_allocator_create(char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_allocator_destroy( + rocksdb_memory_allocator_t*); + +/* Cache */ + +extern ROCKSDB_LIBRARY_API rocksdb_lru_cache_options_t* +rocksdb_lru_cache_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_destroy( + rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_capacity( + rocksdb_lru_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_num_shard_bits( + rocksdb_lru_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_memory_allocator( + rocksdb_lru_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( + size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_lru_with_strict_capacity_limit(size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru_opts( + 
rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_disown_data( + rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( + rocksdb_cache_t* cache, size_t capacity); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_capacity(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_usage(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache); + +/* HyperClockCache */ +extern ROCKSDB_LIBRARY_API rocksdb_hyper_clock_cache_options_t* +rocksdb_hyper_clock_cache_options_create(size_t capacity, + size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_destroy( + rocksdb_hyper_clock_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_set_capacity( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_estimated_entry_charge( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_num_shard_bits( + rocksdb_hyper_clock_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_memory_allocator( + rocksdb_hyper_clock_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_hyper_clock( + size_t capacity, size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_hyper_clock_opts(rocksdb_hyper_clock_cache_options_t*); + +/* DBPath */ + +extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create( + const char* path, uint64_t target_size); +extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); + +/* Env */ + +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(void); +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(void); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_high_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_low_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int +rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_cpu_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* 
rocksdb_envoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy( + rocksdb_envoptions_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_create_dir_if_missing( + rocksdb_env_t* env, const char* path, char** errptr); + +/* SstFile */ + +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env, + const rocksdb_options_t* io_options); +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create_with_comparator( + const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, + const rocksdb_comparator_t* comparator); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open( + rocksdb_sstfilewriter_t* writer, const char* name, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_range( + rocksdb_sstfilewriter_t* writer, const char* begin_key, size_t begin_keylen, + const char* end_key, size_t end_keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish( + rocksdb_sstfilewriter_t* writer, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_file_size( + rocksdb_sstfilewriter_t* writer, uint64_t* file_size); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy( + rocksdb_sstfilewriter_t* writer); +extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t* +rocksdb_ingestexternalfileoptions_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_move_files( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_snapshot_consistency( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char snapshot_consistency); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_global_seqno( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_blocking_flush( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char allow_blocking_flush); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_ingest_behind( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char ingest_behind); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_fail_if_not_bottommost_level( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char fail_if_not_bottommost_level); + +extern 
ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy( + rocksdb_ingestexternalfileoptions_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file( + rocksdb_t* db, const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, + const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_try_catch_up_with_primary( + rocksdb_t* db, char** errptr); + +/* SliceTransform */ + +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create( + void* state, void (*destructor)(void*), + char* (*transform)(void*, const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)(void*, const char* key, size_t length), + unsigned char (*in_range)(void*, const char* key, size_t length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* + rocksdb_slicetransform_create_fixed_prefix(size_t); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create_noop(void); +extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy( + rocksdb_slicetransform_t*); + +/* Universal Compaction options */ + +enum { + rocksdb_similar_size_compaction_stop_style = 0, + rocksdb_total_size_compaction_stop_style = 1 +}; + +extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* +rocksdb_universal_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_size_ratio( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_min_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_compression_size_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_stop_style( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* 
+rocksdb_fifo_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts, unsigned char allow_compaction); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_fifo_compaction_options_get_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_fifo_compaction_options_get_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts); + +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( + const rocksdb_livefiles_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_column_family_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_entries(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_deletions(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy( + const rocksdb_livefiles_t*); + +/* Utility Helpers */ + +extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string( + const rocksdb_options_t* base_options, const char* opts_str, + rocksdb_options_t* new_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +/* MetaData */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata(rocksdb_t* db); + +/** + * Returns the rocksdb_column_family_metadata_t of the specified + * column family. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_column_family_metadata_destroy. 
+ */ +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_metadata_destroy( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_column_family_metadata_get_size( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t rocksdb_column_family_metadata_get_file_count( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_metadata_get_name( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_column_family_metadata_get_level_count( + rocksdb_column_family_metadata_t* cf_meta); + +/** + * Returns the rocksdb_level_metadata_t of the ith level from the specified + * column family metadata. + * + * If the specified i is greater than or equal to the number of levels + * in the specified column family, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_level_metadata_destroy before releasing its parent + * rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_level_metadata_t* +rocksdb_column_family_metadata_get_level_metadata( + rocksdb_column_family_metadata_t* cf_meta, size_t i); + +/** + * Releases the specified rocksdb_level_metadata_t. + * + * Note that the specified rocksdb_level_metadata_t must be released + * before the release of its parent rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_level_metadata_destroy( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API int rocksdb_level_metadata_get_level( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_level_metadata_get_size(rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_level_metadata_get_file_count(rocksdb_level_metadata_t* level_meta); + +/** + * Returns the sst_file_metadata_t of the ith file from the specified level + * metadata. + * + * If the specified i is greater than or equal to the number of files + * in the specified level, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_sst_file_metadata_destroy before releasing its + * parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_sst_file_metadata_t* +rocksdb_level_metadata_get_sst_file_metadata( + rocksdb_level_metadata_t* level_meta, size_t i); + +/** + * Releases the specified rocksdb_sst_file_metadata_t. + * + * Note that the specified rocksdb_sst_file_metadata_t must be released + * before the release of its parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_sst_file_metadata_destroy( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API char* +rocksdb_sst_file_metadata_get_relative_filename( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_sst_file_metadata_get_size(rocksdb_sst_file_metadata_t* file_meta); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. 
+ */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_smallestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. + */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_largestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/* Transactions */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_transactiondb_create_column_family( + rocksdb_transactiondb_t* txn_db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* +rocksdb_transactiondb_open_column_families( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + int num_column_families, const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot( + rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_property_value( + rocksdb_transactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_transactiondb_property_int( + rocksdb_transactiondb_t* db, const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin( + rocksdb_transactiondb_t* txn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_transaction_options_t* txn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t** +rocksdb_transactiondb_get_prepared_transactions(rocksdb_transactiondb_t* txn_db, + size_t* cnt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_name( + rocksdb_transaction_t* txn, const char* name, size_t name_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_name( + rocksdb_transaction_t* txn, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_prepare( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_savepoint( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback_to_savepoint( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_transaction_get_writebatch_wi(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch( + 
rocksdb_transaction_t* txn, rocksdb_writebatch_t* writebatch, + char** errptr); + +// This rocksdb_writebatch_wi_t should be freed with rocksdb_free +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch_wi( + rocksdb_transaction_t* txn, rocksdb_writebatch_wi_t* wi, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_commit_timestamp( + rocksdb_transaction_t* txn, uint64_t commit_timestamp); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_set_read_timestamp_for_validation( + rocksdb_transaction_t* txn, uint64_t read_timestamp); + +// This snapshot should be freed using rocksdb_free +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_cf(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, + unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API 
rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( + rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + 
const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( + rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cfs( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_families, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_wal( + rocksdb_transactiondb_t* txn_db, unsigned char sync, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, + const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_optimistictransactiondb_get_base_db( + rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close_base_db( + rocksdb_t* base_db); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* +rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_write( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( 
+ rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_optimistictransactiondb_checkpoint_object_create( + rocksdb_optimistictransactiondb_t* otxn_db, char** errptr); + +/* Transaction Options */ + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* +rocksdb_transactiondb_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy( + rocksdb_transactiondb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks( + rocksdb_transactiondb_options_t* opt, int64_t max_num_locks); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes( + rocksdb_transactiondb_options_t* opt, size_t num_stripes); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_transaction_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_default_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t* +rocksdb_transaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy( + rocksdb_transaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_lock_timeout( + rocksdb_transaction_options_t* opt, int64_t lock_timeout); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration( + rocksdb_transaction_options_t* opt, int64_t expiration); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_deadlock_detect_depth( + rocksdb_transaction_options_t* opt, int64_t depth); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_max_write_batch_size( + rocksdb_transaction_options_t* opt, size_t size); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_skip_prepare( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API char* rocksdb_optimistictransactiondb_property_value( + rocksdb_optimistictransactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_optimistictransactiondb_property_int( + rocksdb_optimistictransactiondb_t* db, const char* propname, + uint64_t* out_val); + +// referring to convention (3), this should be used by client +// to free memory that was malloc()ed +extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + 
size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy( + rocksdb_pinnableslice_t* v); +extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value( + const rocksdb_pinnableslice_t* t, size_t* vlen); + +extern ROCKSDB_LIBRARY_API rocksdb_memory_consumers_t* +rocksdb_memory_consumers_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_db( + rocksdb_memory_consumers_t* consumers, rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_cache( + rocksdb_memory_consumers_t* consumers, rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_destroy( + rocksdb_memory_consumers_t* consumers); +extern ROCKSDB_LIBRARY_API rocksdb_memory_usage_t* +rocksdb_approximate_memory_usage_create(rocksdb_memory_consumers_t* consumers, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_memory_usage_destroy( + rocksdb_memory_usage_t* usage); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_unflushed( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_readers_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_cache_total( + rocksdb_memory_usage_t* memory_usage); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_dump_malloc_stats( + rocksdb_options_t*, unsigned char); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t*, + unsigned char); + +extern ROCKSDB_LIBRARY_API void rocksdb_cancel_all_background_work( + rocksdb_t* db, unsigned char wait); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_manual_compaction( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_manual_compaction(rocksdb_t* db); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif diff --git a/deps/x86/rocksdb_static/librocksdb.a b/deps/x86/rocksdb_static/librocksdb.a new file mode 100644 index 0000000000000000000000000000000000000000..0c788dd67443bf7de6c8ddc4067da8fd85cf21a9 Binary files /dev/null and b/deps/x86/rocksdb_static/librocksdb.a differ diff --git a/deps/x86/rocksdb_static/rocksdb/c.h b/deps/x86/rocksdb_static/rocksdb/c.h new file mode 100644 index 0000000000000000000000000000000000000000..1ba7fabefe92f674ecbd27a91bfbb57b0ed17d23 --- /dev/null +++ b/deps/x86/rocksdb_static/rocksdb/c.h @@ -0,0 +1,2844 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors. + + C bindings for rocksdb. May be useful as a stable ABI that can be + used by programs that keep rocksdb in a shared library, or for + a JNI api. + + Does not support: + . getters for the option types + . custom comparators that implement key shortening + . capturing post-write-snapshot + . 
custom iter, db, env, cache implementations using just the C bindings + + Some conventions: + + (1) We expose just opaque struct pointers and functions to clients. + This allows us to change internal representations without having to + recompile clients. + + (2) For simplicity, there is no equivalent to the Slice type. Instead, + the caller has to pass the pointer and length as separate + arguments. + + (3) Errors are represented by a null-terminated c string. NULL + means no error. All operations that can raise an error are passed + a "char** errptr" as the last argument. One of the following must + be true on entry: + *errptr == NULL + *errptr points to a malloc()ed null-terminated error message + On success, a leveldb routine leaves *errptr unchanged. + On failure, leveldb frees the old value of *errptr and + set *errptr to a malloc()ed error message. + + (4) Bools have the type unsigned char (0 == false; rest == true) + + (5) All of the pointer arguments must be non-NULL. +*/ + +#pragma once + +#ifdef _WIN32 +#ifdef ROCKSDB_DLL +#ifdef ROCKSDB_LIBRARY_EXPORTS +#define ROCKSDB_LIBRARY_API __declspec(dllexport) +#else +#define ROCKSDB_LIBRARY_API __declspec(dllimport) +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/* Exported types */ + +typedef struct rocksdb_t rocksdb_t; +typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t; +typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t; +typedef struct rocksdb_backup_engine_options_t rocksdb_backup_engine_options_t; +typedef struct rocksdb_restore_options_t rocksdb_restore_options_t; +typedef struct rocksdb_memory_allocator_t rocksdb_memory_allocator_t; +typedef struct rocksdb_lru_cache_options_t rocksdb_lru_cache_options_t; +typedef struct rocksdb_hyper_clock_cache_options_t + rocksdb_hyper_clock_cache_options_t; +typedef struct rocksdb_cache_t rocksdb_cache_t; +typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t; +typedef struct rocksdb_compactionfiltercontext_t + rocksdb_compactionfiltercontext_t; +typedef struct rocksdb_compactionfilterfactory_t + rocksdb_compactionfilterfactory_t; +typedef struct rocksdb_comparator_t rocksdb_comparator_t; +typedef struct rocksdb_dbpath_t rocksdb_dbpath_t; +typedef struct rocksdb_env_t rocksdb_env_t; +typedef struct rocksdb_fifo_compaction_options_t + rocksdb_fifo_compaction_options_t; +typedef struct rocksdb_filelock_t rocksdb_filelock_t; +typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t; +typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t; +typedef struct rocksdb_iterator_t rocksdb_iterator_t; +typedef struct rocksdb_logger_t rocksdb_logger_t; +typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t; +typedef struct rocksdb_options_t rocksdb_options_t; +typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t; +typedef struct rocksdb_block_based_table_options_t + rocksdb_block_based_table_options_t; +typedef struct rocksdb_cuckoo_table_options_t rocksdb_cuckoo_table_options_t; +typedef struct rocksdb_randomfile_t rocksdb_randomfile_t; +typedef struct rocksdb_readoptions_t rocksdb_readoptions_t; +typedef struct rocksdb_seqfile_t rocksdb_seqfile_t; +typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t; +typedef struct rocksdb_snapshot_t rocksdb_snapshot_t; +typedef struct rocksdb_writablefile_t rocksdb_writablefile_t; +typedef struct rocksdb_writebatch_t rocksdb_writebatch_t; 
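
Not part of the upstream header: a minimal sketch of conventions (3) and (4) above in use. The database path is a placeholder, the header is assumed to be on the include path as `rocksdb/c.h`, and `rocksdb_options_create()`, `rocksdb_writeoptions_create()` and `rocksdb_readoptions_create()` are declared further down in this header.

```c
/* Sketch only: every fallible call takes a char** errptr, and memory the API
   returns to the caller is released with rocksdb_free(). */
#include <stdio.h>
#include <rocksdb/c.h>

int main(void) {
  char *err = NULL; /* convention (3): must be NULL (or malloc()ed) on entry */

  rocksdb_options_t *opts = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(opts, 1); /* convention (4): bools are unsigned char */

  rocksdb_t *db = rocksdb_open(opts, "/tmp/rocksdb_c_example" /* placeholder path */, &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    rocksdb_free(err);
    rocksdb_options_destroy(opts);
    return 1;
  }

  rocksdb_writeoptions_t *wopts = rocksdb_writeoptions_create();
  rocksdb_put(db, wopts, "key", 3, "value", 5, &err);
  if (err != NULL) {
    fprintf(stderr, "put failed: %s\n", err);
    rocksdb_free(err);
    err = NULL; /* reset before reusing errptr, per convention (3) */
  }

  rocksdb_readoptions_t *ropts = rocksdb_readoptions_create();
  size_t vlen = 0;
  char *val = rocksdb_get(db, ropts, "key", 3, &vlen, &err);
  if (err != NULL) {
    fprintf(stderr, "get failed: %s\n", err);
    rocksdb_free(err);
  } else if (val != NULL) {
    printf("key -> %.*s\n", (int)vlen, val);
    rocksdb_free(val); /* returned value is malloc()ed; free with rocksdb_free */
  }

  rocksdb_readoptions_destroy(ropts);
  rocksdb_writeoptions_destroy(wopts);
  rocksdb_close(db);
  rocksdb_options_destroy(opts);
  return 0;
}
```
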
+typedef struct rocksdb_writebatch_wi_t rocksdb_writebatch_wi_t; +typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t; +typedef struct rocksdb_universal_compaction_options_t + rocksdb_universal_compaction_options_t; +typedef struct rocksdb_livefiles_t rocksdb_livefiles_t; +typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t; +typedef struct rocksdb_column_family_metadata_t + rocksdb_column_family_metadata_t; +typedef struct rocksdb_level_metadata_t rocksdb_level_metadata_t; +typedef struct rocksdb_sst_file_metadata_t rocksdb_sst_file_metadata_t; +typedef struct rocksdb_envoptions_t rocksdb_envoptions_t; +typedef struct rocksdb_ingestexternalfileoptions_t + rocksdb_ingestexternalfileoptions_t; +typedef struct rocksdb_sstfilewriter_t rocksdb_sstfilewriter_t; +typedef struct rocksdb_ratelimiter_t rocksdb_ratelimiter_t; +typedef struct rocksdb_perfcontext_t rocksdb_perfcontext_t; +typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; +typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; +typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; +typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; +typedef struct rocksdb_optimistictransactiondb_t + rocksdb_optimistictransactiondb_t; +typedef struct rocksdb_optimistictransaction_options_t + rocksdb_optimistictransaction_options_t; +typedef struct rocksdb_transaction_t rocksdb_transaction_t; +typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; +typedef struct rocksdb_wal_iterator_t rocksdb_wal_iterator_t; +typedef struct rocksdb_wal_readoptions_t rocksdb_wal_readoptions_t; +typedef struct rocksdb_memory_consumers_t rocksdb_memory_consumers_t; +typedef struct rocksdb_memory_usage_t rocksdb_memory_usage_t; + +/* DB operations */ + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_with_ttl( + const rocksdb_options_t* options, const char* name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, const char* name, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* +rocksdb_backup_engine_open_opts(const rocksdb_backup_engine_options_t* options, + rocksdb_env_t* env, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( + rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup_flush( + rocksdb_backup_engine_t* be, rocksdb_t* db, + unsigned char flush_before_backup, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( + rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* +rocksdb_restore_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( + rocksdb_restore_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( + rocksdb_restore_options_t* opt, int v); + +extern 
ROCKSDB_LIBRARY_API void rocksdb_backup_engine_verify_backup( + rocksdb_backup_engine_t* be, uint32_t backup_id, char** errptr); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* +rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API int64_t rocksdb_backup_engine_info_timestamp( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_backup_id( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_backup_engine_info_size( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( + rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_increase_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* ts_low, size_t ts_lowlen, 
char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + size_t* ts_lowlen, char** errptr); + +/* BackupEngineOptions */ + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_options_t* +rocksdb_backup_engine_options_create(const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_backup_dir( + rocksdb_backup_engine_options_t* options, const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_env( + rocksdb_backup_engine_options_t* options, rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_table_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_share_table_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_sync( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_backup_engine_options_get_sync( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_destroy_old_data( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_destroy_old_data( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_log_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_backup_log_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_backup_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_restore_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_restore_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_background_operations( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_background_operations( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options, uint64_t size); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* 
options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_destroy( + rocksdb_backup_engine_options_t*); + +/* Checkpoint */ + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create( + rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir, + uint64_t log_size_for_flush, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy( + rocksdb_checkpoint_t* checkpoint); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_and_trim_history( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char* trim_ts, + size_t trim_tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families_with_ttl( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, const int* ttls, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary_column_families( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( + const rocksdb_options_t* options, const char* name, size_t* lencf, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( + char** list, size_t len); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family(rocksdb_t* db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family_with_ttl( + rocksdb_t* db, const rocksdb_options_t* column_family_options, + const char* column_family_name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( + rocksdb_column_family_handle_t*); + +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_column_family_handle_get_id(rocksdb_column_family_handle_t* handle); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_handle_get_name( + rocksdb_column_family_handle_t* handle, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_close(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_put( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_range_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* start_key, + size_t start_key_len, const char* end_key, size_t end_key_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_write( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +/* Returns NULL if not found. A malloc()ed array otherwise. + Stores the length of the array in *vallen. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +// if values_list[i] == NULL and errs[i] == NULL, +// then we got status.IsNotFound(), which we will not return. +// all errors except status status.ok() and status.IsNotFound() are returned. +// +// errs, values_list and values_list_sizes must be num_keys in length, +// allocated by the caller. +// errs is a list of strings as opposed to the conventional one error, +// where errs[i] is the status for retrieval of keys_list[i]. +// each non-NULL errs entry is a malloc()ed, null terminated string. +// each non-NULL values_list entry is a malloc()ed array, with +// the length for each stored in values_list_sizes[i]. 
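
A short sketch of how the contract described in the comment above is typically consumed with rocksdb_multi_get(), declared immediately below. The helper name and the keys are illustrative, and `db` / `ropts` are assumed to have been created as in the earlier sketch.

```c
/* values_list[i] == NULL && errs[i] == NULL  means "not found";
   a non-NULL errs[i] is a malloc()ed, null-terminated error message. */
#include <stdio.h>
#include <rocksdb/c.h>

void multi_get_example(rocksdb_t *db, const rocksdb_readoptions_t *ropts) {
  const char *keys[3] = {"k1", "k2", "k3"};
  const size_t key_sizes[3] = {2, 2, 2};

  /* caller-allocated output arrays, one slot per key */
  char *values[3] = {NULL, NULL, NULL};
  size_t value_sizes[3] = {0, 0, 0};
  char *errs[3] = {NULL, NULL, NULL};

  rocksdb_multi_get(db, ropts, 3, keys, key_sizes, values, value_sizes, errs);

  for (size_t i = 0; i < 3; i++) {
    if (errs[i] != NULL) {
      fprintf(stderr, "%s: error: %s\n", keys[i], errs[i]);
      rocksdb_free(errs[i]);
    } else if (values[i] == NULL) {
      printf("%s: not found\n", keys[i]);
    } else {
      printf("%s -> %.*s\n", keys[i], (int)value_sizes[i], values[i]);
      rocksdb_free(values[i]); /* each value is an individually malloc()ed buffer */
    }
  }
}
```
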
+extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** timestamp_list, + size_t* timestamp_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** timestamps_list, + size_t* timestamps_list_sizes, char** errs); + +// The MultiGet API that improves performance by batching operations +// in the read path for greater efficiency. Currently, only the block based +// table format with full filters are supported. Other table formats such +// as plain table, block based table with block based filters and +// partitioned indexes will still work, but will not get any performance +// benefits. +// +// Note that all the keys passed to this API are restricted to a single +// column family. +// +// Parameters - +// db - the RocksDB instance. +// options - ReadOptions +// column_family - ColumnFamilyHandle* that the keys belong to. All the keys +// passed to the API are restricted to a single column family +// num_keys - Number of keys to lookup +// keys_list - Pointer to C style array of keys with num_keys elements +// keys_list_sizes - Pointer to C style array of the size of corresponding key +// in key_list with num_keys elements. +// values - Pointer to C style array of PinnableSlices with num_keys elements +// statuses - Pointer to C style array of Status with num_keys elements +// sorted_input - If true, it means the input keys are already sorted by key +// order, so the MultiGet() API doesn't have to sort them +// again. If false, the keys will be copied and sorted +// internally by the API - the input array will not be +// modified +extern ROCKSDB_LIBRARY_API void rocksdb_batched_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + rocksdb_pinnableslice_t** values, char** errs, const bool sorted_input); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
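
A sketch of the batched MultiGet call documented above, assuming `db`, `ropts` and a column family handle `cf` already exist (e.g. from rocksdb_open_column_families()). Treating a NULL slice with a NULL error as "not found" mirrors the rocksdb_multi_get() convention and is an assumption here, not something the comment above spells out.

```c
#include <stdbool.h>
#include <stdio.h>
#include <rocksdb/c.h>

void batched_multi_get_example(rocksdb_t *db, const rocksdb_readoptions_t *ropts,
                               rocksdb_column_family_handle_t *cf) {
  const char *keys[2] = {"k1", "k2"};
  const size_t key_sizes[2] = {2, 2};

  rocksdb_pinnableslice_t *values[2] = {NULL, NULL};
  char *errs[2] = {NULL, NULL};

  /* sorted_input = false: the keys above are not guaranteed to be in key order */
  rocksdb_batched_multi_get_cf(db, ropts, cf, 2, keys, key_sizes, values, errs, false);

  for (size_t i = 0; i < 2; i++) {
    if (errs[i] != NULL) {
      fprintf(stderr, "%s: error: %s\n", keys[i], errs[i]);
      rocksdb_free(errs[i]);
    } else if (values[i] == NULL) {
      printf("%s: not found\n", keys[i]); /* assumed convention, see lead-in */
    } else {
      size_t vlen = 0;
      const char *v = rocksdb_pinnableslice_value(values[i], &vlen);
      printf("%s -> %.*s\n", keys[i], (int)vlen, v);
      rocksdb_pinnableslice_destroy(values[i]);
    }
  }
}
```

Unlike rocksdb_multi_get(), the pinned variant does not copy each value into a fresh malloc()ed buffer; the slice keeps the underlying data pinned until rocksdb_pinnableslice_destroy() is called.
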
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_wal_iterator_t* rocksdb_get_updates_since( + rocksdb_t* db, uint64_t seq_number, + const rocksdb_wal_readoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( + rocksdb_t* db, rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, size_t size, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( + rocksdb_t* db, const rocksdb_snapshot_t* snapshot); + +/* Returns NULL if property name is unknown. + Else returns a pointer to a malloc()-ed null-terminated value. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, + const char* propname); +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int(rocksdb_t* db, + const char* propname, + uint64_t* out_val); + +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( + rocksdb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, + const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt( + rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key, + size_t start_key_len, 
const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, + const char* name); + +extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush( + rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cf( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cfs( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_family, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_wal(rocksdb_t* db, + unsigned char sync, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions( + rocksdb_t* db, unsigned char force, char** errptr); + +/* Management operations */ + +extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +/* Iterator */ + +extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( + const rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, + const char* k, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*, + const char* k, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( + const rocksdb_iterator_t*, size_t* klen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( + const rocksdb_iterator_t*, size_t* vlen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_timestamp( + const rocksdb_iterator_t*, size_t* tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( + const rocksdb_iterator_t*, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_next( + rocksdb_wal_iterator_t* iter); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_wal_iter_valid( + const rocksdb_wal_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_status( + const rocksdb_wal_iterator_t* iter, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_wal_iter_get_batch( + const rocksdb_wal_iterator_t* iter, uint64_t* seq); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_get_latest_sequence_number(rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_destroy( + const rocksdb_wal_iterator_t* iter); + +/* Write batch */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create( + void); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( + const char* rep, 
size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, + const char* key, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete( + rocksdb_writebatch_t* b, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf_with_ts( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev_cf( + rocksdb_writebatch_t* b, 
rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range( + rocksdb_writebatch_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev( + rocksdb_writebatch_t* b, int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data( + rocksdb_writebatch_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate( + rocksdb_writebatch_t*, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data( + rocksdb_writebatch_t*, size_t* size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_set_save_point( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_rollback_to_save_point( + rocksdb_writebatch_t*, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_pop_save_point( + rocksdb_writebatch_t*, char** errptr); + +/* Write batch with index */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create(size_t reserved_bytes, + unsigned char overwrite_keys); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create_from(const char* rep, size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count( + rocksdb_writebatch_wi_t* b); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_merge_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_range is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range( + rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_range_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev( + rocksdb_writebatch_wi_t* b, int num_keys, + const char* const* start_keys_list, const size_t* start_keys_list_sizes, + const char* const* end_keys_list, const size_t* end_keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data( + rocksdb_writebatch_wi_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate( + rocksdb_writebatch_wi_t* b, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data( + rocksdb_writebatch_wi_t* b, size_t* size); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_set_save_point( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point( + rocksdb_writebatch_wi_t*, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + const char* key, size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, const char* key, size_t keylen, + size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_wi_t* wbwi, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator, + rocksdb_column_family_handle_t* cf); + +/* Options utils */ + +// Load the latest rocksdb options from the specified db_path. +// +// On success, num_column_families will be updated with a non-zero +// number indicating the number of column families. +// The returned db_options, column_family_names, and column_family_options +// should be released via rocksdb_load_latest_options_destroy(). +// +// On error, a non-null errptr that includes the error message will be +// returned. db_options, column_family_names, and column_family_options +// will be set to NULL. 
+extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options( + const char* db_path, rocksdb_env_t* env, bool ignore_unknown_options, + rocksdb_cache_t* cache, rocksdb_options_t** db_options, + size_t* num_column_families, char*** column_family_names, + rocksdb_options_t*** column_family_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options_destroy( + rocksdb_options_t* db_options, char** list_column_family_names, + rocksdb_options_t** list_column_family_options, size_t len); + +/* Block based table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t* +rocksdb_block_based_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy( + rocksdb_block_based_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_checksum( + rocksdb_block_based_table_options_t*, char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size( + rocksdb_block_based_table_options_t* options, size_t block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_size_deviation( + rocksdb_block_based_table_options_t* options, int block_size_deviation); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_restart_interval( + rocksdb_block_based_table_options_t* options, int block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_index_block_restart_interval( + rocksdb_block_based_table_options_t* options, + int index_block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_metadata_block_size( + rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_partition_filters( + rocksdb_block_based_table_options_t* options, + unsigned char partition_filters); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_optimize_filters_for_memory( + rocksdb_block_based_table_options_t* options, + unsigned char optimize_filters_for_memory); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_use_delta_encoding( + rocksdb_block_based_table_options_t* options, + unsigned char use_delta_encoding); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy( + rocksdb_block_based_table_options_t* options, + rocksdb_filterpolicy_t* filter_policy); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache( + rocksdb_block_based_table_options_t* options, unsigned char no_block_cache); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache( + rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_whole_key_filtering( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version( + rocksdb_block_based_table_options_t*, int); +enum { + rocksdb_block_based_table_index_type_binary_search = 0, + rocksdb_block_based_table_index_type_hash_search = 1, + rocksdb_block_based_table_index_type_two_level_index_search = 2, +}; +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +enum { + rocksdb_block_based_table_data_block_index_type_binary_search = 0, + rocksdb_block_based_table_data_block_index_type_binary_search_and_hash = 1, +}; +extern ROCKSDB_LIBRARY_API void 
+rocksdb_block_based_options_set_data_block_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_data_block_hash_ratio( + rocksdb_block_based_table_options_t* options, double v); +// rocksdb_block_based_options_set_hash_index_allow_collision() +// is removed since BlockBasedTableOptions.hash_index_allow_collision() +// is removed +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_top_level_index_and_filter( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory( + rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options); + +/* Cuckoo table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t* +rocksdb_cuckoo_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy( + rocksdb_cuckoo_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio( + rocksdb_cuckoo_table_options_t* options, double v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void +rocksdb_cuckoo_options_set_identity_as_first_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory( + rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options); + +/* Options */ +extern ROCKSDB_LIBRARY_API void rocksdb_set_options(rocksdb_t* db, int count, + const char* const keys[], + const char* const values[], + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_set_options_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count, + const char* const keys[], const char* const values[], char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create_copy( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism( + rocksdb_options_t* opt, int total_threads); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup( + rocksdb_options_t* opt, uint64_t block_cache_size_mb); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_optimize_universal_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_ingest_behind( + 
rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_ingest_behind(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter( + rocksdb_options_t*, rocksdb_compactionfilter_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory( + rocksdb_options_t*, rocksdb_compactionfilterfactory_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_compaction_readahead_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator( + rocksdb_options_t*, rocksdb_comparator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator( + rocksdb_options_t*, rocksdb_mergeoperator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level( + rocksdb_options_t* opt, const int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_create_if_missing( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_create_missing_column_families(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_create_missing_column_families(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_error_if_exists( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_paranoid_checks( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths( + rocksdb_options_t*, const rocksdb_dbpath_t** path_values, size_t num_paths); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, + rocksdb_env_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, + rocksdb_logger_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_info_log_level( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_db_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_open_files( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_file_opening_threads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size( + rocksdb_options_t* opt, uint64_t n); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_compression_options( + rocksdb_options_t*, int, int, int, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_zstd_max_train_bytes(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_zstd_max_train_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_parallel_threads(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_parallel_threads( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_compression_options_max_dict_buffer_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options(rocksdb_options_t*, int, int, + int, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes( + rocksdb_options_t*, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor( + rocksdb_options_t*, rocksdb_slicetransform_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_num_levels( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_file_num_compaction_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_level0_stop_writes_trigger( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_target_file_size_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_target_file_size_multiplier( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*, + 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_level_compaction_dynamic_level_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_max_bytes_for_level_multiplier(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier_additional( + rocksdb_options_t*, int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, + unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_stats_update_on_db_open(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt); + +/* Blob Options Settings */ +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_files( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_files( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_blob_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_min_blob_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_file_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_compression_type( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_compression_type( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_gc( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_gc( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_age_cutoff( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_age_cutoff( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_force_threshold( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_force_threshold( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_blob_compaction_readahead_size(rocksdb_options_t* opt, + uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_compaction_readahead_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_starting_level( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_file_starting_level( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_cache( + rocksdb_options_t* opt, rocksdb_cache_t* blob_cache); + +enum { + rocksdb_prepopulate_blob_disable = 0, + rocksdb_prepopulate_blob_flush_only = 1 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prepopulate_blob_cache( + rocksdb_options_t* opt, int val); + +extern ROCKSDB_LIBRARY_API int 
rocksdb_options_get_prepopulate_blob_cache( + rocksdb_options_t* opt); + +/* returns a pointer to a malloc()-ed, null terminated string */ +extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_write_buffer_number( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_min_write_buffer_number_to_merge(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_max_write_buffer_number_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_size_to_maintain(rocksdb_options_t*, + int64_t); +extern ROCKSDB_LIBRARY_API int64_t +rocksdb_options_get_max_write_buffer_size_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_pipelined_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_pipelined_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_unordered_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_unordered_write( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_subcompactions( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_max_subcompactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_log_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); 
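+/* Usage sketch (illustrative only; values are arbitrary examples and the
+   call that opens the database is elided). Shows how the option setters
+   declared in this section are typically combined before opening a DB:
+
+     rocksdb_options_t* opts = rocksdb_options_create();
+     rocksdb_options_set_create_if_missing(opts, 1);
+     rocksdb_options_increase_parallelism(opts, 4);
+     rocksdb_options_set_write_buffer_size(opts, (size_t)64 << 20);  // 64 MiB memtable
+     rocksdb_options_set_max_background_jobs(opts, 4);
+     // ... open the database with `opts`, use it, then clean up ...
+     rocksdb_options_destroy(opts);
+*/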
+extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_arena_block_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( + rocksdb_options_t*, const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, + const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void 
+rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_prefix_bloom_size_ratio(rocksdb_options_t*, + double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( + rocksdb_options_t*, size_t, int32_t, int32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory( + rocksdb_options_t*, uint32_t, int, double, size_t); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( + rocksdb_options_t* opt, int level); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_memtable_huge_page_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_successive_merges(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_bloom_locality(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_inplace_update_support(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_avoid_unnecessary_blocking_io(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_avoid_unnecessary_blocking_io(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_experimental_mempurge_threshold(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_experimental_mempurge_threshold(rocksdb_options_t*); + +enum { + rocksdb_tolerate_corrupted_tail_records_recovery = 0, + rocksdb_absolute_consistency_recovery = 1, + rocksdb_point_in_time_recovery = 2, + rocksdb_skip_any_corrupted_records_recovery = 3 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( + rocksdb_options_t*); + +enum { + rocksdb_no_compression = 0, + rocksdb_snappy_compression = 1, + rocksdb_zlib_compression = 2, + rocksdb_bz2_compression = 3, + rocksdb_lz4_compression = 4, + rocksdb_lz4hc_compression = 5, + rocksdb_xpress_compression = 6, + rocksdb_zstd_compression = 7 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( + rocksdb_options_t*); + +enum { + rocksdb_level_compaction = 0, + rocksdb_universal_compaction = 1, + rocksdb_fifo_compaction = 2 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_universal_compaction_options( + rocksdb_options_t*, rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options( + rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( + rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( + rocksdb_options_t* opt, 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( + rocksdb_options_t* opt, rocksdb_cache_t* cache); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_add_compact_on_deletion_collector_factory( + rocksdb_options_t*, size_t window_size, size_t num_dels_trigger); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manual_wal_flush( + rocksdb_options_t* opt, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_manual_wal_flush( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_compression( + rocksdb_options_t* opt, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_compression( + rocksdb_options_t* opt); + +/* RateLimiter */ +extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create( + int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness); +extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy( + rocksdb_ratelimiter_t*); + +/* PerfContext */ +enum { + rocksdb_uninitialized = 0, + rocksdb_disable = 1, + rocksdb_enable_count = 2, + rocksdb_enable_time_except_for_mutex = 3, + rocksdb_enable_time = 4, + rocksdb_out_of_bounds = 5 +}; + +enum { + rocksdb_user_key_comparison_count = 0, + rocksdb_block_cache_hit_count, + rocksdb_block_read_count, + rocksdb_block_read_byte, + rocksdb_block_read_time, + rocksdb_block_checksum_time, + rocksdb_block_decompress_time, + rocksdb_get_read_bytes, + rocksdb_multiget_read_bytes, + rocksdb_iter_read_bytes, + rocksdb_internal_key_skipped_count, + rocksdb_internal_delete_skipped_count, + rocksdb_internal_recent_skipped_count, + rocksdb_internal_merge_count, + rocksdb_get_snapshot_time, + rocksdb_get_from_memtable_time, + rocksdb_get_from_memtable_count, + rocksdb_get_post_process_time, + rocksdb_get_from_output_files_time, + rocksdb_seek_on_memtable_time, + rocksdb_seek_on_memtable_count, + rocksdb_next_on_memtable_count, + rocksdb_prev_on_memtable_count, + rocksdb_seek_child_seek_time, + rocksdb_seek_child_seek_count, + rocksdb_seek_min_heap_time, + rocksdb_seek_max_heap_time, + rocksdb_seek_internal_seek_time, + rocksdb_find_next_user_entry_time, + rocksdb_write_wal_time, + rocksdb_write_memtable_time, + rocksdb_write_delay_time, + rocksdb_write_pre_and_post_process_time, + rocksdb_db_mutex_lock_nanos, + rocksdb_db_condition_wait_nanos, + rocksdb_merge_operator_time_nanos, + rocksdb_read_index_block_nanos, + rocksdb_read_filter_block_nanos, + rocksdb_new_table_block_iter_nanos, + rocksdb_new_table_iterator_nanos, + rocksdb_block_seek_nanos, + rocksdb_find_table_nanos, + rocksdb_bloom_memtable_hit_count, + rocksdb_bloom_memtable_miss_count, + rocksdb_bloom_sst_hit_count, + rocksdb_bloom_sst_miss_count, + rocksdb_key_lock_wait_time, + rocksdb_key_lock_wait_count, + rocksdb_env_new_sequential_file_nanos, + rocksdb_env_new_random_access_file_nanos, + rocksdb_env_new_writable_file_nanos, + rocksdb_env_reuse_writable_file_nanos, + rocksdb_env_new_random_rw_file_nanos, + rocksdb_env_new_directory_nanos, + rocksdb_env_file_exists_nanos, + rocksdb_env_get_children_nanos, + rocksdb_env_get_children_file_attributes_nanos, + rocksdb_env_delete_file_nanos, + rocksdb_env_create_dir_nanos, + rocksdb_env_create_dir_if_missing_nanos, + rocksdb_env_delete_dir_nanos, + rocksdb_env_get_file_size_nanos, + rocksdb_env_get_file_modification_time_nanos, + rocksdb_env_rename_file_nanos, + rocksdb_env_link_file_nanos, + 
rocksdb_env_lock_file_nanos, + rocksdb_env_unlock_file_nanos, + rocksdb_env_new_logger_nanos, + rocksdb_number_async_seek, + rocksdb_blob_cache_hit_count, + rocksdb_blob_read_count, + rocksdb_blob_read_byte, + rocksdb_blob_read_time, + rocksdb_blob_checksum_time, + rocksdb_blob_decompress_time, + rocksdb_internal_range_del_reseek_count, + rocksdb_total_metric_count = 78 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_set_perf_level(int); +extern ROCKSDB_LIBRARY_API rocksdb_perfcontext_t* rocksdb_perfcontext_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_reset( + rocksdb_perfcontext_t* context); +extern ROCKSDB_LIBRARY_API char* rocksdb_perfcontext_report( + rocksdb_perfcontext_t* context, unsigned char exclude_zero_counters); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context, int metric); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_destroy( + rocksdb_perfcontext_t* context); + +/* Compaction Filter */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t* +rocksdb_compactionfilter_create( + void* state, void (*destructor)(void*), + unsigned char (*filter)(void*, int level, const char* key, + size_t key_length, const char* existing_value, + size_t value_length, char** new_value, + size_t* new_value_length, + unsigned char* value_changed), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots( + rocksdb_compactionfilter_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy( + rocksdb_compactionfilter_t*); + +/* Compaction Filter Context */ + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_full_compaction( + rocksdb_compactionfiltercontext_t* context); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_manual_compaction( + rocksdb_compactionfiltercontext_t* context); + +/* Compaction Filter Factory */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t* +rocksdb_compactionfilterfactory_create( + void* state, void (*destructor)(void*), + rocksdb_compactionfilter_t* (*create_compaction_filter)( + void*, rocksdb_compactionfiltercontext_t* context), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy( + rocksdb_compactionfilterfactory_t*); + +/* Comparator */ + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy( + rocksdb_comparator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* +rocksdb_comparator_with_ts_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + int (*compare_ts)(void*, const char* a_ts, size_t a_tslen, const char* b_ts, + size_t b_tslen), + int (*compare_without_ts)(void*, const char* a, size_t alen, + unsigned char a_has_ts, const char* b, + size_t blen, unsigned char b_has_ts), + const char* (*name)(void*), size_t timestamp_size); + +/* Filter policy */ + +extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy( + rocksdb_filterpolicy_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom(double bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom_full(double bits_per_key); 
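+/* Usage sketch (illustrative only; 10.0 bits per key is an arbitrary example
+   value and `opts` is assumed to be a rocksdb_options_t* created elsewhere).
+   Shows how a filter policy is attached via the block-based table options
+   declared earlier in this header:
+
+     rocksdb_filterpolicy_t* policy = rocksdb_filterpolicy_create_bloom(10.0);
+     rocksdb_block_based_table_options_t* table_opts =
+         rocksdb_block_based_options_create();
+     rocksdb_block_based_options_set_filter_policy(table_opts, policy);
+     rocksdb_options_set_block_based_table_factory(opts, table_opts);
+     // Ownership/lifetime of the policy and table options then follows the
+     // library's rules; teardown is elided here.
+*/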
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon(double bloom_equivalent_bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon_hybrid(double bloom_equivalent_bits_per_key, + int bloom_before_level); + +/* Merge Operator */ + +extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t* +rocksdb_mergeoperator_create( + void* state, void (*destructor)(void*), + char* (*full_merge)(void*, const char* key, size_t key_length, + const char* existing_value, + size_t existing_value_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + char* (*partial_merge)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + void (*delete_value)(void*, const char* value, size_t value_length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy( + rocksdb_mergeoperator_t*); + +/* Read options */ + +extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_verify_checksums(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_fill_cache( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot( + rocksdb_readoptions_t*, const rocksdb_snapshot_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_lower_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier( + rocksdb_readoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_readoptions_get_read_tier( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_tailing( + rocksdb_readoptions_t*); +// The functionality that this option controlled has been removed. 
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_managed( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size( + rocksdb_readoptions_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_prefix_same_as_start( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_prefix_same_as_start(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_pin_data( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_total_order_seek(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_max_skippable_internal_keys(rocksdb_readoptions_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_max_skippable_internal_keys(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_ignore_range_deletions( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_ignore_range_deletions(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_deadline( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_deadline(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_io_timeout( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_io_timeout(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_async_io( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_async_io( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_timestamp( + rocksdb_readoptions_t*, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iter_start_ts( + rocksdb_readoptions_t*, const char* ts, size_t tslen); + +/* Write options */ + +extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t* rocksdb_writeoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_sync( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( + rocksdb_writeoptions_t* opt, int disable); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_disable_WAL( + rocksdb_writeoptions_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_ignore_missing_column_families(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_ignore_missing_column_families( + rocksdb_writeoptions_t*); +extern 
ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_no_slowdown( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_no_slowdown( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_low_pri( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_low_pri( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_memtable_insert_hint_per_batch(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_memtable_insert_hint_per_batch( + rocksdb_writeoptions_t*); + +/* Compact range options */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t* +rocksdb_compactoptions_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_exclusive_manual_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_exclusive_manual_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_bottommost_level_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_bottommost_level_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_change_level(rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level( + rocksdb_compactoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_compactoptions_get_target_level( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_full_history_ts_low( + rocksdb_compactoptions_t*, char* ts, size_t tslen); + +/* Flush options */ + +extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* rocksdb_flushoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( + rocksdb_flushoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_flushoptions_get_wait( + rocksdb_flushoptions_t*); + +/* Memory allocator */ + +extern ROCKSDB_LIBRARY_API rocksdb_memory_allocator_t* +rocksdb_jemalloc_nodump_allocator_create(char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_allocator_destroy( + rocksdb_memory_allocator_t*); + +/* Cache */ + +extern ROCKSDB_LIBRARY_API rocksdb_lru_cache_options_t* +rocksdb_lru_cache_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_destroy( + rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_capacity( + rocksdb_lru_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_num_shard_bits( + rocksdb_lru_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_memory_allocator( + rocksdb_lru_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( + size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_lru_with_strict_capacity_limit(size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru_opts( + 
rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_disown_data( + rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( + rocksdb_cache_t* cache, size_t capacity); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_capacity(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_usage(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache); + +/* HyperClockCache */ +extern ROCKSDB_LIBRARY_API rocksdb_hyper_clock_cache_options_t* +rocksdb_hyper_clock_cache_options_create(size_t capacity, + size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_destroy( + rocksdb_hyper_clock_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_set_capacity( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_estimated_entry_charge( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_num_shard_bits( + rocksdb_hyper_clock_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_memory_allocator( + rocksdb_hyper_clock_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_hyper_clock( + size_t capacity, size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_hyper_clock_opts(rocksdb_hyper_clock_cache_options_t*); + +/* DBPath */ + +extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create( + const char* path, uint64_t target_size); +extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); + +/* Env */ + +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(void); +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(void); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_high_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_low_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int +rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_cpu_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* 
rocksdb_envoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy( + rocksdb_envoptions_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_create_dir_if_missing( + rocksdb_env_t* env, const char* path, char** errptr); + +/* SstFile */ + +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env, + const rocksdb_options_t* io_options); +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create_with_comparator( + const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, + const rocksdb_comparator_t* comparator); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open( + rocksdb_sstfilewriter_t* writer, const char* name, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_range( + rocksdb_sstfilewriter_t* writer, const char* begin_key, size_t begin_keylen, + const char* end_key, size_t end_keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish( + rocksdb_sstfilewriter_t* writer, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_file_size( + rocksdb_sstfilewriter_t* writer, uint64_t* file_size); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy( + rocksdb_sstfilewriter_t* writer); +extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t* +rocksdb_ingestexternalfileoptions_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_move_files( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_snapshot_consistency( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char snapshot_consistency); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_global_seqno( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_blocking_flush( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char allow_blocking_flush); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_ingest_behind( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char ingest_behind); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_fail_if_not_bottommost_level( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char fail_if_not_bottommost_level); + +extern 
ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy( + rocksdb_ingestexternalfileoptions_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file( + rocksdb_t* db, const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, + const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_try_catch_up_with_primary( + rocksdb_t* db, char** errptr); + +/* SliceTransform */ + +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create( + void* state, void (*destructor)(void*), + char* (*transform)(void*, const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)(void*, const char* key, size_t length), + unsigned char (*in_range)(void*, const char* key, size_t length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* + rocksdb_slicetransform_create_fixed_prefix(size_t); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create_noop(void); +extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy( + rocksdb_slicetransform_t*); + +/* Universal Compaction options */ + +enum { + rocksdb_similar_size_compaction_stop_style = 0, + rocksdb_total_size_compaction_stop_style = 1 +}; + +extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* +rocksdb_universal_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_size_ratio( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_min_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_compression_size_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_stop_style( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* 
+rocksdb_fifo_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts, unsigned char allow_compaction); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_fifo_compaction_options_get_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_fifo_compaction_options_get_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts); + +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( + const rocksdb_livefiles_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_column_family_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_entries(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_deletions(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy( + const rocksdb_livefiles_t*); + +/* Utility Helpers */ + +extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string( + const rocksdb_options_t* base_options, const char* opts_str, + rocksdb_options_t* new_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +/* MetaData */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata(rocksdb_t* db); + +/** + * Returns the rocksdb_column_family_metadata_t of the specified + * column family. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_column_family_metadata_destroy. 
+ */ +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_metadata_destroy( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_column_family_metadata_get_size( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t rocksdb_column_family_metadata_get_file_count( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_metadata_get_name( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_column_family_metadata_get_level_count( + rocksdb_column_family_metadata_t* cf_meta); + +/** + * Returns the rocksdb_level_metadata_t of the ith level from the specified + * column family metadata. + * + * If the specified i is greater than or equal to the number of levels + * in the specified column family, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_level_metadata_destroy before releasing its parent + * rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_level_metadata_t* +rocksdb_column_family_metadata_get_level_metadata( + rocksdb_column_family_metadata_t* cf_meta, size_t i); + +/** + * Releases the specified rocksdb_level_metadata_t. + * + * Note that the specified rocksdb_level_metadata_t must be released + * before the release of its parent rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_level_metadata_destroy( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API int rocksdb_level_metadata_get_level( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_level_metadata_get_size(rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_level_metadata_get_file_count(rocksdb_level_metadata_t* level_meta); + +/** + * Returns the sst_file_metadata_t of the ith file from the specified level + * metadata. + * + * If the specified i is greater than or equal to the number of files + * in the specified level, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_sst_file_metadata_destroy before releasing its + * parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_sst_file_metadata_t* +rocksdb_level_metadata_get_sst_file_metadata( + rocksdb_level_metadata_t* level_meta, size_t i); + +/** + * Releases the specified rocksdb_sst_file_metadata_t. + * + * Note that the specified rocksdb_sst_file_metadata_t must be released + * before the release of its parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_sst_file_metadata_destroy( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API char* +rocksdb_sst_file_metadata_get_relative_filename( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_sst_file_metadata_get_size(rocksdb_sst_file_metadata_t* file_meta); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. 
+ */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_smallestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. + */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_largestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/* Transactions */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_transactiondb_create_column_family( + rocksdb_transactiondb_t* txn_db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* +rocksdb_transactiondb_open_column_families( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + int num_column_families, const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot( + rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_property_value( + rocksdb_transactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_transactiondb_property_int( + rocksdb_transactiondb_t* db, const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin( + rocksdb_transactiondb_t* txn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_transaction_options_t* txn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t** +rocksdb_transactiondb_get_prepared_transactions(rocksdb_transactiondb_t* txn_db, + size_t* cnt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_name( + rocksdb_transaction_t* txn, const char* name, size_t name_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_name( + rocksdb_transaction_t* txn, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_prepare( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_savepoint( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback_to_savepoint( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_transaction_get_writebatch_wi(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch( + 
rocksdb_transaction_t* txn, rocksdb_writebatch_t* writebatch, + char** errptr); + +// This rocksdb_writebatch_wi_t should be freed with rocksdb_free +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch_wi( + rocksdb_transaction_t* txn, rocksdb_writebatch_wi_t* wi, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_commit_timestamp( + rocksdb_transaction_t* txn, uint64_t commit_timestamp); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_set_read_timestamp_for_validation( + rocksdb_transaction_t* txn, uint64_t read_timestamp); + +// This snapshot should be freed using rocksdb_free +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_cf(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, + unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API 
rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( + rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + 
const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( + rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cfs( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_families, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_wal( + rocksdb_transactiondb_t* txn_db, unsigned char sync, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, + const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_optimistictransactiondb_get_base_db( + rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close_base_db( + rocksdb_t* base_db); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* +rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_write( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( 
+ rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_optimistictransactiondb_checkpoint_object_create( + rocksdb_optimistictransactiondb_t* otxn_db, char** errptr); + +/* Transaction Options */ + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* +rocksdb_transactiondb_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy( + rocksdb_transactiondb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks( + rocksdb_transactiondb_options_t* opt, int64_t max_num_locks); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes( + rocksdb_transactiondb_options_t* opt, size_t num_stripes); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_transaction_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_default_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t* +rocksdb_transaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy( + rocksdb_transaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_lock_timeout( + rocksdb_transaction_options_t* opt, int64_t lock_timeout); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration( + rocksdb_transaction_options_t* opt, int64_t expiration); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_deadlock_detect_depth( + rocksdb_transaction_options_t* opt, int64_t depth); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_max_write_batch_size( + rocksdb_transaction_options_t* opt, size_t size); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_skip_prepare( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API char* rocksdb_optimistictransactiondb_property_value( + rocksdb_optimistictransactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_optimistictransactiondb_property_int( + rocksdb_optimistictransactiondb_t* db, const char* propname, + uint64_t* out_val); + +// referring to convention (3), this should be used by client +// to free memory that was malloc()ed +extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + 
size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy( + rocksdb_pinnableslice_t* v); +extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value( + const rocksdb_pinnableslice_t* t, size_t* vlen); + +extern ROCKSDB_LIBRARY_API rocksdb_memory_consumers_t* +rocksdb_memory_consumers_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_db( + rocksdb_memory_consumers_t* consumers, rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_cache( + rocksdb_memory_consumers_t* consumers, rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_destroy( + rocksdb_memory_consumers_t* consumers); +extern ROCKSDB_LIBRARY_API rocksdb_memory_usage_t* +rocksdb_approximate_memory_usage_create(rocksdb_memory_consumers_t* consumers, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_memory_usage_destroy( + rocksdb_memory_usage_t* usage); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_unflushed( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_readers_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_cache_total( + rocksdb_memory_usage_t* memory_usage); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_dump_malloc_stats( + rocksdb_options_t*, unsigned char); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t*, + unsigned char); + +extern ROCKSDB_LIBRARY_API void rocksdb_cancel_all_background_work( + rocksdb_t* db, unsigned char wait); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_manual_compaction( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_manual_compaction(rocksdb_t* db); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 6bd02644d468b0480168ca3e02c6204e6ee44813..aa1186fa52058aa88412546bfa252e43f1a3fcf9 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -667,6 +667,137 @@ Insert with req_id argument +### Parameter Binding + +The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound. + + + + +#### Create Stmt + +Call the `statement` method in `Connection` to create the `stmt` for parameter binding. + +``` +import taos + +conn = taos.connect() +stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") +``` + +#### parameter binding + +Call the `new_multi_binds` function to create the parameter list for parameter bindings. 
+ +``` +params = new_multi_binds(16) +params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) +params[1].bool((True, None, False)) +params[2].tinyint([-128, -128, None]) # -128 is tinyint null +params[3].tinyint([0, 127, None]) +params[4].smallint([3, None, 2]) +params[5].int([3, 4, None]) +params[6].bigint([3, 4, None]) +params[7].tinyint_unsigned([3, 4, None]) +params[8].smallint_unsigned([3, 4, None]) +params[9].int_unsigned([3, 4, None]) +params[10].bigint_unsigned([3, 4, None]) +params[11].float([3, None, 1]) +params[12].double([3, None, 1.2]) +params[13].binary(["abc", "dddafadfadfadfadfa", None]) +params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) +params[15].timestamp([None, None, 1626861392591]) +``` + +Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values. + +``` +stmt.bind_param_batch(params) +``` + +#### execute sql + +Call `execute` method to execute sql. + +``` +stmt.execute() +``` + +#### Close Stmt + +``` +stmt.close() +``` + +#### Example + +```python +{{#include docs/examples/python/stmt_example.py}} +``` + + + + +#### Create Stmt + +Call the `statement` method in `Connection` to create the `stmt` for parameter binding. + +``` +import taosws + +conn = taosws.connect('taosws://localhost:6041/test') +stmt = conn.statement() +``` + +#### Prepare sql + +Call `prepare` method in stmt to prepare sql. + +``` +stmt.prepare("insert into t1 values (?, ?, ?, ?)") +``` + +#### parameter binding + +Call the `bind_param` method to bind parameters. + +``` +stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), +]) +``` + +Call the `add_batch` method to add parameters to the batch. + +``` +stmt.add_batch() +``` + +#### execute sql + +Call `execute` method to execute sql. + +``` +stmt.execute() +``` + +#### Close Stmt + +``` +stmt.close() +``` + +#### Example + +```python +{{#include docs/examples/python/stmt_websocket_example.py}} +``` + + + ### Other sample programs | Example program links | Example program content | diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md index 7348add4bd276c9f105c47f8b09b12dc645e3206..6d5547e7a97ad92d5e37f6c46f5298ac62ec8768 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -79,8 +79,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -e, --escape-character Use escaped character for database name -N, --without-property Dump database without its properties. -s, --schemaonly Only dump table schemas. - -y, --answer-yes Input yes for prompt. It will skip data file - checking! -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, and lzma. -S, --start-time=START_TIME Start time to dump. Either epoch or diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md old mode 100644 new mode 100755 index 2d290a5f49e28242530a30945408042b3051a4c7..bfc5aabe7bb13b3fada56e1c28c5b5970f734ed8 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -365,6 +365,16 @@ The charset that takes effect is UTF-8. 
| Unit | GB | | Default Value | 2.0 | +### metaCacheMaxSize + +| Attribute | Description | +| ------------- | ------------------------------------------------------------------------------------------------- | +| Applicable | Client Only | +| Meaning | Maximum meta cache size in single client process | +| Unit | MB | +| Default Value | -1 (No limitation) | + + ## Cluster Parameters ### supportVnodes @@ -433,6 +443,26 @@ The charset that takes effect is UTF-8. | Default Value | 0 | | Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time | +### slowLogThreshold + +| Attribute | Description | +| ------------- | -------------------------------------------------------------------------------------------------------- | +| Applicable | Client only | +| Meaning | When an operation execution time exceeds this threshold, the operation will be logged in slow log file | +| Unit | second | +| Default Value | 3 | +| Note | All slow operations will be logged in file "taosSlowLog" in the log directory | + +### slowLogScope + +| Attribute | Description | +| --------------- | ----------------------------------------------------------------------- | +| Applicable | Client only | +| Meaning | Slow log type to be logged | +| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE | +| Default Value | ALL | +| Note | All slow operations will be logged by default, one option could be set | + ### debugFlag | Attribute | Description | diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index a9336697f2d2fb339a0ebe779ec330750b08aae1..f4d9ba8e428792cbd525f15632eff5e14a3ba83a 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.5.1 + + + ## 3.0.5.0 diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md index 28c2ff7a7fe30cb9364c5b7387f595420fa2973d..f3099b13b461548d50228e3bdd2ee5366d214b22 100644 --- a/docs/en/28-releases/02-tools.md +++ b/docs/en/28-releases/02-tools.md @@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat import Release from "/components/ReleaseV3"; +## 2.5.2 + + + ## 2.5.1 diff --git a/docs/examples/python/stmt_example.py b/docs/examples/python/stmt_example.py new file mode 100644 index 0000000000000000000000000000000000000000..83197a777ab6419962212e7f1106fba7b78e884e --- /dev/null +++ b/docs/examples/python/stmt_example.py @@ -0,0 +1,82 @@ +#! 
+ +import taosws + +import taos + +db_name = 'test_ws_stmt' + + +def before(): + taos_conn = taos.connect() + taos_conn.execute("drop database if exists %s" % db_name) + taos_conn.execute("create database %s" % db_name) + taos_conn.select_db(db_name) + taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))") + taos_conn.execute( + "create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))") + taos_conn.close() + + +def stmt_insert(): + before() + + conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name) + + while True: + try: + stmt = conn.statement() + stmt.prepare("insert into t1 values (?, ?, ?, ?)") + + stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), + ]) + + stmt.add_batch() + rows = stmt.execute() + print(rows) + stmt.close() + except Exception as e: + if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed + continue + else: + raise e + + break + + +def stmt_insert_into_stable(): + before() + + conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name) + + while True: + try: + stmt = conn.statement() + stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)") + stmt.set_tbname('stb1_1') + stmt.set_tags([ + taosws.int_to_tag(1), + taosws.varchar_to_tag('aaa'), + ]) + stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), + ]) + + stmt.add_batch() + rows = stmt.execute() + print(rows) + stmt.close() + except Exception as e: + if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed + continue + else: + raise e + + break diff --git a/docs/examples/python/stmt_websocket_example.py b/docs/examples/python/stmt_websocket_example.py new file mode 100644 index 0000000000000000000000000000000000000000..d0824cfa9f29320a6613eb49ee58d108fc61cfd7 --- /dev/null +++ b/docs/examples/python/stmt_websocket_example.py @@ -0,0 +1,78 @@ +#! 
+import time + +import taosws + +import taos + + +def before_test(db_name): + taos_conn = taos.connect() + taos_conn.execute("drop database if exists %s" % db_name) + taos_conn.execute("create database %s" % db_name) + taos_conn.select_db(db_name) + taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))") + taos_conn.execute( + "create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))") + taos_conn.close() + + +def after_test(db_name): + taos_conn = taos.connect() + taos_conn.execute("drop database if exists %s" % db_name) + taos_conn.close() + + +def stmt_insert(): + db_name = 'test_ws_stmt_{}'.format(int(time.time())) + before_test(db_name) + + conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name) + + stmt = conn.statement() + stmt.prepare("insert into t1 values (?, ?, ?, ?)") + + stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), + ]) + + stmt.add_batch() + rows = stmt.execute() + assert rows == 4 + stmt.close() + after_test(db_name) + + +def stmt_insert_into_stable(): + db_name = 'test_ws_stmt_{}'.format(int(time.time())) + before_test(db_name) + + conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name) + + stmt = conn.statement() + stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)") + stmt.set_tbname('stb1_1') + stmt.set_tags([ + taosws.int_to_tag(1), + taosws.varchar_to_tag('aaa'), + ]) + stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), + ]) + + stmt.add_batch() + rows = stmt.execute() + assert rows == 4 + stmt.close() + after_test(db_name) + + +if __name__ == '__main__': + stmt_insert() + stmt_insert_into_stable() diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index 1cd0076ba592e5055b7018f0149b5f6f53f18626..bab6377c7e1781c614e240b60f52d0c0481e65e7 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -299,7 +299,7 @@ SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10; ``` -对表 `d10` 按 10 每秒进行平均值、最大值和最小值聚合统计: +对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计: ```sql SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s); diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 10fb2238eee4231740dffb70dc4043ca51049c7c..f4f1aad63b846aff51fbbba504e5c0e24479315f 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -672,6 +672,141 @@ consumer.close() +### 通过参数绑定写入数据 + +TDengine 的 Python 连接器支持参数绑定风格的 Prepare API 方式写入数据,和大多数数据库类似,目前仅支持用 `?` 来代表待绑定的参数。 + + + + +#### 创建 stmt + +Python 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。 + +``` +import taos + +conn = taos.connect() +stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") +``` + +#### 参数绑定 + +调用 `new_multi_binds` 函数创建 params 列表,用于参数绑定。 + +``` +params = new_multi_binds(16) +params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) 
+params[1].bool((True, None, False)) +params[2].tinyint([-128, -128, None]) # -128 is tinyint null +params[3].tinyint([0, 127, None]) +params[4].smallint([3, None, 2]) +params[5].int([3, 4, None]) +params[6].bigint([3, 4, None]) +params[7].tinyint_unsigned([3, 4, None]) +params[8].smallint_unsigned([3, 4, None]) +params[9].int_unsigned([3, 4, None]) +params[10].bigint_unsigned([3, 4, None]) +params[11].float([3, None, 1]) +params[12].double([3, None, 1.2]) +params[13].binary(["abc", "dddafadfadfadfadfa", None]) +params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) +params[15].timestamp([None, None, 1626861392591]) +``` + +调用 stmt 的 `bind_param` 以单行的方式设置 values 或 `bind_param_batch` 以多行的方式设置 values 方法绑定参数。 + +``` +stmt.bind_param_batch(params) +``` + +#### 执行 sql + +调用 stmt 的 `execute` 方法执行 sql + +``` +stmt.execute() +``` + +#### 关闭 stmt + +最后需要关闭 stmt。 + +``` +stmt.close() +``` + +#### 示例代码 + +```python +{{#include docs/examples/python/stmt_example.py}} +``` + + + + +#### 创建 stmt + +Python WebSocket 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。 + +``` +import taosws + +conn = taosws.connect('taosws://localhost:6041/test') +stmt = conn.statement() +``` + +#### 解析 sql + +调用 stmt 的 `prepare` 方法来解析 insert 语句。 + +``` +stmt.prepare("insert into t1 values (?, ?, ?, ?)") +``` + +#### 参数绑定 + +调用 stmt 的 `bind_param` 方法绑定参数。 + +``` +stmt.bind_param([ + taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]), + taosws.ints_to_column([1, 2, 3, 4]), + taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]), + taosws.varchar_to_column(['a', 'b', 'c', 'd']), +]) +``` + +调用 stmt 的 `add_batch` 方法,将参数加入批处理。 + +``` +stmt.add_batch() +``` + +#### 执行 sql + +调用 stmt 的 `execute` 方法执行 sql + +``` +stmt.execute() +``` + +#### 关闭 stmt + +最后需要关闭 stmt。 + +``` +stmt.close() +``` + +#### 示例代码 + +```python +{{#include docs/examples/python/stmt_websocket_example.py}} +``` + + + ### 其它示例程序 | 示例程序链接 | 示例程序内容 | diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md index 8ff1287c3ec65bbb9975dd8530b60c7acd108b2b..12122edd32dd3aa0f7b335ac7335c82d1c0e2379 100644 --- a/docs/zh/14-reference/06-taosdump.md +++ b/docs/zh/14-reference/06-taosdump.md @@ -82,8 +82,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -e, --escape-character Use escaped character for database name -N, --without-property Dump database without its properties. -s, --schemaonly Only dump tables' schema. - -y, --answer-yes Input yes for prompt. It will skip data file - checking! -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, and lzma. -S, --start-time=START_TIME Start time to dump. 
Either epoch or diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md old mode 100644 new mode 100755 index 115d0ca2c7ae4205d8145c96e8d13627d778f609..51748b68c4629baaa94fbafbb9f3f7c462daa7c7 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -384,6 +384,15 @@ charset 的有效值是 UTF-8。 | 单位 | GB | | 缺省值 | 2.0 | +### metaCacheMaxSize + +| 属性 | 说明 | +| -------- | ---------------------------------------------- | +| 适用范围 | 仅客户端适用 | +| 含义 | 指定单个客户端元数据缓存大小的最大值 | +| 单位 | MB | +| 缺省值 | -1 (无限制) | + ## 集群相关 ### supportVnodes @@ -452,6 +461,26 @@ charset 的有效值是 UTF-8。 | 缺省值 | 0 | | 补充说明 | 大于 0 时,日志文件会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳。 | +### slowLogThreshold + +| 属性 | 说明 | +| -------- | ------------------------------------------------------------- | +| 适用范围 | 仅客户端适用 | +| 含义 | 指定慢查询门限值,大于等于门限值认为是慢查询 | +| 单位 | 秒 | +| 缺省值 | 3 | +| 补充说明 | 每个客户端中所有慢查询会被记录在日志目录下的taosSlowLog文件中 | + +### slowLogScope + +| 属性 | 说明 | +| -------- | --------------------------------------------------------------| +| 适用范围 | 仅客户端适用 | +| 含义 | 指定启动记录哪些类型的慢查询 | +| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE | +| 缺省值 | ALL | +| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 | + ### debugFlag | 属性 | 说明 | diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 3ee19de84f0177d92555c05520137fce5deb911d..ae47388566ef7c1104e50b5a35bee08e8889134a 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.5.1 + + + ## 3.0.5.0 diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md index fbd12b1440abd862f72361e5eb9b4443638f18cd..ba58ed96005cba56cf4b20dc729b10c2c1844769 100644 --- a/docs/zh/28-releases/02-tools.md +++ b/docs/zh/28-releases/02-tools.md @@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 2.5.2 + + + ## 2.5.1 diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java index 5bc23403087578c0791b0a5e6fca74a47aad8184..aeb75cc3a2f4fc97d7ff7af6e451803e7fcacb8f 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java @@ -51,27 +51,27 @@ public class JdbcDemo { private void createDatabase() { String sql = "create database if not exists " + dbName; - exuete(sql); + execute(sql); } private void useDatabase() { String sql = "use " + dbName; - exuete(sql); + execute(sql); } private void dropTable() { final String sql = "drop table if exists " + dbName + "." + tbName + ""; - exuete(sql); + execute(sql); } private void createTable() { final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)"; - exuete(sql); + execute(sql); } private void insert() { final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)"; - exuete(sql); + execute(sql); } private void select() { @@ -120,7 +120,7 @@ public class JdbcDemo { System.out.println("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); } - private void exuete(String sql) { + private void execute(String sql) { long start = System.currentTimeMillis(); try (Statement statement = connection.createStatement()) { boolean execute = statement.execute(sql); diff --git a/examples/JDBC/consumer-demo/pom.xml b/examples/JDBC/consumer-demo/pom.xml index aa3cb154e5df957c3ca8b7447c2a5a02168b6be7..6199efb76ea461909e64aa221ed05899001f2312 100644 --- a/examples/JDBC/consumer-demo/pom.xml +++ b/examples/JDBC/consumer-demo/pom.xml @@ -22,7 +22,7 @@ com.google.guava guava - 30.1.1-jre + 32.0.0-jre diff --git a/include/common/tcommon.h b/include/common/tcommon.h index d2352e100c3a3ef016a57f48f9db0f26809fc7e3..ea17262abdb280618ae00814b3dcd23ca87a0865 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -198,6 +198,7 @@ typedef struct SDataBlockInfo { SBlockID id; int16_t hasVarCol; int16_t dataLoad; // denote if the data is loaded or not + uint8_t scanFlag; // TODO: optimize and remove following int64_t version; // used for stream, and need serialization diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 01281a6dc75884fa0672c42b02de876da5cedaf1..8aa17e46d1b832c170d808d4124ce57dcb25b4c4 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -82,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs; // mnode extern int64_t tsMndSdbWriteDelta; extern int64_t tsMndLogRetention; +extern int8_t tsGrant; extern bool tsMndSkipGrant; // monitor @@ -119,6 +120,7 @@ extern bool tsQueryUseNodeAllocator; extern bool tsKeepColumnName; extern bool tsEnableQueryHb; extern bool tsEnableScience; +extern bool tsTtlChangeOnWrite; extern int32_t tsRedirectPeriod; extern int32_t tsRedirectFactor; extern int32_t tsRedirectMaxPeriod; @@ -198,6 +200,7 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite); void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite); int32_t taosApplyLocalCfg(SConfig *pCfg, char *name); void taosLocalCfgForbiddenToChange(char *name, bool *forbidden); +int8_t taosGranted(); #ifdef __cplusplus } diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 7c5182d76cc0282c166f285eddc26cf9afd920c9..929f2f208a4c0be0a6732bcab4e0427f41fba52a 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -945,7 +945,7 @@ int32_t tSerializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq); int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq); typedef struct { - int32_t timestamp; + int32_t timestampSec; } SVDropTtlTableReq; int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq); @@ -2278,7 +2278,7 @@ typedef struct SVCreateTbReq { int32_t flags; char* name; tb_uid_t uid; - int64_t ctime; + int64_t btime; int32_t ttl; int32_t commentLen; char* comment; @@ -2415,10 +2415,12 @@ typedef struct { int32_t newTTL; int32_t newCommentLen; char* newComment; + int64_t ctimeMs; // fill by vnode } SVAlterTbReq; int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq); int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq); +int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs); typedef struct { int32_t code; @@ -3436,6 +3438,7 @@ typedef struct SDeleteRes { int64_t affectedRows; char tableFName[TSDB_TABLE_NAME_LEN]; char tsColName[TSDB_COL_NAME_LEN]; + int64_t ctimeMs; // fill by vnode } SDeleteRes; int32_t tEncodeDeleteRes(SEncoder* pCoder, const 
SDeleteRes* pRes); @@ -3454,10 +3457,12 @@ int32_t tDecodeSSingleDeleteReq(SDecoder* pCoder, SSingleDeleteReq* pReq); typedef struct { int64_t suid; SArray* deleteReqs; // SArray + int64_t ctimeMs; // fill by vnode } SBatchDeleteReq; int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq); int32_t tDecodeSBatchDeleteReq(SDecoder* pCoder, SBatchDeleteReq* pReq); +int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder* pDecoder, SBatchDeleteReq* pReq, int64_t ctimeMs); typedef struct { int32_t msgIdx; @@ -3525,6 +3530,7 @@ typedef struct { SArray* aRowP; SArray* aCol; }; + int64_t ctimeMs; } SSubmitTbData; typedef struct { diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index ee1888ff4922f55572120789312ecbe0a7acf859..2cf8eacdac98406fe54dce79b090334913ddcae8 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -310,6 +310,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_TMQ_ADD_CHECKINFO, "vnode-tmq-add-checkinfo", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_DEL_CHECKINFO, "vnode-del-checkinfo", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME, "vnode-tmq-consume", SMqPollReq, SMqDataBlkRsp) + TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME_PUSH, "vnode-tmq-consume-push", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index e2944d13dadf12391fd256846ab23cf7fb11e3fc..6031b99cfcef2259123457a792e711b3d80e9004 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -54,7 +54,7 @@ typedef struct SMetaEntry { SRSmaParam rsmaParam; } stbEntry; struct { - int64_t ctime; + int64_t btime; int32_t ttlDays; int32_t commentLen; char* comment; @@ -62,7 +62,7 @@ typedef struct SMetaEntry { uint8_t* pTags; } ctbEntry; struct { - int64_t ctime; + int64_t btime; int32_t ttlDays; int32_t commentLen; char* comment; diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index f44b622cc06e02c61fac0afba92fafb3cc048cc2..453c5d49142934b79758f3faf5e3c12c80c16a55 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -53,6 +53,8 @@ typedef struct SLogicNode { EDataOrderLevel requireDataOrder; // requirements for input data EDataOrderLevel resultDataOrder; // properties of the output data EGroupAction groupAction; + EOrder inputTsOrder; + EOrder outputTsOrder; } SLogicNode; typedef enum EScanType { @@ -111,7 +113,6 @@ typedef struct SJoinLogicNode { SNode* pMergeCondition; SNode* pOnConditions; bool isSingleTableJoin; - EOrder inputTsOrder; SNode* pColEqualOnConditions; } SJoinLogicNode; @@ -229,8 +230,6 @@ typedef struct SWindowLogicNode { int8_t igExpired; int8_t igCheckUpdate; EWindowAlgorithm windowAlgo; - EOrder inputTsOrder; - EOrder outputTsOrder; } SWindowLogicNode; typedef struct SFillLogicNode { @@ -241,7 +240,6 @@ typedef struct SFillLogicNode { SNode* pWStartTs; SNode* pValues; // SNodeListNode STimeWindow timeRange; - EOrder inputTsOrder; } SFillLogicNode; typedef struct SSortLogicNode { @@ -310,6 +308,8 @@ typedef struct SDataBlockDescNode { typedef struct SPhysiNode { ENodeType type; + EOrder inputTsOrder; + EOrder outputTsOrder; SDataBlockDescNode* pOutputDataBlockDesc; SNode* pConditions; SNodeList* pChildren; @@ -406,7 +406,6 @@ typedef struct SSortMergeJoinPhysiNode { SNode* pMergeCondition; SNode* pOnConditions; SNodeList* pTargets; - EOrder inputTsOrder; SNode* pColEqualOnConditions; } 
SSortMergeJoinPhysiNode; @@ -460,8 +459,6 @@ typedef struct SWindowPhysiNode { int64_t watermark; int64_t deleteMark; int8_t igExpired; - EOrder inputTsOrder; - EOrder outputTsOrder; bool mergeDataBlock; } SWindowPhysiNode; @@ -488,7 +485,6 @@ typedef struct SFillPhysiNode { SNode* pWStartTs; // SColumnNode SNode* pValues; // SNodeListNode STimeWindow timeRange; - EOrder inputTsOrder; } SFillPhysiNode; typedef SFillPhysiNode SStreamFillPhysiNode; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index f570698395d0d58f9c6c51b8c88ecfbb5ac491f0..678c694d9b8ed32a3049962a0b0c6ccd04be5f1b 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -69,6 +69,7 @@ typedef struct SColumnNode { uint64_t tableId; int8_t tableType; col_id_t colId; + uint16_t projIdx; // the idx in project list, start from 1 EColumnType colType; // column or tag bool hasIndex; char dbName[TSDB_DB_NAME_LEN]; diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index de3eba599da0ab436e0f3f2a166357b3c0b629ba..6a1091e658cdb297c7153357fa020880acb34c4e 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -281,7 +281,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t (_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID)) #define NEED_CLIENT_REFRESH_VG_ERROR(_code) \ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID) -#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER) +#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) \ + ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER) #define NEED_CLIENT_HANDLE_ERROR(_code) \ (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \ NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code)) diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py index 2edeeb6dbbb682bb06150e30803a7f05c170a5b1..96e2378fb343ef76bd2a167f6fbf95ecf7a88aff 100755 --- a/packaging/checkPackageRuning.py +++ b/packaging/checkPackageRuning.py @@ -42,8 +42,8 @@ else: # os.system("rm -rf /var/lib/taos/*") # os.system("systemctl restart taosd ") -# wait a moment ,at least 5 seconds -time.sleep(5) +# wait a moment ,at least 10 seconds +time.sleep(10) # prepare data by taosBenchmark diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 9f49cf345a4dd9e36f048f03bd49a28539baec66..07819159c4a531556072be150f91c4bd8a353954 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -124,12 +124,12 @@ if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/ ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so fi - if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then - cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/ - fi - if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then - cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/ - fi + # if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then + # cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/ + # fi + # if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then + # cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/ + # fi if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc 
]; then cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/ fi diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 52d5335003097787a9d607ed4f020eb9153ceb31..846d17e7f66bbe41a100e23abe35fd885d2d84dd 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -123,12 +123,12 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib ln -sf libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib/libjemalloc.so fi - if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then - cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib - fi - if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then - cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib - fi +# if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then +# cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib +# fi +# if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then +# cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib +# fi if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig fi diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 1b47b10520147664a3c2f3a558fed1208f84a2ca..f311714f3dd0c55cba3d7473521f2b2b560d23d7 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -315,13 +315,13 @@ function install_jemalloc() { ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + # fi + # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + # fi + if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 53b9c80f10082a1aa70dc6aec54ea54b25be66f1..8b845ca8f4d1fe6f0afcfac6e8f23326e30e4af0 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -214,13 +214,13 @@ function install_jemalloc() { ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; 
then + # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + # fi + # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + # fi + if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 98c5245cd39367203d7a8d8387b56846d1e19083..c5c70e0aa28696732a47d5a75ad3d38c2e3a0ec3 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -241,10 +241,10 @@ function install_jemalloc() { ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so > /dev/null 2>&1 ${csudo}/usr/bin/install -c -d /usr/local/lib - [ -f ${binary_dir}/build/lib/libjemalloc.a ] && - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib - [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] && - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib + # [ -f ${binary_dir}/build/lib/libjemalloc.a ] && + # ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib + # [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] && + # ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc \ diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index c4b074f9037dd679babe1a2bddce2f7c4308f9ee..cd59294fe7d3ddb875a4b39955548b4838e03661 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -118,12 +118,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi + # if [ -f ${build_dir}/lib/libjemalloc.a ]; then + # cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + # fi + # if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + # cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + # fi if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index b0537e8bcfa7428cf22470982f7a0f8498e191e1..6c389502b76cfae68df9882f7ce4713e8c850c01 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -217,12 +217,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp 
${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi + # if [ -f ${build_dir}/lib/libjemalloc.a ]; then + # cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + # fi + # if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + # cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + # fi if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig fi diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index fc392c9684c04ed6ce587d977ec4628df59cbe6f..e79a10c9e9a24014f84cf0c8d29be15cc143bb79 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -169,13 +169,13 @@ function install_jemalloc() { ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + # fi + # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + # fi + if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 7573fd59684900f797ee79c5b37a83087aae308a..63b16a30c5dfa5073202fb207d392f205a6f3389 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -1120,6 +1120,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { if (NEED_CLIENT_HANDLE_ERROR(code)) { tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId); + refreshMeta(pRequest->pTscObj, pRequest); pRequest->prevCode = code; doAsyncQuery(pRequest, true); return; diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 6d53f2b4c51f1845de00150d7bfa54eacc2e591b..d6fdb29b59d5cdf1c8f2c918a426faec19791a34 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -77,6 +77,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { } if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) { + tscError("version not compatible. 
client version: %s, server version: %s", version, connectRsp.sVer); setErrno(pRequest, code); tsem_post(&pRequest->body.rspSem); goto End; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index efa955ec84480f1e5906ff5f934a587200685fbe..503120fe85e93bac6bafdc95f0ed14bfa8094700 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1553,17 +1553,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char } } - char cTmp = 0; // for print tmp if is raw - if (info->isRawLine) { - cTmp = tmp[len]; - tmp[len] = '\0'; - } - uDebug("SML:0x%" PRIx64 " smlParseLine israw:%d, numLines:%d, protocol:%d, len:%d, sql:%s", info->id, - info->isRawLine, numLines, info->protocol, len, tmp); - if (info->isRawLine) { - tmp[len] = cTmp; - } + info->isRawLine, numLines, info->protocol, len, info->isRawLine ? "rawdata" : tmp); if (info->protocol == TSDB_SML_LINE_PROTOCOL) { if (info->dataFormat) { @@ -1584,8 +1575,7 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE; } if (code != TSDB_CODE_SUCCESS) { - tmp[len] = '\0'; - uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp); + uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, info->isRawLine ? "rawdata" : tmp); return code; } if (info->reRun) { diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 07c74927e148fce0663c377d053363fa69750460..7cd33955c19689eb64fb3d4efe2517c23916cf33 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -280,7 +280,7 @@ static const SSysDbTableSchema topicSchema[] = { {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, - {.name = "schema", .bytes = TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "schema", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "meta", .bytes = 4 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, }; diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 033fbb0ef164551f24e95be6e40b0844d763f2e9..0104384bd3cc89282f34ff3fa7409bc5f155f084 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -791,8 +791,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) { * @return */ size_t blockDataGetSerialMetaSize(uint32_t numOfCols) { - // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column - // length | + // | version | total length | total rows | total columns | flag seg| block group id | column schema + // | each column length | return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t); } @@ -1590,18 +1590,35 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int int32_t nRows = payloadSize / rowSize; ASSERT(nRows >= 1); - // the true value must be less than the value of nRows - int32_t additional = 0; + int32_t 
numVarCols = 0; + int32_t numFixCols = 0; for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); if (IS_VAR_DATA_TYPE(pCol->info.type)) { - additional += nRows * sizeof(int32_t); + ++numVarCols; + } else { + ++numFixCols; + } + } + + // find the data payload whose size is greater than payloadSize + int result = -1; + int start = 1; + int end = nRows; + while (start <= end) { + int mid = start + (end - start) / 2; + //data size + var data type columns offset + fixed data type columns bitmap len + int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); + if (midSize > payloadSize) { + result = mid; + end = mid - 1; } else { - additional += BitmapLen(nRows); + start = mid + 1; } } - int32_t newRows = (payloadSize - additional) / rowSize; + int32_t newRows = (result != -1) ? result - 1 : nRows; + // the true value must be less than the value of nRows ASSERT(newRows <= nRows && newRows >= 1); return newRows; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index a79351d5cc73ff423e7b1ab0295008b8a99daaed..948174b565f133ee160f1926c9c674f53539e0ae 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,6 +14,7 @@ */ #define _DEFAULT_SOURCE +#include "os.h" #include "tglobal.h" #include "tconfig.h" #include "tgrant.h" @@ -73,6 +74,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000; // mnode int64_t tsMndSdbWriteDelta = 200; int64_t tsMndLogRetention = 2000; +int8_t tsGrant = 1; bool tsMndSkipGrant = false; // monitor @@ -108,6 +110,7 @@ int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = false; bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. 
bool tsQueryPlannerTrace = false; @@ -511,6 +514,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1; + if (cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, 0) != 0) return -1; if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, 0) != 0) return -1; @@ -871,6 +875,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval; tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; + tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32; tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; @@ -976,6 +981,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; } @@ -1525,3 +1532,5 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) { taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite); uInfo("all debug flag are set to %d", flag); } + +int8_t taosGranted() { return atomic_load_8(&tsGrant); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4cc6b34ca25904961dc18bbe79969d028a8b4f4b..e299b088d308fc55fc519bd9ee41f9819051c383 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -30,6 +30,9 @@ #include "tlog.h" +static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq); +static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq); + int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; @@ -1725,7 +1728,7 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq } else { pReq->unsafe = false; } - + tEndDecode(&decoder); tDecoderClear(&decoder); @@ -3161,7 +3164,7 @@ int32_t tSerializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeI32(&encoder, pReq->timestamp) < 0) return -1; + if (tEncodeI32(&encoder, pReq->timestampSec) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -3174,7 +3177,7 @@ int32_t tDeserializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableR tDecoderInit(&decoder, buf, bufLen); if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->timestamp) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->timestampSec) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); @@ -4671,7 +4674,7 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode if (tDecodeSReplica(&decoder, pReplica) < 0) return -1; } } - + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -6409,7 +6412,7 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1; 
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI64(pCoder, pReq->uid) < 0) return -1; - if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1; + if (tEncodeI64(pCoder, pReq->btime) < 0) return -1; if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1; if (tEncodeI8(pCoder, pReq->type) < 0) return -1; if (tEncodeI32(pCoder, pReq->commentLen) < 0) return -1; @@ -6444,7 +6447,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) { if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1; if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1; - if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1; + if (tDecodeI64(pCoder, &pReq->btime) < 0) return -1; if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1; if (tDecodeI8(pCoder, &pReq->type) < 0) return -1; if (tDecodeI32(pCoder, &pReq->commentLen) < 0) return -1; @@ -6909,14 +6912,13 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) { default: break; } + if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1; tEndEncode(pEncoder); return 0; } -int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) { - if (tStartDecode(pDecoder) < 0) return -1; - +static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) { if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1; if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->colId) < 0) return -1; @@ -6960,6 +6962,28 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) { default: break; } + return 0; +} + +int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; + + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } + + tEndDecode(pDecoder); + return 0; +} + +int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; + + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; tEndDecode(pDecoder); return 0; @@ -7238,6 +7262,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) { if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1; if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1; + if (tEncodeI64(pCoder, pRes->ctimeMs) < 0) return -1; return 0; } @@ -7257,6 +7282,11 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) { if (tDecodeCStrTo(pCoder, pRes->tableFName) < 0) return -1; if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1; + + pRes->ctimeMs = 0; + if (!tDecodeIsEnd(pCoder)) { + if (tDecodeI64(pCoder, &pRes->ctimeMs) < 0) return -1; + } return 0; } @@ -7480,10 +7510,11 @@ int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq) SSingleDeleteReq *pOneReq = taosArrayGet(pReq->deleteReqs, i); if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1; } + if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1; return 0; } -int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { +static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq) { if (tDecodeI64(pDecoder, &pReq->suid) < 0) return -1; int32_t sz; if (tDecodeI32(pDecoder, &sz) < 0) return -1; @@ -7497,6 +7528,24 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, 
SBatchDeleteReq *pReq) { return 0; } +int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { + if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; + + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } + return 0; +} + +int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) { + if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; + + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + return 0; +} + static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubmitTbData) { if (tStartEncode(pCoder) < 0) return -1; @@ -7531,6 +7580,7 @@ static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubm pCoder->pos += rows[iRow]->len; } } + if (tEncodeI64(pCoder, pSubmitTbData->ctimeMs) < 0) return -1; tEndEncode(pCoder); return 0; @@ -7611,6 +7661,14 @@ static int32_t tDecodeSSubmitTbData(SDecoder *pCoder, SSubmitTbData *pSubmitTbDa } } + pSubmitTbData->ctimeMs = 0; + if (!tDecodeIsEnd(pCoder)) { + if (tDecodeI64(pCoder, &pSubmitTbData->ctimeMs) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto _exit; + } + } + tEndDecode(pCoder); _exit: diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 83fb331dbd5fc368f39c8ab866426200c99ab6bc..d1dc872f4b38a9fd9260579b4e71fa2c9b131db8 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -46,6 +46,7 @@ typedef struct { int32_t vgId; int32_t vgVersion; int8_t dropped; + int32_t toVgId; char path[PATH_MAX + 20]; } SWrapperCfg; @@ -55,6 +56,7 @@ typedef struct { int32_t refCount; int8_t dropped; int8_t disable; + int32_t toVgId; char *path; SVnode *pImpl; SMultiWorker pWriteW; @@ -70,6 +72,7 @@ typedef struct { int32_t vnodeNum; int32_t opened; int32_t failed; + bool updateVnodesList; int32_t threadIndex; TdThread thread; SVnodeMgmt *pMgmt; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index bf176ebb40574b5806fe67f5a0b6fb2519886d05..769b274f7fbc1b50c0c2131ad9e3a78151591869 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -71,6 +71,8 @@ static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg ** if (code < 0) goto _OVER; tjsonGetInt32ValueFromDouble(vnode, "vgVersion", pCfg->vgVersion, code); if (code < 0) goto _OVER; + tjsonGetInt32ValueFromDouble(vnode, "toVgId", pCfg->toVgId, code); + if (code < 0) goto _OVER; snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); } @@ -165,6 +167,7 @@ static int32_t vmEncodeVnodeList(SJson *pJson, SVnodeObj **ppVnodes, int32_t num if (tjsonAddDoubleToObject(vnode, "vgId", pVnode->vgId) < 0) return -1; if (tjsonAddDoubleToObject(vnode, "dropped", pVnode->dropped) < 0) return -1; if (tjsonAddDoubleToObject(vnode, "vgVersion", pVnode->vgVersion) < 0) return -1; + if (pVnode->toVgId && tjsonAddDoubleToObject(vnode, "toVgId", pVnode->toVgId) < 0) return -1; if (tjsonAddItemToArray(vnodes, vnode) < 0) return -1; } @@ -179,7 +182,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { SVnodeObj **ppVnodes = NULL; char file[PATH_MAX] = {0}; char realfile[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP); + snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP); 
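+  // file: temporary staging path for the vnode list; realfile below is the final vnodes.json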
snprintf(realfile, sizeof(realfile), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); int32_t numOfVnodes = 0; @@ -226,4 +229,4 @@ _OVER: dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, terrstr(), numOfVnodes); } return code; -} \ No newline at end of file +} diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 814a155cfba6d11d4dff61935a0cd8b0898d765c..94b804290a18864f170235ec3b51268f0039681e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -265,6 +265,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId); +#if 0 if (pMgmt->pTfs) { if (tfsDirExistAt(pMgmt->pTfs, path, (SDiskID){0})) { terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST; @@ -278,8 +279,9 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } } +#endif -if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) { + if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) { tFreeSCreateVnodeReq(&req); dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr()); code = terrno; @@ -482,10 +484,18 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { int32_t srcVgId = req.srcVgId; int32_t dstVgId = req.dstVgId; + + SVnodeObj *pVnode = vmAcquireVnode(pMgmt, dstVgId); + if (pVnode != NULL) { + dError("vgId:%d, vnode already exist", dstVgId); + vmReleaseVnode(pMgmt, pVnode); + terrno = TSDB_CODE_VND_ALREADY_EXIST; + return -1; + } + dInfo("vgId:%d, start to alter vnode hashrange:[%u, %u], dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd, req.dstVgId); - - SVnodeObj *pVnode = vmAcquireVnode(pMgmt, srcVgId); + pVnode = vmAcquireVnode(pMgmt, srcVgId); if (pVnode == NULL) { dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr()); terrno = TSDB_CODE_VND_NOT_EXIST; @@ -499,6 +509,13 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { }; tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); + // prepare alter + pVnode->toVgId = dstVgId; + if (vmWriteVnodeListToFile(pMgmt) != 0) { + dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr()); + return -1; + } + dInfo("vgId:%d, close vnode", srcVgId); vmCloseVnode(pMgmt, pVnode, true); @@ -530,6 +547,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } + // complete alter if (vmWriteVnodeListToFile(pMgmt) != 0) { dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr()); return -1; @@ -712,6 +730,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_ADD_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_DEL_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME_PUSH, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_VG_WALINFO, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 16e7ffc5367c857dad1414c788dc46d9f6263b42..db46ce3ca05ca261b5a396fa647eae7c1ac8a398 100644 --- 
a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -158,6 +158,28 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) taosMemoryFree(pVnode); } +static int32_t vmRestoreVgroupId(SWrapperCfg *pCfg, STfs *pTfs) { + int32_t srcVgId = pCfg->vgId; + int32_t dstVgId = pCfg->toVgId; + if (dstVgId == 0) return 0; + + char srcPath[TSDB_FILENAME_LEN]; + char dstPath[TSDB_FILENAME_LEN]; + + snprintf(srcPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, srcVgId); + snprintf(dstPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, dstVgId); + + int32_t vgId = vnodeRestoreVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs); + if (vgId <= 0) { + dError("vgId:%d, failed to restore vgroup id. srcPath: %s", pCfg->vgId, srcPath); + return -1; + } + + pCfg->vgId = vgId; + pCfg->toVgId = 0; + return 0; +} + static void *vmOpenVnodeInThread(void *param) { SVnodeThread *pThread = param; SVnodeMgmt *pMgmt = pThread->pMgmt; @@ -174,17 +196,33 @@ static void *vmOpenVnodeInThread(void *param) { pMgmt->state.openVnodes, pMgmt->state.totalVnodes); tmsgReportStartup("vnode-open", stepDesc); + if (pCfg->toVgId) { + if (vmRestoreVgroupId(pCfg, pMgmt->pTfs) != 0) { + dError("vgId:%d, failed to restore vgroup id by thread:%d", pCfg->vgId, pThread->threadIndex); + pThread->failed++; + continue; + } + pThread->updateVnodesList = true; + } + snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId); + SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb); if (pImpl == NULL) { dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex); pThread->failed++; - } else { - vmOpenVnode(pMgmt, pCfg, pImpl); - dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex); - pThread->opened++; - atomic_add_fetch_32(&pMgmt->state.openVnodes, 1); + continue; } + + if (vmOpenVnode(pMgmt, pCfg, pImpl) != 0) { + dError("vgId:%d, failed to open vnode by thread:%d", pCfg->vgId, pThread->threadIndex); + pThread->failed++; + continue; + } + + dInfo("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex); + pThread->opened++; + atomic_add_fetch_32(&pMgmt->state.openVnodes, 1); } dInfo("thread:%d, numOfVnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, @@ -242,6 +280,8 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { taosThreadAttrDestroy(&thAttr); } + bool updateVnodesList = false; + for (int32_t t = 0; t < threadNum; ++t) { SVnodeThread *pThread = &threads[t]; if (pThread->vnodeNum > 0 && taosCheckPthreadValid(pThread->thread)) { @@ -249,6 +289,7 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { taosThreadClear(&pThread->thread); } taosMemoryFree(pThread->pCfgs); + if (pThread->updateVnodesList) updateVnodesList = true; } taosMemoryFree(threads); taosMemoryFree(pCfgs); @@ -256,10 +297,15 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { if (pMgmt->state.openVnodes != pMgmt->state.totalVnodes) { dError("there are total vnodes:%d, opened:%d", pMgmt->state.totalVnodes, pMgmt->state.openVnodes); return -1; - } else { - dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes); - return 0; } + + if (updateVnodesList && vmWriteVnodeListToFile(pMgmt) != 0) { + dError("failed to write vnode list since %s", terrstr()); + return -1; + } + + dInfo("successfully opened %d vnodes", pMgmt->state.totalVnodes); + return 0; } static void *vmCloseVnodeInThread(void *param) { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c 
b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index e41c9e10d723ba895c13426f0c4a4cc2423bbe51..76e2f930273ec4558a1c11ffb5b75e9ba3a60505 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -218,6 +218,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) { dDebug("vgId:%d, msg:%p put into vnode-write queue failed since its disable", pVnode->vgId, pMsg); terrno = TSDB_CODE_VND_STOPPED; + code = terrno; break; } dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 7ecf2fd234b4bc8589d2f395dfd3826d89d2f4e2..ea46b7069385fba0c204c5e78f9f7e7983053d85 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -23,10 +23,6 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { SEpSet epSet = {0}; dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet); - if (epSet.numOfEps == 1) { - return; - } - const int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); pMsg->pCont = rpcMallocCont(contLen); if (pMsg->pCont == NULL) { diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 98ef8cd95ba9c201ec23ff9fd8515948016eec4f..85057e59167389bdda75a6c713a204af1762593b 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -105,6 +105,7 @@ typedef struct { SHashObj *dnodeHash; TdThreadRwlock lock; SMsgCb msgCb; + bool validMnodeEps; } SDnodeData; typedef struct { diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 45cc4bb7112fec0c06899a2426b774c5984af268..1564a090357717b4733a5c557fd2c1c248f753ba 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -288,6 +288,8 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) { taosHashPut(pData->dnodeHash, &pDnodeEp->id, sizeof(int32_t), pDnodeEp, sizeof(SDnodeEp)); } + pData->validMnodeEps = true; + dmPrintEps(pData); } @@ -348,6 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { + if(!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 460e75b42235d243f85996facef5919dd14290e9..06bb46772aee20a7049c5ce47e1b54756911f14c 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -227,6 +227,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) { + mGError("version not compatible. 
client version: %s, server version: %s", connReq.sVer, version); terrno = code; goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 5fa53dd1eeb8097d24e6bb0543c9230fec3368d0..162e75d783112b86196f6261f023d8dcb15e5f84 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -888,7 +888,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; void *pIter = NULL; - SVDropTtlTableReq ttlReq = {.timestamp = taosGetTimestampSec()}; + SVDropTtlTableReq ttlReq = {.timestampSec = taosGetTimestampSec()}; int32_t reqLen = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq); int32_t contLen = reqLen + sizeof(SMsgHead); @@ -914,7 +914,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) { if (code != 0) { mError("vgId:%d, failed to send drop ttl table request to vnode since 0x%x", pVgroup->vgId, code); } else { - mInfo("vgId:%d, send drop ttl table request to vnode, time:%d", pVgroup->vgId, ttlReq.timestamp); + mInfo("vgId:%d, send drop ttl table request to vnode, time:%" PRId32, pVgroup->vgId, ttlReq.timestampSec); } sdbRelease(pSdb, pVgroup); } @@ -1188,7 +1188,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p if (mndAllocStbSchemas(pOld, pNew) != 0) { return -1; } - + if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){ terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; @@ -3159,8 +3159,14 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SSdb *pSdb = pMnode->pSdb; SStbObj *pStb = NULL; - int32_t numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); - mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db); + + int32_t numOfRows = 0; + if (!pShow->sysDbRsp) { + numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); + mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db); + pShow->sysDbRsp = true; + } + SDbObj *pDb = NULL; if (strlen(pShow->db) > 0) { pDb = mndAcquireDb(pMnode, pShow->db); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 91bcbf5f4ec38951d8c44b38c56df9ecff259050..d3cb19231e565df33ffab44d6b749cd0b9416fe7 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -912,12 +912,14 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)&pTopic->createTime, false); - char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; + char *sql = taosMemoryMalloc(strlen(pTopic->sql) + VARSTR_HEADER_SIZE); STR_TO_VARSTR(sql, pTopic->sql); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)sql, false); + taosMemoryFree(sql); + char *schemaJson = taosMemoryMalloc(TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE); if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){ schemaToJson(pTopic->schema.pSchema, pTopic->schema.nCols, schemaJson); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 36e8755a3efe37c05e74ad25e798653ec55ae9b8..fe588b8f3b10b4110a5c8c843aaab658bee8b175 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1217,6 +1217,7 @@ static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, i action.pCont = pReq; 
action.contLen = contLen; action.msgType = TDMT_VND_ALTER_HASHRANGE; + action.acceptableCode = TSDB_CODE_VND_ALREADY_EXIST; if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(pReq); diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index b7bfc57cd5e720a918c5a5613c5cd8d03be4a028..9f828d00f5f8ad129e6f768ecce9a37091b11d22 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -27,6 +27,7 @@ target_sources( "src/meta/metaEntry.c" "src/meta/metaSnapshot.c" "src/meta/metaCache.c" + "src/meta/metaTtl.c" # sma "src/sma/smaEnv.c" @@ -80,6 +81,7 @@ IF (TD_VNODE_PLUGINS) ) ENDIF () +IF (NOT ${TD_LINUX}) target_include_directories( vnode PUBLIC "inc" @@ -87,7 +89,25 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" ) +ELSE() +target_include_directories( + vnode + PUBLIC "inc" + PUBLIC "src/inc" + PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" +) +ENDIF (NOT ${TD_LINUX}) + IF (TD_LINUX) +target_include_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + + target_link_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) target_link_libraries( vnode PUBLIC os diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 7e19425d56492018577aa733920906874931ebfe..e84211c7657b4f50bf0d228c5f85376c40635984 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -54,6 +54,7 @@ void vnodeCleanup(); int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs); int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs); int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnodeHashRangeReq *pReq, STfs *pTfs); +int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs); void vnodeDestroy(const char *path, STfs *pTfs); SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb); void vnodePreClose(SVnode *pVnode); @@ -114,6 +115,7 @@ int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName); int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid); int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType); +int metaGetTableTtlByUid(void *meta, uint64_t uid, int64_t *ttlDays); bool metaIsTableExist(void* pVnode, tb_uid_t uid); int32_t metaGetCachedTableUidList(void *pVnode, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList, bool *acquired); diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index b39008147b2d1284a9e6d7562c2463874854ce94..4f25bf31a210863aa75877b87894a8f9159dfa7e 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -17,6 +17,7 @@ #define _TD_VNODE_META_H_ #include "index.h" +#include "metaTtl.h" #include "vnodeInt.h" #ifdef __cplusplus @@ -89,10 +90,10 @@ struct SMeta { // ivt idx and idx void* pTagIvtIdx; - TTB* pTagIdx; - TTB* pTtlIdx; + TTB* pTagIdx; + STtlManger* pTtlMgr; - TTB* pCtimeIdx; // table created time idx + TTB* pBtimeIdx; // table created time idx TTB* pNcolIdx; // ncol of table idx, normal table only TTB* pSmaIdx; @@ -138,20 +139,15 @@ typedef struct { } STagIdxKey; #pragma pack(pop) -typedef struct { - int64_t dtime; - tb_uid_t uid; -} STtlIdxKey; - typedef struct { tb_uid_t uid; int64_t smaUid; } SSmaIdxKey; 
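+// btime: table "birth" (creation) time; this key was previously named SCtimeIdxKey/ctime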
typedef struct { - int64_t ctime; + int64_t btime; tb_uid_t uid; -} SCtimeIdxKey; +} SBtimeIdxKey; typedef struct { int64_t ncol; diff --git a/source/dnode/vnode/src/inc/metaTtl.h b/source/dnode/vnode/src/inc/metaTtl.h new file mode 100644 index 0000000000000000000000000000000000000000..bf3b897c6f7771b0877a08a440b442f3ee247f84 --- /dev/null +++ b/source/dnode/vnode/src/inc/metaTtl.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_VNODE_TTL_H_ +#define _TD_VNODE_TTL_H_ + +#include "taosdef.h" +#include "thash.h" + +#include "tdb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum DirtyEntryType { + ENTRY_TYPE_DEL = 1, + ENTRY_TYPE_UPSERT = 2, +} DirtyEntryType; + +typedef struct STtlManger { + TdThreadRwlock lock; + + TTB* pOldTtlIdx; // btree<{deleteTime, tuid}, NULL> + + SHashObj* pTtlCache; // key: tuid, value: {ttl, ctime} + SHashObj* pDirtyUids; // dirty tuid + TTB* pTtlIdx; // btree<{deleteTime, tuid}, ttl> +} STtlManger; + +typedef struct { + int64_t ttlDays; + int64_t changeTimeMs; +} STtlCacheEntry; + +typedef struct { + DirtyEntryType type; +} STtlDirtyEntry; + +typedef struct { + int64_t deleteTimeSec; + tb_uid_t uid; +} STtlIdxKey; + +typedef struct { + int64_t deleteTimeMs; + tb_uid_t uid; +} STtlIdxKeyV1; + +typedef struct { + int64_t ttlDays; +} STtlIdxValue; + +typedef struct { + tb_uid_t uid; + int64_t changeTimeMs; +} STtlUpdCtimeCtx; + +typedef struct { + tb_uid_t uid; + int64_t changeTimeMs; + int64_t ttlDays; +} STtlUpdTtlCtx; + +typedef struct { + tb_uid_t uid; + TXN* pTxn; +} STtlDelTtlCtx; + +int ttlMgrOpen(STtlManger** ppTtlMgr, TDB* pEnv, int8_t rollback); +int ttlMgrClose(STtlManger* pTtlMgr); +int ttlMgrBegin(STtlManger* pTtlMgr, void* pMeta); + +int ttlMgrConvert(TTB* pOldTtlIdx, TTB* pNewTtlIdx, void* pMeta); +int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn); + +int ttlMgrInsertTtl(STtlManger* pTtlMgr, const STtlUpdTtlCtx* pUpdCtx); +int ttlMgrDeleteTtl(STtlManger* pTtlMgr, const STtlDelTtlCtx* pDelCtx); +int ttlMgrUpdateChangeTime(STtlManger* pTtlMgr, const STtlUpdCtimeCtx* pUpdCtimeCtx); +int ttlMgrFindExpired(STtlManger* pTtlMgr, int64_t timePointMs, SArray* pTbUids); + +#ifdef __cplusplus +} +#endif + +#endif /*_TD_VNODE_TTL_H_*/ diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 2811fc35b0af49a423679953a2df42b2d1087e0e..a9541d8c472ecd5403206687be8a6434a39a5dca 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -149,8 +149,9 @@ int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp); int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid); int32_t metaTrimTables(SMeta* pMeta); -int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids); +int metaTtlDropTable(SMeta* pMeta, int64_t timePointMs, SArray* tbUids); int metaAlterTable(SMeta* 
pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); +int metaUpdateChangeTime(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs); SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); int32_t metaGetTbTSchemaEx(SMeta* pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sver, STSchema** ppTSchema); @@ -176,7 +177,6 @@ SArray* metaGetSmaIdsByTable(SMeta* pMeta, tb_uid_t uid); SArray* metaGetSmaTbUids(SMeta* pMeta); void* metaGetIdx(SMeta* pMeta); void* metaGetIvtIdx(SMeta* pMeta); -int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList); void metaReaderInit(SMetaReader* pReader, SMeta* pMeta, int32_t flags); @@ -229,6 +229,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgL int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen); int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg); +int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg); // tq-stream diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index f597c100d0588dae9123af444dc442c0b7de2c2b..1fa5b9c1e9e98434c3c0769c5005c1a11902442a 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -40,6 +40,12 @@ int metaBegin(SMeta *pMeta, int8_t heap) { return -1; } + if (ttlMgrBegin(pMeta->pTtlMgr, pMeta) < 0) { + return -1; + } + + tdbCommit(pMeta->pEnv, pMeta->txn); + return 0; } @@ -50,6 +56,7 @@ int metaFinishCommit(SMeta *pMeta, TXN *txn) { return tdbPostCommit(pMeta->pEnv int metaPrepareAsyncCommit(SMeta *pMeta) { // return tdbPrepareAsyncCommit(pMeta->pEnv, pMeta->txn); int code = 0; + code = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn); code = tdbCommit(pMeta->pEnv, pMeta->txn); return code; diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c index e50931ac0618252eea58afc0d1a734d252a05d1e..01877a523a942c9a3e5bcf889046358334ef9621 100644 --- a/source/dnode/vnode/src/meta/metaEntry.c +++ b/source/dnode/vnode/src/meta/metaEntry.c @@ -31,7 +31,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { if (tEncodeSRSmaParam(pCoder, &pME->stbEntry.rsmaParam) < 0) return -1; } } else if (pME->type == TSDB_CHILD_TABLE) { - if (tEncodeI64(pCoder, pME->ctbEntry.ctime) < 0) return -1; + if (tEncodeI64(pCoder, pME->ctbEntry.btime) < 0) return -1; if (tEncodeI32(pCoder, pME->ctbEntry.ttlDays) < 0) return -1; if (tEncodeI32v(pCoder, pME->ctbEntry.commentLen) < 0) return -1; if (pME->ctbEntry.commentLen > 0) { @@ -40,7 +40,7 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { if (tEncodeI64(pCoder, pME->ctbEntry.suid) < 0) return -1; if (tEncodeTag(pCoder, (const STag *)pME->ctbEntry.pTags) < 0) return -1; } else if (pME->type == TSDB_NORMAL_TABLE) { - if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1; + if (tEncodeI64(pCoder, pME->ntbEntry.btime) < 0) return -1; if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1; if (tEncodeI32v(pCoder, pME->ntbEntry.commentLen) < 0) return -1; if (pME->ntbEntry.commentLen > 0) { @@ -76,7 +76,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { if (tDecodeSRSmaParam(pCoder, &pME->stbEntry.rsmaParam) < 0) return -1; } } else if (pME->type == TSDB_CHILD_TABLE) { - if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) 
return -1; + if (tDecodeI64(pCoder, &pME->ctbEntry.btime) < 0) return -1; if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1; if (tDecodeI32v(pCoder, &pME->ctbEntry.commentLen) < 0) return -1; if (pME->ctbEntry.commentLen > 0) { @@ -85,7 +85,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { if (tDecodeI64(pCoder, &pME->ctbEntry.suid) < 0) return -1; if (tDecodeTag(pCoder, (STag **)&pME->ctbEntry.pTags) < 0) return -1; // (TODO) } else if (pME->type == TSDB_NORMAL_TABLE) { - if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1; + if (tDecodeI64(pCoder, &pME->ntbEntry.btime) < 0) return -1; if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1; if (tDecodeI32v(pCoder, &pME->ntbEntry.commentLen) < 0) return -1; if (pME->ntbEntry.commentLen > 0) { diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 1d0b11e26a58d047216196bf3fed7b8f13807922..fb17aff31889568a45ed41d8929f0072884accf7 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -19,12 +19,11 @@ static int tbDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen static int skmDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int ctbIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); -static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int uidIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int taskIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); -static int ctimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); +static int btimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int ncolIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); } @@ -128,8 +127,8 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { goto _err; } - // open pTtlIdx - ret = tdbTbOpen("ttl.idx", sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pMeta->pEnv, &pMeta->pTtlIdx, 0); + // open pTtlMgr ("ttlv1.idx") + ret = ttlMgrOpen(&pMeta->pTtlMgr, pMeta->pEnv, 0); if (ret < 0) { metaError("vgId:%d, failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; @@ -143,7 +142,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) { } // idx table create time - ret = tdbTbOpen("ctime.idx", sizeof(SCtimeIdxKey), 0, ctimeIdxCmpr, pMeta->pEnv, &pMeta->pCtimeIdx, 0); + ret = tdbTbOpen("ctime.idx", sizeof(SBtimeIdxKey), 0, btimeIdxCmpr, pMeta->pEnv, &pMeta->pBtimeIdx, 0); if (ret < 0) { metaError("vgId:%d, failed to open meta ctime index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; @@ -184,9 +183,9 @@ _err: if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx); - if (pMeta->pCtimeIdx) tdbTbClose(pMeta->pCtimeIdx); + if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); - if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); + if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr); if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx); if (pMeta->pCtbIdx) 
tdbTbClose(pMeta->pCtbIdx); @@ -209,9 +208,9 @@ int metaClose(SMeta **ppMeta) { if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx); - if (pMeta->pCtimeIdx) tdbTbClose(pMeta->pCtimeIdx); + if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); - if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); + if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr); if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx); if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx); @@ -399,37 +398,18 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL return 0; } -static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { - STtlIdxKey *pTtlIdxKey1 = (STtlIdxKey *)pKey1; - STtlIdxKey *pTtlIdxKey2 = (STtlIdxKey *)pKey2; - - if (pTtlIdxKey1->dtime > pTtlIdxKey2->dtime) { - return 1; - } else if (pTtlIdxKey1->dtime < pTtlIdxKey2->dtime) { - return -1; - } - - if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) { - return 1; - } else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) { - return -1; - } - - return 0; -} - -static int ctimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { - SCtimeIdxKey *pCtimeIdxKey1 = (SCtimeIdxKey *)pKey1; - SCtimeIdxKey *pCtimeIdxKey2 = (SCtimeIdxKey *)pKey2; - if (pCtimeIdxKey1->ctime > pCtimeIdxKey2->ctime) { +static int btimeIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { + SBtimeIdxKey *pBtimeIdxKey1 = (SBtimeIdxKey *)pKey1; + SBtimeIdxKey *pBtimeIdxKey2 = (SBtimeIdxKey *)pKey2; + if (pBtimeIdxKey1->btime > pBtimeIdxKey2->btime) { return 1; - } else if (pCtimeIdxKey1->ctime < pCtimeIdxKey2->ctime) { + } else if (pBtimeIdxKey1->btime < pBtimeIdxKey2->btime) { return -1; } - if (pCtimeIdxKey1->uid > pCtimeIdxKey2->uid) { + if (pBtimeIdxKey1->uid > pBtimeIdxKey2->uid) { return 1; - } else if (pCtimeIdxKey1->uid < pCtimeIdxKey2->uid) { + } else if (pBtimeIdxKey1->uid < pBtimeIdxKey2->uid) { return -1; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 29fe89c3f29716bf49940b6bffe061e798a608b1..0e380ea0b2f42614d873a7c41cd32645da35ce84 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -212,6 +212,29 @@ int metaReadNext(SMetaReader *pReader) { return 0; } +int metaGetTableTtlByUid(void *meta, uint64_t uid, int64_t *ttlDays) { + int code = -1; + SMetaReader mr = {0}; + metaReaderInit(&mr, (SMeta *)meta, 0); + code = metaReaderGetTableEntryByUid(&mr, uid); + if (code < 0) { + goto _exit; + } + if (mr.me.type == TSDB_CHILD_TABLE) { + *ttlDays = mr.me.ctbEntry.ttlDays; + } else if (mr.me.type == TSDB_NORMAL_TABLE) { + *ttlDays = mr.me.ntbEntry.ttlDays; + } else { + goto _exit; + } + + code = 0; + +_exit: + metaReaderClear(&mr); + return code; +} + #if 1 // =================================================== SMTbCursor *metaOpenTbCursor(void *pVnode) { SMTbCursor *pTbCur = NULL; @@ -387,37 +410,6 @@ _err: return NULL; } -int metaTtlSmaller(SMeta *pMeta, uint64_t ttl, SArray *uidList) { - TBC *pCur; - int ret = tdbTbcOpen(pMeta->pTtlIdx, &pCur, NULL); - if (ret < 0) { - return ret; - } - - STtlIdxKey ttlKey = {0}; - ttlKey.dtime = ttl; - ttlKey.uid = INT64_MAX; - int c = 0; - tdbTbcMoveTo(pCur, &ttlKey, sizeof(ttlKey), &c); - if (c < 0) { - tdbTbcMoveToPrev(pCur); - } - - void *pKey = NULL; - int kLen = 0; - while (1) { - ret = tdbTbcPrev(pCur, 
&pKey, &kLen, NULL, NULL); - if (ret < 0) { - break; - } - ttlKey = *(STtlIdxKey *)pKey; - taosArrayPush(uidList, &ttlKey.uid); - } - tdbFree(pKey); - tdbTbcClose(pCur); - return 0; -} - struct SMCtbCursor { SMeta *pMeta; TBC *pCur; @@ -1018,17 +1010,17 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) { pCursor->type = param->type; metaRLock(pMeta); - ret = tdbTbcOpen(pMeta->pCtimeIdx, &pCursor->pCur, NULL); + ret = tdbTbcOpen(pMeta->pBtimeIdx, &pCursor->pCur, NULL); if (ret != 0) { goto END; } int64_t uidLimit = param->reverse ? INT64_MAX : 0; - SCtimeIdxKey ctimeKey = {.ctime = *(int64_t *)(param->val), .uid = uidLimit}; - SCtimeIdxKey *pCtimeKey = &ctimeKey; + SBtimeIdxKey btimeKey = {.btime = *(int64_t *)(param->val), .uid = uidLimit}; + SBtimeIdxKey *pBtimeKey = &btimeKey; int cmp = 0; - if (tdbTbcMoveTo(pCursor->pCur, &ctimeKey, sizeof(ctimeKey), &cmp) < 0) { + if (tdbTbcMoveTo(pCursor->pCur, &btimeKey, sizeof(btimeKey), &cmp) < 0) { goto END; } @@ -1042,10 +1034,10 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) { valid = tdbTbcGet(pCursor->pCur, (const void **)&entryKey, &nEntryKey, NULL, NULL); if (valid < 0) break; - SCtimeIdxKey *p = entryKey; + SBtimeIdxKey *p = entryKey; if (count > TRY_ERROR_LIMIT) break; - int32_t cmp = (*param->filterFunc)((void *)&p->ctime, (void *)&pCtimeKey->ctime, param->type); + int32_t cmp = (*param->filterFunc)((void *)&p->btime, (void *)&pBtimeKey->btime, param->type); if (cmp == 0) taosArrayPush(pUids, &p->uid); else { @@ -1149,7 +1141,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) { pCursor->type = param->type; metaRLock(pMeta); - ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); + //ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); END: if (pCursor->pMeta) metaULock(pCursor->pMeta); diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 0d0716f2f0f705f3144bf49a8ae4659a9377504b..424eac13b013efe0ce6067f82a30ed4d238e4e16 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -20,7 +20,7 @@ static int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, con static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME); -static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME); +static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME); static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME); @@ -28,8 +28,8 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry); static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type); static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey); // opt ins_tables query -static int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME); -static int metaDeleteCtimeIdx(SMeta *pMeta, const SMetaEntry *pME); +static int metaUpdateBtimeIdx(SMeta *pMeta, const SMetaEntry *pME); +static int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaDeleteNcolIdx(SMeta *pMeta, const SMetaEntry *pME); @@ -734,7 +734,7 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs me.uid = pReq->uid; me.name = 
pReq->name; if (me.type == TSDB_CHILD_TABLE) { - me.ctbEntry.ctime = pReq->ctime; + me.ctbEntry.btime = pReq->btime; me.ctbEntry.ttlDays = pReq->ttl; me.ctbEntry.commentLen = pReq->commentLen; me.ctbEntry.comment = pReq->comment; @@ -770,7 +770,7 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs metaTbGroupCacheClear(pMeta, me.ctbEntry.suid); metaULock(pMeta); } else { - me.ntbEntry.ctime = pReq->ctime; + me.ntbEntry.btime = pReq->btime; me.ntbEntry.ttlDays = pReq->ttl; me.ntbEntry.commentLen = pReq->commentLen; me.ntbEntry.comment = pReq->comment; @@ -923,50 +923,40 @@ end: return code; } -int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) { - int ret = metaTtlSmaller(pMeta, ttl, tbUids); +int metaTtlDropTable(SMeta *pMeta, int64_t timePointMs, SArray *tbUids) { + int ret = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn); if (ret != 0) { + metaError("ttl failed to flush, ret:%d", ret); + return ret; + } + + ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids); + if (ret != 0) { + metaError("ttl failed to find expired table, ret:%d", ret); return ret; } if (TARRAY_SIZE(tbUids) == 0) { return 0; } + metaInfo("ttl find expired table count: %zu" , TARRAY_SIZE(tbUids)); + metaDropTables(pMeta, tbUids); return 0; } -static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME) { - int64_t ttlDays = 0; - int64_t ctime = 0; +static int metaBuildBtimeIdxKey(SBtimeIdxKey *btimeKey, const SMetaEntry *pME) { + int64_t btime; if (pME->type == TSDB_CHILD_TABLE) { - ctime = pME->ctbEntry.ctime; - ttlDays = pME->ctbEntry.ttlDays; + btime = pME->ctbEntry.btime; } else if (pME->type == TSDB_NORMAL_TABLE) { - ctime = pME->ntbEntry.ctime; - ttlDays = pME->ntbEntry.ttlDays; - } else { - metaError("meta/table: invalide table type: %" PRId8 " build ttl idx key failed.", pME->type); - return; - } - - if (ttlDays <= 0) return; - - ttlKey->dtime = ctime / 1000 + ttlDays * tsTtlUnit; - ttlKey->uid = pME->uid; -} -static int metaBuildCtimeIdxKey(SCtimeIdxKey *ctimeKey, const SMetaEntry *pME) { - int64_t ctime; - if (pME->type == TSDB_CHILD_TABLE) { - ctime = pME->ctbEntry.ctime; - } else if (pME->type == TSDB_NORMAL_TABLE) { - ctime = pME->ntbEntry.ctime; + btime = pME->ntbEntry.btime; } else { return -1; } - ctimeKey->ctime = ctime; - ctimeKey->uid = pME->uid; + btimeKey->btime = btime; + btimeKey->uid = pME->uid; return 0; } @@ -980,11 +970,9 @@ static int metaBuildNColIdxKey(SNcolIdxKey *ncolKey, const SMetaEntry *pME) { return 0; } -static int metaDeleteTtlIdx(SMeta *pMeta, const SMetaEntry *pME) { - STtlIdxKey ttlKey = {0}; - metaBuildTtlIdxKey(&ttlKey, pME); - if (ttlKey.dtime == 0) return 0; - return tdbTbDelete(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), pMeta->txn); +static int metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) { + STtlDelTtlCtx ctx = {.uid = pME->uid, .pTxn = pMeta->txn}; + return ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx); } static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { @@ -1066,10 +1054,10 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { tdbTbDelete(pMeta->pNameIdx, e.name, strlen(e.name) + 1, pMeta->txn); tdbTbDelete(pMeta->pUidIdx, &uid, sizeof(uid), pMeta->txn); - if (e.type == TSDB_CHILD_TABLE || e.type == TSDB_NORMAL_TABLE) metaDeleteCtimeIdx(pMeta, &e); + if (e.type == TSDB_CHILD_TABLE || e.type == TSDB_NORMAL_TABLE) metaDeleteBtimeIdx(pMeta, &e); if (e.type == TSDB_NORMAL_TABLE) metaDeleteNcolIdx(pMeta, &e); - if (e.type != TSDB_SUPER_TABLE) metaDeleteTtlIdx(pMeta, &e); + if 
(e.type != TSDB_SUPER_TABLE) metaDeleteTtl(pMeta, &e); if (e.type == TSDB_CHILD_TABLE) { tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), pMeta->txn); @@ -1102,23 +1090,23 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { return 0; } // opt ins_tables -int metaUpdateCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) { - SCtimeIdxKey ctimeKey = {0}; - if (metaBuildCtimeIdxKey(&ctimeKey, pME) < 0) { +int metaUpdateBtimeIdx(SMeta *pMeta, const SMetaEntry *pME) { + SBtimeIdxKey btimeKey = {0}; + if (metaBuildBtimeIdxKey(&btimeKey, pME) < 0) { return 0; } - metaTrace("vgId:%d, start to save version:%" PRId64 " uid:%" PRId64 " ctime:%" PRId64, TD_VID(pMeta->pVnode), - pME->version, pME->uid, ctimeKey.ctime); + metaTrace("vgId:%d, start to save version:%" PRId64 " uid:%" PRId64 " btime:%" PRId64, TD_VID(pMeta->pVnode), + pME->version, pME->uid, btimeKey.btime); - return tdbTbUpsert(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), NULL, 0, pMeta->txn); + return tdbTbUpsert(pMeta->pBtimeIdx, &btimeKey, sizeof(btimeKey), NULL, 0, pMeta->txn); } -int metaDeleteCtimeIdx(SMeta *pMeta, const SMetaEntry *pME) { - SCtimeIdxKey ctimeKey = {0}; - if (metaBuildCtimeIdxKey(&ctimeKey, pME) < 0) { +int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME) { + SBtimeIdxKey btimeKey = {0}; + if (metaBuildBtimeIdxKey(&btimeKey, pME) < 0) { return 0; } - return tdbTbDelete(pMeta->pCtimeIdx, &ctimeKey, sizeof(ctimeKey), pMeta->txn); + return tdbTbDelete(pMeta->pBtimeIdx, &btimeKey, sizeof(btimeKey), pMeta->txn); } int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME) { SNcolIdxKey ncolKey = {0}; @@ -1328,6 +1316,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl metaULock(pMeta); + metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs); + metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp); if (entry.pBuf) taosMemoryFree(entry.pBuf); @@ -1515,6 +1505,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaULock(pMeta); + metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs); + tDecoderClear(&dc1); tDecoderClear(&dc2); taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); @@ -1603,9 +1595,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p // build SMetaEntry if (entry.type == TSDB_CHILD_TABLE) { if (pAlterTbReq->updateTTL) { - metaDeleteTtlIdx(pMeta, &entry); + metaDeleteTtl(pMeta, &entry); entry.ctbEntry.ttlDays = pAlterTbReq->newTTL; - metaUpdateTtlIdx(pMeta, &entry); + metaUpdateTtl(pMeta, &entry); } if (pAlterTbReq->newCommentLen >= 0) { entry.ctbEntry.commentLen = pAlterTbReq->newCommentLen; @@ -1613,9 +1605,9 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p } } else { if (pAlterTbReq->updateTTL) { - metaDeleteTtlIdx(pMeta, &entry); + metaDeleteTtl(pMeta, &entry); entry.ntbEntry.ttlDays = pAlterTbReq->newTTL; - metaUpdateTtlIdx(pMeta, &entry); + metaUpdateTtl(pMeta, &entry); } if (pAlterTbReq->newCommentLen >= 0) { entry.ntbEntry.commentLen = pAlterTbReq->newCommentLen; @@ -1628,6 +1620,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p metaUpdateUidIdx(pMeta, &entry); metaULock(pMeta); + metaUpdateChangeTime(pMeta, entry.uid, pAlterTbReq->ctimeMs); + tdbTbcClose(pTbDbc); tdbTbcClose(pUidIdxc); tDecoderClear(&dc); @@ -1967,11 +1961,28 @@ static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) { return tdbTbUpsert(pMeta->pNameIdx, 
pME->name, strlen(pME->name) + 1, &pME->uid, sizeof(tb_uid_t), pMeta->txn); } -static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) { - STtlIdxKey ttlKey = {0}; - metaBuildTtlIdxKey(&ttlKey, pME); - if (ttlKey.dtime == 0) return 0; - return tdbTbUpsert(pMeta->pTtlIdx, &ttlKey, sizeof(ttlKey), NULL, 0, pMeta->txn); +static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { + if (pME->type != TSDB_CHILD_TABLE && pME->type != TSDB_NORMAL_TABLE) return 0; + + STtlUpdTtlCtx ctx = {.uid = pME->uid}; + + if (pME->type == TSDB_CHILD_TABLE) { + ctx.ttlDays = pME->ctbEntry.ttlDays; + ctx.changeTimeMs = pME->ctbEntry.btime; + } else { + ctx.ttlDays = pME->ntbEntry.ttlDays; + ctx.changeTimeMs = pME->ntbEntry.btime; + } + + return ttlMgrInsertTtl(pMeta->pTtlMgr, &ctx); +} + +int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { + if (!tsTtlChangeOnWrite) return 0; + + STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs}; + + return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx); } static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) { @@ -2182,7 +2193,7 @@ int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) { } } - code = metaUpdateCtimeIdx(pMeta, pME); + code = metaUpdateBtimeIdx(pMeta, pME); VND_CHECK_CODE(code, line, _err); if (pME->type == TSDB_NORMAL_TABLE) { @@ -2191,7 +2202,7 @@ int metaHandleEntry(SMeta *pMeta, const SMetaEntry *pME) { } if (pME->type != TSDB_SUPER_TABLE) { - code = metaUpdateTtlIdx(pMeta, pME); + code = metaUpdateTtl(pMeta, pME); VND_CHECK_CODE(code, line, _err); } diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c new file mode 100644 index 0000000000000000000000000000000000000000..c283472c2464219502b593215f8117f74c5daabd --- /dev/null +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -0,0 +1,434 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "metaTtl.h" +#include "meta.h" + +typedef struct { + TTB *pNewTtlIdx; + SMeta *pMeta; +} SConvertData; + +static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid); +static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); +static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); +static int ttlMgrFillCache(STtlManger *pTtlMgr); +static int32_t ttlMgrFillCacheOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pTtlCache); +static int32_t ttlMgrConvertOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pConvertData); + +static int32_t ttlMgrWLock(STtlManger *pTtlMgr); +static int32_t ttlMgrRLock(STtlManger *pTtlMgr); +static int32_t ttlMgrULock(STtlManger *pTtlMgr); + +const char *ttlTbname = "ttl.idx"; +const char *ttlV1Tbname = "ttlv1.idx"; + +int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) { + int ret; + + *ppTtlMgr = NULL; + + STtlManger *pTtlMgr = (STtlManger *)tdbOsCalloc(1, sizeof(*pTtlMgr)); + if (pTtlMgr == NULL) { + return -1; + } + + if (tdbTbExist(ttlTbname, pEnv)) { + ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pEnv, &pTtlMgr->pOldTtlIdx, rollback); + if (ret < 0) { + metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno)); + return ret; + } + } + + ret = tdbTbOpen(ttlV1Tbname, TDB_VARIANT_LEN, TDB_VARIANT_LEN, ttlIdxKeyV1Cmpr, pEnv, &pTtlMgr->pTtlIdx, rollback); + if (ret < 0) { + metaError("failed to open %s since %s", ttlV1Tbname, tstrerror(terrno)); + + tdbOsFree(pTtlMgr); + return ret; + } + + pTtlMgr->pTtlCache = taosHashInit(8192, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + pTtlMgr->pDirtyUids = taosHashInit(8192, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + + taosThreadRwlockInit(&pTtlMgr->lock, NULL); + + *ppTtlMgr = pTtlMgr; + return 0; +} + +int ttlMgrClose(STtlManger *pTtlMgr) { + taosHashCleanup(pTtlMgr->pTtlCache); + taosHashCleanup(pTtlMgr->pDirtyUids); + tdbTbClose(pTtlMgr->pTtlIdx); + taosThreadRwlockDestroy(&pTtlMgr->lock); + tdbOsFree(pTtlMgr); + return 0; +} + +int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) { + metaInfo("ttl mgr start open"); + int ret; + + int64_t startNs = taosGetTimestampNs(); + + SMeta *meta = (SMeta *)pMeta; + + if (pTtlMgr->pOldTtlIdx) { + ret = ttlMgrConvert(pTtlMgr->pOldTtlIdx, pTtlMgr->pTtlIdx, pMeta); + if (ret < 0) { + metaError("failed to convert ttl index since %s", tstrerror(terrno)); + goto _out; + } + + ret = tdbTbDropByName(ttlTbname, meta->pEnv, meta->txn); + if (ret < 0) { + metaError("failed to drop old ttl index since %s", tstrerror(terrno)); + goto _out; + } + + tdbTbClose(pTtlMgr->pOldTtlIdx); + pTtlMgr->pOldTtlIdx = NULL; + } + + ret = ttlMgrFillCache(pTtlMgr); + if (ret < 0) { + metaError("failed to fill hash since %s", tstrerror(terrno)); + goto _out; + } + + int64_t endNs = taosGetTimestampNs(); + + metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache), + endNs - startNs); +_out: + return ret; +} + +static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid) { + if (ttlDays <= 0) return; + + pTtlKey->deleteTimeMs = changeTimeMs + ttlDays * tsTtlUnit * 1000; + pTtlKey->uid = uid; +} + +static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { + STtlIdxKey *pTtlIdxKey1 = (STtlIdxKey *)pKey1; + STtlIdxKey 
*pTtlIdxKey2 = (STtlIdxKey *)pKey2; + + if (pTtlIdxKey1->deleteTimeSec > pTtlIdxKey2->deleteTimeSec) { + return 1; + } else if (pTtlIdxKey1->deleteTimeSec < pTtlIdxKey2->deleteTimeSec) { + return -1; + } + + if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) { + return 1; + } else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) { + return -1; + } + + return 0; +} + +static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { + STtlIdxKeyV1 *pTtlIdxKey1 = (STtlIdxKeyV1 *)pKey1; + STtlIdxKeyV1 *pTtlIdxKey2 = (STtlIdxKeyV1 *)pKey2; + + if (pTtlIdxKey1->deleteTimeMs > pTtlIdxKey2->deleteTimeMs) { + return 1; + } else if (pTtlIdxKey1->deleteTimeMs < pTtlIdxKey2->deleteTimeMs) { + return -1; + } + + if (pTtlIdxKey1->uid > pTtlIdxKey2->uid) { + return 1; + } else if (pTtlIdxKey1->uid < pTtlIdxKey2->uid) { + return -1; + } + + return 0; +} + +static int ttlMgrFillCache(STtlManger *pTtlMgr) { + return tdbTbTraversal(pTtlMgr->pTtlIdx, pTtlMgr->pTtlCache, ttlMgrFillCacheOneEntry); +} + +static int32_t ttlMgrFillCacheOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pTtlCache) { + SHashObj *pCache = (SHashObj *)pTtlCache; + + STtlIdxKeyV1 *ttlKey = (STtlIdxKeyV1 *)pKey; + tb_uid_t uid = ttlKey->uid; + int64_t ttlDays = *(int64_t *)pVal; + int64_t changeTimeMs = ttlKey->deleteTimeMs - ttlDays * tsTtlUnit * 1000; + + STtlCacheEntry data = {.ttlDays = ttlDays, .changeTimeMs = changeTimeMs}; + + return taosHashPut(pCache, &uid, sizeof(uid), &data, sizeof(data)); +} + +static int ttlMgrConvertOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pConvertData) { + SConvertData *pData = (SConvertData *)pConvertData; + + STtlIdxKey *ttlKey = (STtlIdxKey *)pKey; + tb_uid_t uid = ttlKey->uid; + int64_t ttlDays = 0; + + int ret = metaGetTableTtlByUid(pData->pMeta, uid, &ttlDays); + if (ret < 0) { + metaError("ttlMgr convert failed to get ttl since %s", tstrerror(terrno)); + goto _out; + } + + STtlIdxKeyV1 ttlKeyV1 = {.deleteTimeMs = ttlKey->deleteTimeSec * 1000, .uid = uid}; + ret = tdbTbUpsert(pData->pNewTtlIdx, &ttlKeyV1, sizeof(ttlKeyV1), &ttlDays, sizeof(ttlDays), pData->pMeta->txn); + if (ret < 0) { + metaError("ttlMgr convert failed to upsert since %s", tstrerror(terrno)); + goto _out; + } + + ret = 0; +_out: + return ret; +} + +int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) { + SMeta *meta = pMeta; + + metaInfo("ttlMgr convert ttl start."); + + SConvertData cvData = {.pNewTtlIdx = pNewTtlIdx, .pMeta = meta}; + + int ret = tdbTbTraversal(pOldTtlIdx, &cvData, ttlMgrConvertOneEntry); + if (ret < 0) { + metaError("failed to convert ttl since %s", tstrerror(terrno)); + } + + metaInfo("ttlMgr convert ttl end."); + return ret; +} + +int ttlMgrInsertTtl(STtlManger *pTtlMgr, const STtlUpdTtlCtx *updCtx) { + if (updCtx->ttlDays == 0) return 0; + + STtlCacheEntry cacheEntry = {.ttlDays = updCtx->ttlDays, .changeTimeMs = updCtx->changeTimeMs}; + STtlDirtyEntry dirtryEntry = {.type = ENTRY_TYPE_UPSERT}; + + ttlMgrWLock(pTtlMgr); + + int ret = taosHashPut(pTtlMgr->pTtlCache, &updCtx->uid, sizeof(updCtx->uid), &cacheEntry, sizeof(cacheEntry)); + if (ret < 0) { + metaError("ttlMgr insert failed to update ttl cache since %s", tstrerror(terrno)); + goto _out; + } + + ret = taosHashPut(pTtlMgr->pDirtyUids, &updCtx->uid, sizeof(updCtx->uid), &dirtryEntry, sizeof(dirtryEntry)); + if (ret < 0) { + metaError("ttlMgr insert failed to update ttl dirty uids since %s", tstrerror(terrno)); + goto _out; + } + + ret = 0; +_out: + ttlMgrULock(pTtlMgr); + + 
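ttlMgrBuildKey() and ttlMgrFillCacheOneEntry() above are inverses of each other: the ttlv1.idx key stores the absolute expiry time in milliseconds and the value stores ttlDays, so the in-memory cache entry can be rebuilt from a key/value pair at open time. A self-contained sketch of that round trip, with kTtlUnitSec standing in for the global tsTtlUnit (assumed here to be 86400 seconds, i.e. one day); types and names are illustrative mirrors, not the patch's structs:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { int64_t deleteTimeMs; int64_t uid; } KeyV1;      /* mirrors STtlIdxKeyV1 */
    typedef struct { int64_t ttlDays; int64_t changeTimeMs; } Cache;  /* mirrors STtlCacheEntry */

    static const int64_t kTtlUnitSec = 86400; /* stand-in for tsTtlUnit (assumed value) */

    /* what ttlMgrBuildKey() computes when a dirty cache entry is flushed */
    static KeyV1 buildKey(int64_t uid, Cache c) {
      return (KeyV1){.deleteTimeMs = c.changeTimeMs + c.ttlDays * kTtlUnitSec * 1000, .uid = uid};
    }

    /* what ttlMgrFillCacheOneEntry() computes when warming the cache from ttlv1.idx */
    static Cache rebuildCache(KeyV1 k, int64_t ttlDaysValue) {
      return (Cache){.ttlDays = ttlDaysValue,
                     .changeTimeMs = k.deleteTimeMs - ttlDaysValue * kTtlUnitSec * 1000};
    }

    int main(void) {
      Cache in = {.ttlDays = 7, .changeTimeMs = 1700000000000};
      KeyV1 k = buildKey(42, in);
      Cache out = rebuildCache(k, in.ttlDays);
      assert(out.ttlDays == in.ttlDays && out.changeTimeMs == in.changeTimeMs);
      return 0;
    }
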
metaDebug("ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, updCtx->uid, + updCtx->changeTimeMs, updCtx->ttlDays); + + return ret; +} + +int ttlMgrDeleteTtl(STtlManger *pTtlMgr, const STtlDelTtlCtx *delCtx) { + ttlMgrWLock(pTtlMgr); + + STtlDirtyEntry dirtryEntry = {.type = ENTRY_TYPE_DEL}; + + int ret = taosHashPut(pTtlMgr->pDirtyUids, &delCtx->uid, sizeof(delCtx->uid), &dirtryEntry, sizeof(dirtryEntry)); + if (ret < 0) { + metaError("ttlMgr del failed to update ttl dirty uids since %s", tstrerror(terrno)); + goto _out; + } + + ret = 0; +_out: + ttlMgrULock(pTtlMgr); + + metaDebug("ttl mgr delete ttl, uid: %" PRId64, delCtx->uid); + + return ret; +} + +int ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdCtimeCtx) { + ttlMgrWLock(pTtlMgr); + + STtlCacheEntry *oldData = taosHashGet(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid)); + if (oldData == NULL) { + goto _out; + } + + STtlCacheEntry cacheEntry = {.ttlDays = oldData->ttlDays, .changeTimeMs = pUpdCtimeCtx->changeTimeMs}; + STtlDirtyEntry dirtryEntry = {.type = ENTRY_TYPE_UPSERT}; + + int ret = + taosHashPut(pTtlMgr->pTtlCache, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &cacheEntry, sizeof(cacheEntry)); + if (ret < 0) { + metaError("ttlMgr update ctime failed to update ttl cache since %s", tstrerror(terrno)); + goto _out; + } + + ret = taosHashPut(pTtlMgr->pDirtyUids, &pUpdCtimeCtx->uid, sizeof(pUpdCtimeCtx->uid), &dirtryEntry, + sizeof(dirtryEntry)); + if (ret < 0) { + metaError("ttlMgr update ctime failed to update ttl dirty uids since %s", tstrerror(terrno)); + goto _out; + } + + ret = 0; +_out: + ttlMgrULock(pTtlMgr); + + metaDebug("ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pUpdCtimeCtx->uid, pUpdCtimeCtx->changeTimeMs); + + return ret; +} + +int ttlMgrFindExpired(STtlManger *pTtlMgr, int64_t timePointMs, SArray *pTbUids) { + ttlMgrRLock(pTtlMgr); + + TBC *pCur; + int ret = tdbTbcOpen(pTtlMgr->pTtlIdx, &pCur, NULL); + if (ret < 0) { + goto _out; + } + + STtlIdxKeyV1 ttlKey = {0}; + ttlKey.deleteTimeMs = timePointMs; + ttlKey.uid = INT64_MAX; + int c = 0; + tdbTbcMoveTo(pCur, &ttlKey, sizeof(ttlKey), &c); + if (c < 0) { + tdbTbcMoveToPrev(pCur); + } + + void *pKey = NULL; + int kLen = 0; + while (1) { + ret = tdbTbcPrev(pCur, &pKey, &kLen, NULL, NULL); + if (ret < 0) { + ret = 0; + break; + } + ttlKey = *(STtlIdxKeyV1 *)pKey; + taosArrayPush(pTbUids, &ttlKey.uid); + } + + tdbFree(pKey); + tdbTbcClose(pCur); + + ret = 0; +_out: + ttlMgrULock(pTtlMgr); + return ret; +} + +int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { + ttlMgrWLock(pTtlMgr); + + metaInfo("ttl mgr flush start."); + + int ret = -1; + + void *pIter = taosHashIterate(pTtlMgr->pDirtyUids, NULL); + while (pIter != NULL) { + STtlDirtyEntry *pEntry = (STtlDirtyEntry *)pIter; + tb_uid_t *pUid = taosHashGetKey(pIter, NULL); + + STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); + if (cacheEntry == NULL) { + metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno)); + goto _out; + } + + STtlIdxKeyV1 ttlKey; + ttlMgrBuildKey(&ttlKey, cacheEntry->ttlDays, cacheEntry->changeTimeMs, *pUid); + + if (pEntry->type == ENTRY_TYPE_UPSERT) { + ret = tdbTbUpsert(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), &cacheEntry->ttlDays, sizeof(cacheEntry->ttlDays), + pTxn); + if (ret < 0) { + metaError("ttlMgr flush failed to flush ttl cache upsert since %s", tstrerror(terrno)); + goto _out; + } + } else if (pEntry->type == ENTRY_TYPE_DEL) { + ret = 
tdbTbDelete(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), pTxn); + if (ret < 0) { + metaError("ttlMgr flush failed to flush ttl cache del since %s", tstrerror(terrno)); + goto _out; + } + + ret = taosHashRemove(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); + if (ret < 0) { + metaError("ttlMgr flush failed to delete ttl cache since %s", tstrerror(terrno)); + goto _out; + } + } else { + metaError("ttlMgr flush failed to flush ttl cache, unknown type: %d", pEntry->type); + goto _out; + } + + pIter = taosHashIterate(pTtlMgr->pDirtyUids, pIter); + } + + taosHashClear(pTtlMgr->pDirtyUids); + + ret = 0; +_out: + ttlMgrULock(pTtlMgr); + + metaInfo("ttl mgr flush end."); + + return ret; +} + +static int32_t ttlMgrRLock(STtlManger *pTtlMgr) { + int32_t ret = 0; + + metaTrace("ttlMgr rlock %p", &pTtlMgr->lock); + + ret = taosThreadRwlockRdlock(&pTtlMgr->lock); + + return ret; +} + +static int32_t ttlMgrWLock(STtlManger *pTtlMgr) { + int32_t ret = 0; + + metaTrace("ttlMgr wlock %p", &pTtlMgr->lock); + + ret = taosThreadRwlockWrlock(&pTtlMgr->lock); + + return ret; +} + +static int32_t ttlMgrULock(STtlManger *pTtlMgr) { + int32_t ret = 0; + + metaTrace("ttlMgr ulock %p", &pTtlMgr->lock); + + ret = taosThreadRwlockUnlock(&pTtlMgr->lock); + + return ret; +} diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index 3542ea9ffb2e767200c2ba4cf5e8aa959d931c1d..84cd44e837dc1edab8a130dc7598d9e4e7bab115 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -29,7 +29,7 @@ int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) { - smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaError("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); } return code; @@ -346,6 +346,43 @@ _end: return TSDB_CODE_SUCCESS; } +static int32_t tsmaProcessDelReq(SSma *pSma, int64_t indexUid, SBatchDeleteReq *pDelReq) { + int32_t code = 0; + int32_t lino = 0; + + if (taosArrayGetSize(pDelReq->deleteReqs) > 0) { + int32_t len = 0; + tEncodeSize(tEncodeSBatchDeleteReq, pDelReq, len, code); + TSDB_CHECK_CODE(code, lino, _exit); + + void *pBuf = rpcMallocCont(len + sizeof(SMsgHead)); + if (!pBuf) { + code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + } + + SEncoder encoder; + tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), len); + tEncodeSBatchDeleteReq(&encoder, pDelReq); + tEncoderClear(&encoder); + + ((SMsgHead *)pBuf)->vgId = TD_VID(pSma->pVnode); + + SRpcMsg delMsg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = pBuf, .contLen = len + sizeof(SMsgHead)}; + code = tmsgPutToQueue(&pSma->pVnode->msgCb, WRITE_QUEUE, &delMsg); + TSDB_CHECK_CODE(code, lino, _exit); + } + +_exit: + taosArrayDestroy(pDelReq->deleteReqs); + if (code) { + smaError("vgId:%d, failed at line %d to process delete req for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), lino, + indexUid, tstrerror(code)); + } + + return code; +} + /** * @brief Insert/Update Time-range-wise SMA data. 
* @@ -355,7 +392,6 @@ _end: */ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { const SArray *pDataBlocks = (const SArray *)msg; - // TODO: destroy SSDataBlocks(msg) if (!pDataBlocks) { terrno = TSDB_CODE_TSMA_INVALID_PTR; smaWarn("vgId:%d, insert tsma data failed since pDataBlocks is NULL", SMA_VID(pSma)); @@ -419,8 +455,10 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char goto _err; } - // TODO deleteReq - taosArrayDestroy(deleteReq.deleteReqs); + if ((terrno = tsmaProcessDelReq(pSma, indexUid, &deleteReq)) != 0) { + goto _err; + } + #if 0 if (!strncasecmp("td.tsma.rst.tb", pTsmaStat->pTSma->dstTbName, 14)) { terrno = TSDB_CODE_APP_ERROR; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index f4d3407aa2fb96c1c2ae06759a15c9b279b98886..de750aaa39cbbcfb304d0fe1f81da664377f45d7 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -264,7 +264,7 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* tFormatOffset(buf1, TSDB_OFFSET_LEN, &pRsp->reqOffset); tFormatOffset(buf2, TSDB_OFFSET_LEN, &pRsp->rspOffset); - tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId, + tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId, pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId); return 0; @@ -421,6 +421,35 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) { return 0; } +int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) { + int32_t vgId = TD_VID(pTq->pVnode); + taosWLockLatch(&pTq->lock); + if (taosHashGetSize(pTq->pPushMgr) > 0) { + void* pIter = taosHashIterate(pTq->pPushMgr, NULL); + + while (pIter) { + STqHandle* pHandle = *(STqHandle**)pIter; + tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId); + + if (ASSERT(pHandle->msg != NULL)) { + tqError("pHandle->msg should not be null"); + break; + }else{ + SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info}; + tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg); + taosMemoryFree(pHandle->msg); + pHandle->msg = NULL; + } + + pIter = taosHashIterate(pTq->pPushMgr, pIter); + } + + taosHashClear(pTq->pPushMgr); + } + taosWUnLockLatch(&pTq->lock); + return 0; +} + int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { SMqPollReq req = {0}; int code = 0; diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 4c2e19dcfbdc7828f8b61ae5d46fcfc0a83ac66f..4048ebe3f9ef844e4905ee06f4551c316dd49220 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -17,35 +17,16 @@ #include "vnd.h" int32_t tqProcessSubmitReqForSubscribe(STQ* pTq) { - int32_t vgId = TD_VID(pTq->pVnode); - - taosWLockLatch(&pTq->lock); - - if (taosHashGetSize(pTq->pPushMgr) > 0) { - void* pIter = taosHashIterate(pTq->pPushMgr, NULL); - - while (pIter) { - STqHandle* pHandle = *(STqHandle**)pIter; - tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId); - - if (ASSERT(pHandle->msg != NULL)) { - tqError("pHandle->msg should not be null"); - break; - }else{ - SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info}; - 
tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg); - taosMemoryFree(pHandle->msg); - pHandle->msg = NULL; - } - - pIter = taosHashIterate(pTq->pPushMgr, pIter); - } - - taosHashClear(pTq->pPushMgr); + if (taosHashGetSize(pTq->pPushMgr) <= 0) { + return 0; } - - // unlock - taosWUnLockLatch(&pTq->lock); + SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME_PUSH}; + msg.pCont = rpcMallocCont(sizeof(SMsgHead)); + msg.contLen = sizeof(SMsgHead); + SMsgHead *pHead = msg.pCont; + pHead->vgId = TD_VID(pTq->pVnode); + pHead->contLen = msg.contLen; + tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg); return 0; } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 8607fd754e33069afb9a620651e516780b6544b8..a301d82c30a603f6c45ccff200e90b6b3f87494b 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -168,7 +168,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId); code = tqScanData(pTq, pHandle, &dataRsp, pOffset); - if(code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) { + if (code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) { goto end; } @@ -176,13 +176,17 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST && dataRsp.blockNum == 0) { // lock taosWLockLatch(&pTq->lock); - code = tqRegisterPushHandle(pTq, pHandle, pMsg); - taosWUnLockLatch(&pTq->lock); - tDeleteMqDataRsp(&dataRsp); - return code; + int64_t ver = walGetCommittedVer(pTq->pVnode->pWal); + if (pOffset->version >= ver || + dataRsp.rspOffset.version >= ver) { // check if there are data again to avoid lost data + code = tqRegisterPushHandle(pTq, pHandle, pMsg); + taosWUnLockLatch(&pTq->lock); + goto end; + } else { + taosWUnLockLatch(&pTq->lock); + } } - // NOTE: this pHandle->consumerId may have been changed already. 
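The tqUtil.c hunk above re-checks the WAL committed version under the lock before registering a push handle, so data committed between the scan and taking the lock is not silently skipped. A tiny sketch of just that predicate; the function name and parameter names are illustrative, not the patch's API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Park the poll request (register a push handle) only if the consumer has
     * already seen everything committed so far; otherwise answer immediately
     * with the newly committed data. */
    static bool shouldParkPollRequest(int64_t reqOffsetVer, int64_t rspOffsetVer,
                                      int64_t walCommittedVer) {
      return reqOffsetVer >= walCommittedVer || rspOffsetVer >= walCommittedVer;
    }
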
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId); end : { @@ -192,9 +196,8 @@ end : { " code:%d", consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code); tDeleteMqDataRsp(&dataRsp); -} - return code; + } } static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, @@ -428,4 +431,4 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* tmsgSendRsp(&rsp); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index c659c8f4a2a917da32764f12494e6f318f7681b9..4ec66f82a60a7592eeb0eb29030e637b764446d3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -691,6 +691,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr .colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)}; if (!pLastCol) { pLastCol = &noneCol; + reallocVarData(&pLastCol->colVal); } taosArraySet(pLastArray, idxKey->idx, pLastCol); @@ -2848,14 +2849,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; - if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) { pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); if (pCol->colVal.value.pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + if (pColVal->value.nData > 0) { + memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } } if (!COL_VAL_IS_VALUE(pColVal)) { @@ -3016,14 +3019,16 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; - if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) { pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); if (pCol->colVal.value.pData == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + if (pColVal->value.nData > 0) { + memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } } /*if (COL_VAL_IS_NONE(pColVal)) { diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index b5e7c6875b3ce044530133f9105ee902628b23c0..583df15533589e40f4cc3ab9c75aee57a97d676d 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -129,15 +129,20 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p return 0; } +static int32_t vnodeVgroupIdLen(int32_t vgId) { + char tmp[TSDB_FILENAME_LEN]; + sprintf(tmp, "%d", vgId); + return strlen(tmp); +} + int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) { - int32_t ret = tfsRename(pTfs, srcPath, dstPath); - if (ret != 0) return ret; + int32_t ret = 0; char oldRname[TSDB_FILENAME_LEN] = {0}; char newRname[TSDB_FILENAME_LEN] = {0}; char 
tsdbPath[TSDB_FILENAME_LEN] = {0}; char tsdbFilePrefix[TSDB_FILENAME_LEN] = {0}; - snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", dstPath, TD_DIRSEP); + snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", srcPath, TD_DIRSEP); snprintf(tsdbFilePrefix, TSDB_FILENAME_LEN, "tsdb%sv", TD_DIRSEP); STfsDir *tsdbDir = tfsOpendir(pTfs, tsdbPath); @@ -154,8 +159,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr int32_t tsdbFileVgId = atoi(tsdbFilePrefixPos + 6); if (tsdbFileVgId == srcVgId) { - char *tsdbFileSurfixPos = strstr(tsdbFilePrefixPos, "f"); - if (tsdbFileSurfixPos == NULL) continue; + char *tsdbFileSurfixPos = tsdbFilePrefixPos + 6 + vnodeVgroupIdLen(srcVgId); tsdbFilePrefixPos[6] = 0; snprintf(newRname, TSDB_FILENAME_LEN, "%s%d%s", oldRname, dstVgId, tsdbFileSurfixPos); @@ -163,7 +167,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr ret = tfsRename(pTfs, tsdbFile->rname, newRname); if (ret != 0) { - vInfo("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr()); + vError("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr()); tfsClosedir(tsdbDir); return ret; } @@ -171,6 +175,21 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr } tfsClosedir(tsdbDir); + + vInfo("vgId:%d, rename dir from %s to %s", dstVgId, srcPath, dstPath); + ret = tfsRename(pTfs, srcPath, dstPath); + if (ret != 0) { + vError("vgId:%d, failed to rename dir from %s to %s since %s", dstVgId, srcPath, dstPath, terrstr()); + } + return ret; +} + +int32_t vnodeGetAbsDir(const char *relPath, STfs *pTfs, char *buf, size_t bufLen) { + if (pTfs) { + snprintf(buf, bufLen, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, relPath); + } else { + snprintf(buf, bufLen, "%s", relPath); + } return 0; } @@ -179,13 +198,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod char dir[TSDB_FILENAME_LEN] = {0}; int32_t ret = 0; - if (pTfs) { - snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, srcPath); - } else { - snprintf(dir, TSDB_FILENAME_LEN, "%s", srcPath); - } - - // todo add stat file to handle exception while vnode open + vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN); ret = vnodeLoadInfo(dir, &info); if (ret < 0) { @@ -240,6 +253,42 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod return 0; } +int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) { + SVnodeInfo info = {0}; + char dir[TSDB_FILENAME_LEN] = {0}; + + vnodeGetAbsDir(dstPath, pTfs, dir, TSDB_FILENAME_LEN); + if (vnodeLoadInfo(dir, &info) == 0) { + if (info.config.vgId != dstVgId) { + vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId); + return -1; + } + return dstVgId; + } + + vnodeGetAbsDir(srcPath, pTfs, dir, TSDB_FILENAME_LEN); + if (vnodeLoadInfo(dir, &info) < 0) { + vError("vgId:%d, failed to read vnode config from %s since %s", srcVgId, srcPath, tstrerror(terrno)); + return -1; + } + + if (info.config.vgId == srcVgId) { + vInfo("vgId:%d, rollback alter hashrange", srcVgId); + return srcVgId; + } else if (info.config.vgId != dstVgId) { + vError("vgId:%d, unexpected vnode config.vgId:%d", dstVgId, info.config.vgId); + return -1; + } + + vInfo("vgId:%d, rename %s to %s", dstVgId, srcPath, dstPath); + if (vnodeRenameVgroupId(srcPath, dstPath, srcVgId, dstVgId, pTfs) < 0) { + 
vError("vgId:%d, failed to rename vnode from %s to %s since %s", dstVgId, srcPath, dstPath, tstrerror(terrno)); + return -1; + } + + return dstVgId; +} + void vnodeDestroy(const char *path, STfs *pTfs) { vInfo("path:%s is removed while destroy vnode", path); tfsRmdir(pTfs, path); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c3fb5e5ad4c3841ab48f8b86182882b19d543938..dc842310293d58b9cda25944f40ee52184ed43d3 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -37,7 +37,7 @@ static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t ver, void *pRe static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp); static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp); -static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, int64_t ctime, int64_t *pUid) { +static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, int64_t btime, int64_t *pUid) { int32_t code = 0; int32_t lino = 0; @@ -66,8 +66,8 @@ static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, i } *(int64_t *)(pCoder->data + pCoder->pos) = uid; - // ctime - *(int64_t *)(pCoder->data + pCoder->pos + 8) = ctime; + // btime + *(int64_t *)(pCoder->data + pCoder->pos + 8) = btime; tEndDecode(pCoder); @@ -84,7 +84,7 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t code = 0; int32_t lino = 0; - int64_t ctime = taosGetTimestampMs(); + int64_t btime = taosGetTimestampMs(); SDecoder dc = {0}; int32_t nReqs; @@ -99,7 +99,7 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) { TSDB_CHECK_CODE(code, lino, _exit); } for (int32_t iReq = 0; iReq < nReqs; iReq++) { - code = vnodePreprocessCreateTableReq(pVnode, &dc, ctime, NULL); + code = vnodePreprocessCreateTableReq(pVnode, &dc, btime, NULL); TSDB_CHECK_CODE(code, lino, _exit); } @@ -109,8 +109,35 @@ _exit: tDecoderClear(&dc); return code; } + +static int32_t vnodePreProcessAlterTableMsg(SVnode *pVnode, SRpcMsg *pMsg) { + int32_t code = TSDB_CODE_INVALID_MSG; + int32_t lino = 0; + + SDecoder dc = {0}; + tDecoderInit(&dc, (uint8_t *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead)); + + SVAlterTbReq vAlterTbReq = {0}; + int64_t ctimeMs = taosGetTimestampMs(); + if (tDecodeSVAlterTbReqSetCtime(&dc, &vAlterTbReq, ctimeMs) < 0) { + goto _exit; + } + + code = 0; + +_exit: + tDecoderClear(&dc); + if (code) { + vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + } else { + vTrace("vgId:%d %s done, table:%s ctimeMs generated:%" PRId64, TD_VID(pVnode), __func__, vAlterTbReq.tbName, + ctimeMs); + } + return code; +} + extern int64_t tsMaxKeyByPrecision[]; -static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int64_t ctime) { +static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int64_t btimeMs, int64_t ctimeMs) { int32_t code = 0; int32_t lino = 0; @@ -127,7 +154,7 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int int64_t uid; if (submitTbData.flags & SUBMIT_REQ_AUTO_CREATE_TABLE) { - code = vnodePreprocessCreateTableReq(pVnode, pCoder, ctime, &uid); + code = vnodePreprocessCreateTableReq(pVnode, pCoder, btimeMs, &uid); TSDB_CHECK_CODE(code, lino, _exit); } @@ -153,7 +180,7 @@ static int32_t vnodePreProcessSubmitTbData(SVnode 
*pVnode, SDecoder *pCoder, int } // scan and check - TSKEY now = ctime; + TSKEY now = btimeMs; if (pVnode->config.tsdbCfg.precision == TSDB_TIME_PRECISION_MICRO) { now *= 1000; } else if (pVnode->config.tsdbCfg.precision == TSDB_TIME_PRECISION_NANO) { @@ -170,7 +197,6 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int SColData colData = {0}; pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData); - if (colData.flag != HAS_VALUE) { code = TSDB_CODE_INVALID_MSG; goto _exit; @@ -182,6 +208,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int goto _exit; } } + + for (uint64_t i = 1; i < nColData; i++) { + pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData); + } } else { uint64_t nRow; if (tDecodeU64v(pCoder, &nRow) < 0) { @@ -200,6 +230,9 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int } } + *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; + pCoder->pos += sizeof(int64_t); + tEndDecode(pCoder); _exit: @@ -229,15 +262,20 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) { TSDB_CHECK_CODE(code, lino, _exit); } - int64_t ctime = taosGetTimestampMs(); + int64_t btimeMs = taosGetTimestampMs(); + int64_t ctimeMs = btimeMs; for (int32_t i = 0; i < nSubmitTbData; i++) { - code = vnodePreProcessSubmitTbData(pVnode, pCoder, ctime); + code = vnodePreProcessSubmitTbData(pVnode, pCoder, btimeMs, ctimeMs); TSDB_CHECK_CODE(code, lino, _exit); } tEndDecode(pCoder); _exit: + if (code) { + vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code), + pMsg->msgType); + } tDecoderClear(pCoder); return code; } @@ -257,6 +295,7 @@ static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) { code = qWorkerProcessDeleteMsg(&handle, pVnode->pQuery, pMsg, &res); if (code) goto _exit; + res.ctimeMs = taosGetTimestampMs(); // malloc and encode tEncodeSize(tEncodeDeleteRes, &res, size, ret); pCont = rpcMallocCont(size + sizeof(SMsgHead)); @@ -278,6 +317,31 @@ _exit: return code; } +static int32_t vnodePreProcessBatchDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) { + int32_t code = 0; + int32_t lino = 0; + + int64_t ctimeMs = taosGetTimestampMs(); + SBatchDeleteReq pReq = {0}; + SDecoder *pCoder = &(SDecoder){0}; + + tDecoderInit(pCoder, (uint8_t *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead)); + + if (tDecodeSBatchDeleteReqSetCtime(pCoder, &pReq, ctimeMs) < 0) { + code = TSDB_CODE_INVALID_MSG; + } + + tDecoderClear(pCoder); + taosArrayDestroy(pReq.deleteReqs); + + if (code) { + vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + } else { + vTrace("vgId:%d %s done, ctimeMs generated:%" PRId64, TD_VID(pVnode), __func__, ctimeMs); + } + return code; +} + int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t code = 0; @@ -285,19 +349,25 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { case TDMT_VND_CREATE_TABLE: { code = vnodePreProcessCreateTableMsg(pVnode, pMsg); } break; + case TDMT_VND_ALTER_TABLE: { + code = vnodePreProcessAlterTableMsg(pVnode, pMsg); + } break; case TDMT_VND_SUBMIT: { code = vnodePreProcessSubmitMsg(pVnode, pMsg); } break; case TDMT_VND_DELETE: { code = vnodePreProcessDeleteMsg(pVnode, pMsg); } break; + case TDMT_VND_BATCH_DEL: { + code = vnodePreProcessBatchDeleteMsg(pVnode, pMsg); + } break; default: break; } _exit: if (code) { - vError("vgId%d failed to preprocess write request since %s, msg 
type:%d", TD_VID(pVnode), tstrerror(code), + vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code), pMsg->msgType); } return code; @@ -505,7 +575,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { vTrace("message in vnode query queue is processing"); - if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME) && !syncIsReadyForRead(pVnode->sync)) { + if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME || pMsg->msgType == TDMT_VND_TMQ_CONSUME_PUSH) && !syncIsReadyForRead(pVnode->sync)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; } @@ -526,6 +596,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_VND_TMQ_CONSUME: return tqProcessPollReq(pVnode->pTq, pMsg); + case TDMT_VND_TMQ_CONSUME_PUSH: + return tqProcessPollPush(pVnode->pTq, pMsg); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_APP_ERROR; @@ -559,8 +631,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return vnodeGetTableCfg(pVnode, pMsg, true); case TDMT_VND_BATCH_META: return vnodeGetBatchMeta(pVnode, pMsg); - case TDMT_VND_TMQ_CONSUME: - return tqProcessPollReq(pVnode->pTq, pMsg); +// case TDMT_VND_TMQ_CONSUME: +// return tqProcessPollReq(pVnode->pTq, pMsg); case TDMT_VND_TMQ_VG_WALINFO: return tqProcessVgWalInfoReq(pVnode->pTq, pMsg); case TDMT_STREAM_TASK_RUN: @@ -587,9 +659,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { } } -// TODO: remove the function void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { - // TODO // blockDebugShowDataBlocks(data, __func__); tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); } @@ -637,8 +707,8 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq, goto end; } - vDebug("vgId:%d, drop ttl table req will be processed, time:%d", pVnode->config.vgId, ttlReq.timestamp); - int32_t ret = metaTtlDropTable(pVnode->pMeta, ttlReq.timestamp, tbUids); + vDebug("vgId:%d, drop ttl table req will be processed, time:%" PRId32, pVnode->config.vgId, ttlReq.timestampSec); + int32_t ret = metaTtlDropTable(pVnode->pMeta, (int64_t)ttlReq.timestampSec * 1000, tbUids); if (ret != 0) { goto end; } @@ -646,7 +716,7 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq, tqUpdateTbUidList(pVnode->pTq, tbUids, false); } - vnodeAsyncRentention(pVnode, ttlReq.timestamp); + vnodeAsyncRentention(pVnode, ttlReq.timestampSec); end: taosArrayDestroy(tbUids); @@ -1382,6 +1452,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in code = tsdbInsertTableData(pVnode->pTsdb, ver, pSubmitTbData, &affectedRows); if (code) goto _exit; + code = metaUpdateChangeTime(pVnode->pMeta, pSubmitTbData->uid, pSubmitTbData->ctimeMs); + if (code) goto _exit; + pSubmitRsp->affectedRows += affectedRows; } @@ -1636,6 +1709,14 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t ver, void *pRe TD_VID(pVnode), terrstr(), deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs); } + code = metaUpdateChangeTime(pVnode->pMeta, uid, deleteReq.ctimeMs); + if (code < 0) { + terrno = code; + vError("vgId:%d, update change time error since %s, suid:%" PRId64 ", uid:%" PRId64 ", start ts:%" PRId64 + ", end ts:%" PRId64, + 
TD_VID(pVnode), terrstr(), deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs); + } + tDecoderClear(&mr.coder); } metaReaderClear(&mr); @@ -1664,8 +1745,10 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, in ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0)); for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) { - code = tsdbDeleteTableData(pVnode->pTsdb, ver, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid), - pRes->skey, pRes->ekey); + uint64_t uid = *(uint64_t *)taosArrayGet(pRes->uidList, iUid); + code = tsdbDeleteTableData(pVnode->pTsdb, ver, pRes->suid, uid, pRes->skey, pRes->ekey); + if (code) goto _err; + code = metaUpdateChangeTime(pVnode->pMeta, uid, pRes->ctimeMs); if (code) goto _err; } diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 695bd4eb388652c197764e325dc932501abe44be..5746ea23406a722ec7464a9db07786c5cc5ee410 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -950,8 +950,8 @@ int32_t ctgCloneMetaOutput(STableMetaOutput* output, STableMetaOutput** pOutput) int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList); void ctgFreeJob(void* job); void ctgFreeHandleImpl(SCatalog* pCtg); -int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup); -int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx, +int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup); +int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx, char* dbFName, SArray* pNames, bool update); int32_t ctgGetVgIdsFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, char* dbFName, const char* pTbs[], int32_t tbNum, int32_t* vgId); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 03df2409290ffc4458a80a7006031d0b5f5b7d1e..f736e9be980a42d7d64ff86b2e8f615b663550f1 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -568,7 +568,7 @@ int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* return TSDB_CODE_SUCCESS; } - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pConn ? &pConn->mgmtEps : NULL, vgInfo ? 
vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); _return: @@ -629,7 +629,7 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf return TSDB_CODE_SUCCESS; } - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, pVgroup)); ctgRUnlockVgInfo(dbCache); diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 2b78b8dd1336174d3442e319578e15178be840da..562343c9c70b4b8a38c866069e897535c5fdfddd 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -1112,7 +1112,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out; SVgroupInfo vgInfo = {0}; - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo)); ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); @@ -1132,7 +1132,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (NULL != dbCache) { SVgroupInfo vgInfo = {0}; - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo)); ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); @@ -1282,7 +1282,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out; SVgroupInfo vgInfo = {0}; - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo)); ctgTaskDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); @@ -1302,7 +1302,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (NULL != dbCache) { SVgroupInfo vgInfo = {0}; - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo)); ctgTaskDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); @@ -1501,7 +1501,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; @@ -1536,7 +1536,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out; STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx); - CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true)); + CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, 
&pTask->pJob->conn.mgmtEps, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true)); CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; @@ -1799,7 +1799,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq* tReq, int32_t flag, SName* pName, int CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (dbCache) { SVgroupInfo vgInfo = {0}; - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo)); ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); @@ -1948,7 +1948,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask* pTask) { if (NULL == pTask->res) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res)); ctgReleaseVgInfoToCache(pCtg, dbCache); dbCache = NULL; @@ -1996,7 +1996,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) { tReq.pTask = pTask; tReq.msgIdx = -1; CTG_ERR_JRET( - ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false)); + ctgGetVgInfosFromHashValue(pCtg, &pConn->mgmtEps, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false)); ctgReleaseVgInfoToCache(pCtg, dbCache); dbCache = NULL; @@ -2375,7 +2375,7 @@ int32_t ctgGetTbCfgCb(SCtgTask* pTask) { SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res; pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo)); - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo)); } CTG_RET(ctgLaunchGetTbCfgTask(pTask)); @@ -2395,7 +2395,7 @@ int32_t ctgGetTbTagCb(SCtgTask* pTask) { if (NULL == pCtx->pVgInfo) { pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo)); - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo)); } CTG_RET(ctgLaunchGetTbTagTask(pTask)); diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index ef4040e22be73533971051700028041624e0f58f..c85621163549bc6b698c681db96f4725e0be8a3a 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -2989,7 +2989,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg } *pVgroup = taosMemoryCalloc(1, sizeof(SVgroupInfo)); - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, *pVgroup)); + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, *pVgroup)); _return: diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index e623b7969dd7307737c67f24647eec644a8c51ef..e7abbc5ead160f552496bcc155c7d92af7720c5e 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -969,7 +969,7 @@ int32_t ctgHashValueComp(void const* lp, void const* rp) { return 0; } -int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) { +int32_t 
ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) { int32_t code = 0; CTG_ERR_RET(ctgMakeVgArray(dbInfo)); @@ -977,6 +977,14 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName char db[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(pTableName, db); + if (IS_SYS_DBNAME(pTableName->dbname)) { + pVgroup->vgId = MNODE_HANDLE; + if (pMgmtEps) { + memcpy(&pVgroup->epSet, pMgmtEps, sizeof(pVgroup->epSet)); + } + return TSDB_CODE_SUCCESS; + } + if (vgNum <= 0) { ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum); CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED); @@ -1020,23 +1028,53 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName CTG_RET(code); } -int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx, +int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx, char* dbFName, SArray* pNames, bool update) { int32_t code = 0; SCtgTask* pTask = tReq->pTask; SMetaRes res = {0}; + SVgroupInfo* vgInfo = NULL; CTG_ERR_RET(ctgMakeVgArray(dbInfo)); + int32_t tbNum = taosArrayGetSize(pNames); + + char* pSep = strchr(dbFName, '.'); + if (pSep && IS_SYS_DBNAME(pSep + 1)) { + SVgroupInfo mgmtInfo = {0}; + mgmtInfo.vgId = MNODE_HANDLE; + if (pMgmgEpSet) { + memcpy(&mgmtInfo.epSet, pMgmgEpSet, sizeof(mgmtInfo.epSet)); + } + for (int32_t i = 0; i < tbNum; ++i) { + vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo)); + if (NULL == vgInfo) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + memcpy(vgInfo, &mgmtInfo, sizeof(mgmtInfo)); + + ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps, + vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port); + + if (update) { + SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx); + SMetaRes* pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i); + pRes->pRes = vgInfo; + } else { + res.pRes = vgInfo; + taosArrayPush(pCtx->pResList, &res); + } + } + return TSDB_CODE_SUCCESS; + } + int32_t vgNum = taosArrayGetSize(dbInfo->vgArray); if (vgNum <= 0) { ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum); CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); } - SVgroupInfo* vgInfo = NULL; - int32_t tbNum = taosArrayGetSize(pNames); - if (1 == vgNum) { for (int32_t i = 0; i < tbNum; ++i) { vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo)); diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 2a435b43e8bd645c18a3122d6ab042ac57ec9077..2a7aeb00603160a2f7a1e6042d6bdcd1d1530414 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -145,7 +145,7 @@ typedef struct SExplainCtx { SHashObj *groupHash; // Hash } SExplainCtx; -#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : "desc") +#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : ORDER_DESC == _order ? "desc" : "unknown") #define EXPLAIN_JOIN_STRING(_type) ((JOIN_TYPE_INNER == _type) ? "Inner join" : "Join") #define INVERAL_TIME_FROM_PRECISION_TO_UNIT(_t, _u, _p) (((_u) == 'n' || (_u) == 'y') ? 
(_t) : (convertTimeFromPrecisionToUnit(_t, _p, _u))) diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index c5b9eb7475b0bdebe19451f1d3ac69a4d248abb5..884c0f7b20b136395d4818f1de77345b6c8b1bfe 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -499,6 +499,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pPrjNode->pProjections->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPrjNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pPrjNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -544,6 +547,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pJoinNode->pTargets->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pJoinNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pJoinNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -597,6 +603,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_GROUPS_FORMAT, pAggNode->pGroupKeys->length); } + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pAggNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -716,6 +725,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i case QUERY_NODE_PHYSICAL_PLAN_SORT: { SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_SORT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pSortNode->node.outputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); @@ -796,9 +810,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - 
EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.outputTsOrder)); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -847,6 +862,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.node.outputTsOrder)); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -895,6 +914,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pFillNode->mode)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pFillNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pFillNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -1080,6 +1102,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.inputTsOrder)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pMergeNode->node.outputTsOrder)); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 38890f8c347d5bbaecb6f3b34fcab9667f1da80b..5d663df50e0e532cc185a1c226a178c600414dda 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -399,6 +399,8 @@ typedef struct SOptrBasicInfo { SResultRowInfo resultRowInfo; SSDataBlock* pRes; bool mergeResultBlock; + int32_t inputTsOrder; + int32_t outputTsOrder; } SOptrBasicInfo; typedef struct SIntervalAggOperatorInfo { @@ -411,8 +413,6 @@ typedef struct SIntervalAggOperatorInfo { STimeWindow win; // query time range bool timeWindowInterpo; // interpolation needed or not SArray* pInterpCols; // interpolation columns - int32_t resultTsOrder; // result timestamp order - int32_t inputOrder; // input data ts order EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
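
The executor hunks above and below follow one recurring refactor: the per-operator `resultTsOrder`/`inputOrder` fields (and the `getTableScanInfo()` lookups) are replaced by `inputTsOrder`/`outputTsOrder` carried in the shared `SOptrBasicInfo`, taken from the physical plan node, while the scan flag now travels on each data block (`pBlock->info.scanFlag`). A minimal sketch of the resulting pattern is shown here; the struct and function names other than the ones quoted from the diff are simplified stand-ins, not the real TDengine definitions.

```c
/* Illustrative sketch only: simplified stand-ins for the executor types.
 * In the real code the order fields live in SOptrBasicInfo and the scan
 * flag in SDataBlockInfo; the names and layout below are assumptions. */
#include <stdint.h>
#include <stddef.h>

typedef struct {
  int32_t scanFlag;              /* e.g. MAIN_SCAN, stamped by the scan operator */
} BlockInfo;

typedef struct {
  BlockInfo info;
  /* ... column data ... */
} DataBlock;

typedef struct {
  int32_t    inputTsOrder;       /* copied from pPhysiNode->node.inputTsOrder */
  int32_t    outputTsOrder;      /* copied from pPhysiNode->node.outputTsOrder */
  DataBlock *pRes;               /* operator result block */
} BasicInfo;

/* Before: operators called getTableScanInfo() to learn order and scanFlag.
 * After: the ts order comes from the plan (kept in the basic info) and the
 * scan flag is read off the incoming block and propagated downstream. */
static void consumeBlock(BasicInfo *binfo, const DataBlock *pBlock) {
  int32_t order = binfo->inputTsOrder;                  /* plan-provided order */
  binfo->pRes->info.scanFlag = pBlock->info.scanFlag;   /* forward to result   */
  /* setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true); */
  (void)order;
}
```

The same pattern recurs in the aggregate, group-by, project, sort, merge and interval operator hunks that follow in this patch.
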
diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index d51a24bb43253e09d6ab21e6a1785d6ef6b5e5ff..e0718a0c0a39473cfa020dcbf2f7801061ab8ed9 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -146,7 +146,7 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId); * @return */ uint64_t tsortGetGroupId(STupleHandle* pVHandle); - +void* tsortGetBlockInfo(STupleHandle* pVHandle); /** * * @param pSortHandle diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index b34c6f4b82842a9080d628b9ef4cc0fcf60f3b6e..be0ad1c2399c1b423bf6a09635d6334581c321fe 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -108,6 +108,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock; pInfo->groupKeyOptimized = pAggNode->groupKeyOptimized; pInfo->groupId = UINT64_MAX; + pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -164,10 +166,8 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; int64_t st = taosGetTimestampUs(); - - int32_t order = TSDB_ORDER_ASC; - int32_t scanFlag = MAIN_SCAN; - + int32_t code = TSDB_CODE_SUCCESS; + int32_t order = pAggInfo->binfo.inputTsOrder; bool hasValidBlock = false; while (1) { @@ -185,12 +185,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { } } hasValidBlock = true; - - int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false); - if (code != TSDB_CODE_SUCCESS) { - destroyDataBlockForEmptyInput(blockAllocated, &pBlock); - T_LONG_JMP(pTaskInfo->env, code); - } + pAggInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag; // there is an scalar expression that needs to be calculated before apply the group aggregation. 
if (pAggInfo->scalarExprSup.pExprInfo != NULL && !blockAllocated) { @@ -204,7 +199,7 @@ int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.id.groupId); - setInputDataBlock(pSup, pBlock, order, scanFlag, true); + setInputDataBlock(pSup, pBlock, order, pBlock->info.scanFlag, true); code = doAggregateImpl(pOperator, pSup->pCtx); if (code != 0) { destroyDataBlockForEmptyInput(blockAllocated, &pBlock); @@ -461,8 +456,12 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n uint32_t defaultPgsz = 0; uint32_t defaultBufsz = 0; - getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - + code = getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); + if (code) { + qError("failed to get buff page size, rowSize:%d", pAggSup->resultRowSize); + return code; + } + if (!osTempSpaceAvailable()) { code = TSDB_CODE_NO_DISKSPACE; qError("Init stream agg supporter failed since %s, key:%s, tempDir:%s", terrstr(code), pKey, tsTempDir); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 48003b277beb61df43a57eaa3ff6435ad4d456a6..ce39ebab5937c6576994458e05963fe0f373bf6a 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -208,6 +208,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { pRes->info.id.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes); pRes->info.rows = 1; + pRes->info.scanFlag = MAIN_SCAN; SExprSupp* pSup = &pInfo->pseudoExprSup; int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pRes, diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c index b5dea6d94dd79d16cda1597b95eb345746c80483..bbdc50183e35bf2f40d80f59eb98e79662a0d98d 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -120,6 +120,8 @@ SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNo initBasicInfo(&pInfo->binfo, pResBlock); initResultRowInfo(&pInfo->binfo.resultRowInfo); + pInfo->binfo.inputTsOrder = physiNode->inputTsOrder; + pInfo->binfo.outputTsOrder = physiNode->outputTsOrder; pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pEventWindowNode->window.watermark, .calTrigger = pEventWindowNode->window.triggerType}; @@ -174,6 +176,7 @@ void destroyEWindowOperatorInfo(void* param) { colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupAggSup(&pInfo->aggSup); + cleanupExprSupp(&pInfo->scalarSup); taosMemoryFreeClear(param); } @@ -182,7 +185,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExprSupp* pSup = &pOperator->exprSupp; - int32_t order = TSDB_ORDER_ASC; + int32_t order = pInfo->binfo.inputTsOrder; SSDataBlock* pRes = pInfo->binfo.pRes; @@ -195,6 +198,7 @@ static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) { break; } + pRes->info.scanFlag = pBlock->info.scanFlag; setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId); diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 94041140d4e60030a4b93a1d6a6b7b4df9eacd85..4fbe6785a3b964395c56392af4ef417b48aa7cf6 100644 --- 
a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -520,6 +520,7 @@ int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pCo // data from mnode pRes->info.dataLoad = 1; pRes->info.rows = pBlock->info.rows; + pRes->info.scanFlag = MAIN_SCAN; relocateColumnData(pRes, pColList, pBlock->pDataBlock, false); blockDataDestroy(pBlock); } diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index fbc0512a26205d3f04159d5672b894c83ab528b0..42b8a9d31c4afb42b824f0a458bcd3decd2d98a8 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -922,8 +922,13 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) { int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) { *defaultPgsz = 4096; + uint32_t last = *defaultPgsz; while (*defaultPgsz < rowSize * 4) { *defaultPgsz <<= 1u; + if (*defaultPgsz < last) { + return TSDB_CODE_INVALID_PARA; + } + last = *defaultPgsz; } // The default buffer for each operator in query is 10MB. @@ -932,6 +937,9 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul *defaultBufsz = 4096 * 2560; if ((*defaultBufsz) <= (*defaultPgsz)) { (*defaultBufsz) = (*defaultPgsz) * 4; + if (*defaultBufsz < ((int64_t)(*defaultPgsz)) * 4) { + return TSDB_CODE_INVALID_PARA; + } } return 0; diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index f9e8a32520f6282770bc6926d9db855153ddcc20..1106c6e29fb093bdc145f41e91957ffc5cc9e9b2 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -178,16 +178,15 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { blockDataCleanup(pResBlock); - int32_t order = TSDB_ORDER_ASC; - int32_t scanFlag = MAIN_SCAN; - getTableScanInfo(pOperator, &order, &scanFlag, false); + int32_t order = pInfo->pFillInfo->order; SOperatorInfo* pDownstream = pOperator->pDownstream[0]; - +#if 0 // the scan order may be different from the output result order for agg interval operator. if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) { order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder; } +#endif doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order); if (pResBlock->info.rows > 0) { @@ -206,13 +205,14 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey); } else { + pResBlock->info.scanFlag = pBlock->info.scanFlag; pBlock->info.dataLoad = 1; blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId); blockDataCleanup(pInfo->pRes); blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows); blockDataEnsureCapacity(pInfo->pFinalRes, pBlock->info.rows); - doApplyScalarCalculation(pOperator, pBlock, order, scanFlag); + doApplyScalarCalculation(pOperator, pBlock, order, pBlock->info.scanFlag); if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) { if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) { @@ -405,7 +405,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval : &((SIntervalAggOperatorInfo*)downstream->info)->interval; - int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; + int32_t order = (pPhyFillNode->node.inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; int32_t type = convertFillType(pPhyFillNode->mode); SResultInfo* pResultInfo = &pOperator->resultInfo; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index c448ea01608b4a5c22773126eed5efa3a3fa357b..57c22d56d38dadf5012a032e7f298f935bbd101e 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -378,9 +378,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { return buildGroupResultDataBlock(pOperator); } - int32_t order = TSDB_ORDER_ASC; - int32_t scanFlag = MAIN_SCAN; - + int32_t order = pInfo->binfo.inputTsOrder; int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; @@ -390,13 +388,10 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { break; } - int32_t code = getTableScanInfo(pOperator, &order, &scanFlag, false); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, code); - } + pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag; // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(&pOperator->exprSupp, pBlock, order, scanFlag, true); + setInputDataBlock(&pOperator->exprSupp, pBlock, order, pBlock->info.scanFlag, true); // there is an scalar expression that needs to be calculated right before apply the group aggregation. if (pInfo->scalarSup.pExprInfo != NULL) { @@ -481,6 +476,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo); pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock; + pInfo->binfo.inputTsOrder = pAggNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo, optrDefaultBufFn, NULL); @@ -762,6 +759,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { break; } + pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag; // there is an scalar expression that needs to be calculated right before apply the group aggregation. 
if (pInfo->scalarSup.pExprInfo != NULL) { pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, @@ -871,7 +869,12 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition uint32_t defaultBufsz = 0; pInfo->binfo.pRes = createDataBlockFromDescNode(pPartNode->node.pOutputDataBlockDesc); - getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz); + int32_t code = getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + pTaskInfo->code = code; + goto _error; + } if (!osTempSpaceAvailable()) { terrno = TSDB_CODE_NO_DISKSPACE; @@ -880,7 +883,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition goto _error; } - int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir); + code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir); if (code != TSDB_CODE_SUCCESS) { terrno = code; pTaskInfo->code = code; diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 73143fdba7be8b2c8a3ed83d19e1416e4758c5dc..13ab5d05a501ca1ff27bf5c32f3ca3f1fe3ed569 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -253,9 +253,9 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t } pInfo->inputOrder = TSDB_ORDER_ASC; - if (pJoinNode->inputTsOrder == ORDER_ASC) { + if (pJoinNode->node.inputTsOrder == ORDER_ASC) { pInfo->inputOrder = TSDB_ORDER_ASC; - } else if (pJoinNode->inputTsOrder == ORDER_DESC) { + } else if (pJoinNode->node.inputTsOrder == ORDER_DESC) { pInfo->inputOrder = TSDB_ORDER_DESC; } @@ -684,6 +684,7 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) // the pDataBlock are always the same one, no need to call this again pRes->info.rows = nrows; pRes->info.dataLoad = 1; + pRes->info.scanFlag = MAIN_SCAN; if (pRes->info.rows >= pOperator->resultInfo.threshold) { break; } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index e7de826d4b399d513851f8e587ab91f92c0701cb..cd450c5bb7fcfb0f9cbeb4767fa584cc30ecc3aa 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -93,6 +93,8 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys pInfo->binfo.pRes = pResBlock; pInfo->pFinalRes = createOneDataBlock(pResBlock, false); + pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder; if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) { pInfo->mergeDataBlocks = false; @@ -238,8 +240,9 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { } int64_t st = 0; - int32_t order = 0; + int32_t order = pInfo->inputTsOrder; int32_t scanFlag = 0; + int32_t code = TSDB_CODE_SUCCESS; if (pOperator->cost.openCost == 0) { st = taosGetTimestampUs(); @@ -284,10 +287,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { break; } - // the pDataBlock are always the same one, no need to call this again - int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, code); + if (pProjectInfo->mergeDataBlocks) { + pFinalRes->info.scanFlag = scanFlag = pBlock->info.scanFlag; + } else { + pRes->info.scanFlag = 
scanFlag = pBlock->info.scanFlag; } setInputDataBlock(pSup, pBlock, order, scanFlag, false); @@ -406,6 +409,8 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy } pInfo->binfo.pRes = pResBlock; + pInfo->binfo.inputTsOrder = pNode->inputTsOrder; + pInfo->binfo.outputTsOrder = pNode->outputTsOrder; pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pSup->pCtx, numOfExpr); setOperatorInfo(pOperator, "IndefinitOperator", QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC, false, OP_NOT_OPENED, pInfo, @@ -429,18 +434,13 @@ _error: static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOperatorInfo* downstream, SExecTaskInfo* pTaskInfo) { - int32_t order = 0; - int32_t scanFlag = 0; - SIndefOperatorInfo* pIndefInfo = pOperator->info; SOptrBasicInfo* pInfo = &pIndefInfo->binfo; SExprSupp* pSup = &pOperator->exprSupp; - // the pDataBlock are always the same one, no need to call this again - int32_t code = getTableScanInfo(downstream, &order, &scanFlag, false); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, code); - } + int32_t order = pInfo->inputTsOrder; + int32_t scanFlag = pBlock->info.scanFlag; + int32_t code = TSDB_CODE_SUCCESS; // there is an scalar expression that needs to be calculated before apply the group aggregation. SExprSupp* pScalarSup = &pIndefInfo->scalarSup; @@ -506,6 +506,7 @@ SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) { setOperatorCompleted(pOperator); break; } + pInfo->pRes->info.scanFlag = pBlock->info.scanFlag; if (pIndefInfo->groupId == 0 && pBlock->info.id.groupId != 0) { pIndefInfo->groupId = pBlock->info.id.groupId; // this is the initial group result diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 2702cf68619d0dd5499cc2b80a40df5522e7c5a0..2f108c83b0b8e1adfa07c69b72440f6e834c404e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -712,6 +712,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0; pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime; + pBlock->info.scanFlag = pTableScanInfo->base.scanFlag; return pBlock; } return NULL; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 0395b9ec088f399f9f568a91a4fc2524c78280e6..8f287946f06e653494389c47cb63d4581296f1ef 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -69,6 +69,8 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode); pInfo->pSortInfo = createSortInfo(pSortNode->pSortKeys); + pInfo->binfo.inputTsOrder = pSortNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pSortNode->node.outputTsOrder; initLimitInfo(pSortNode->node.pLimit, pSortNode->node.pSlimit, &pInfo->limitInfo); setOperatorInfo(pOperator, "SortOperator", QUERY_NODE_PHYSICAL_PLAN_SORT, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -114,6 +116,7 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) { } pBlock->info.dataLoad = 1; + pBlock->info.scanFlag = ((SDataBlockInfo*)tsortGetBlockInfo(pTupleHandle))->scanFlag; pBlock->info.rows += 1; } @@ -155,6 +158,7 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i pDataBlock->info.dataLoad = 1; pDataBlock->info.rows = p->info.rows; + pDataBlock->info.scanFlag 
= p->info.scanFlag; } blockDataDestroy(p); @@ -331,6 +335,7 @@ SSDataBlock* getGroupSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlo pDataBlock->info.rows = p->info.rows; pDataBlock->info.capacity = p->info.rows; + pDataBlock->info.scanFlag = p->info.scanFlag; } blockDataDestroy(p); @@ -505,6 +510,8 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSort pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); + pInfo->binfo.inputTsOrder = pSortPhyNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pSortPhyNode->node.outputTsOrder; int32_t numOfOutputCols = 0; int32_t code = extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, @@ -698,6 +705,7 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData } pDataBlock->info.rows = p->info.rows; + pDataBlock->info.scanFlag = p->info.scanFlag; if (pInfo->ignoreGroupId) { pDataBlock->info.id.groupId = 0; } else { @@ -799,6 +807,8 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock); pInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols); pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1); // one additional is reserved for merged result. + pInfo->binfo.inputTsOrder = pMergePhyNode->node.inputTsOrder; + pInfo->binfo.outputTsOrder = pMergePhyNode->node.outputTsOrder; setOperatorInfo(pOperator, "MultiwayMergeOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE, false, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(openMultiwayMergeOperator, doMultiwayMerge, NULL, diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 23a7d2c9e9e0f8c6415d2a8cccb53e5b7ee0cfd4..06138d7d5c5bd8d438451b07198f85c793e02f8b 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1154,7 +1154,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) { int32_t tableType = mr.me.type; if (tableType == TSDB_CHILD_TABLE) { // create time - int64_t ts = mr.me.ctbEntry.ctime; + int64_t ts = mr.me.ctbEntry.btime; pColInfoData = taosArrayGet(p->pDataBlock, 2); colDataSetVal(pColInfoData, numOfRows, (char*)&ts, false); @@ -1206,7 +1206,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) { } else if (tableType == TSDB_NORMAL_TABLE) { // create time pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataSetVal(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false); + colDataSetVal(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.btime, false); // number of columns pColInfoData = taosArrayGet(p->pDataBlock, 3); @@ -1338,7 +1338,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { int32_t tableType = pInfo->pCur->mr.me.type; if (tableType == TSDB_CHILD_TABLE) { // create time - int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime; + int64_t ts = pInfo->pCur->mr.me.ctbEntry.btime; pColInfoData = taosArrayGet(p->pDataBlock, 2); colDataSetVal(pColInfoData, numOfRows, (char*)&ts, false); @@ -1392,7 +1392,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { } else if (tableType == TSDB_NORMAL_TABLE) { // create time pColInfoData = taosArrayGet(p->pDataBlock, 2); - colDataSetVal(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false); 
+ colDataSetVal(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.btime, false); // number of columns pColInfoData = taosArrayGet(p->pDataBlock, 3); diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 2421343bd7b57949233419997c6d35b2cfd513f0..022440b2ad238fcca62180c4b03174a4dec4eb75 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -44,6 +44,8 @@ typedef struct STimeSliceOperatorInfo { uint64_t groupId; SGroupKeys* pPrevGroupKey; SSDataBlock* pNextGroupRes; + SSDataBlock* pRemainRes; // save block unfinished processing + int32_t remainIndex; // the remaining index in the block to be processed } STimeSliceOperatorInfo; static void destroyTimeSliceOperatorInfo(void* param); @@ -644,13 +646,47 @@ static int32_t resetKeeperInfo(STimeSliceOperatorInfo* pInfo) { return TSDB_CODE_SUCCESS; } +static bool checkThresholdReached(STimeSliceOperatorInfo* pSliceInfo, int32_t threshold) { + SSDataBlock* pResBlock = pSliceInfo->pRes; + if (pResBlock->info.rows > threshold) { + return true; + } + + return false; +} + +static bool checkWindowBoundReached(STimeSliceOperatorInfo* pSliceInfo) { + if (pSliceInfo->current > pSliceInfo->win.ekey) { + return true; + } + + return false; +} + +static void saveBlockStatus(STimeSliceOperatorInfo* pSliceInfo, SSDataBlock* pBlock, int32_t curIndex) { + SSDataBlock* pResBlock = pSliceInfo->pRes; + + SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId); + if (curIndex < pBlock->info.rows - 1) { + pSliceInfo->pRemainRes = pBlock; + pSliceInfo->remainIndex = curIndex + 1; + return; + } + + // all data in remaining block processed + pSliceInfo->pRemainRes = NULL; + +} + static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pSliceInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, bool ignoreNull) { SSDataBlock* pResBlock = pSliceInfo->pRes; SInterval* pInterval = &pSliceInfo->interval; SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId); - for (int32_t i = 0; i < pBlock->info.rows; ++i) { + + int32_t i = (pSliceInfo->pRemainRes == NULL) ? 
0 : pSliceInfo->remainIndex; + for (; i < pBlock->info.rows; ++i) { int64_t ts = *(int64_t*)colDataGetData(pTsCol, i); // check for duplicate timestamps @@ -662,10 +698,6 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS continue; } - if (pSliceInfo->current > pSliceInfo->win.ekey) { - break; - } - if (ts == pSliceInfo->current) { addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i); @@ -674,9 +706,14 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); - if (pSliceInfo->current > pSliceInfo->win.ekey) { + + if (checkWindowBoundReached(pSliceInfo)) { break; } + if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) { + saveBlockStatus(pSliceInfo, pBlock, i); + return; + } } else if (ts < pSliceInfo->current) { // in case of interpolation window starts and ends between two datapoints, fill(prev) need to interpolate doKeepPrevRows(pSliceInfo, pBlock, i); @@ -697,9 +734,13 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS } } - if (pSliceInfo->current > pSliceInfo->win.ekey) { + if (checkWindowBoundReached(pSliceInfo)) { break; } + if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) { + saveBlockStatus(pSliceInfo, pBlock, i); + return; + } } else { // ignore current row, and do nothing } @@ -730,11 +771,20 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS } doKeepPrevRows(pSliceInfo, pBlock, i); - if (pSliceInfo->current > pSliceInfo->win.ekey) { + if (checkWindowBoundReached(pSliceInfo)) { break; } + if (checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) { + saveBlockStatus(pSliceInfo, pBlock, i); + return; + } } } + + // if reached here, meaning block processing finished naturally, + // or interpolation reach window upper bound + pSliceInfo->pRemainRes = NULL; + } static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperatorInfo* pOperator, int32_t index) { @@ -781,39 +831,69 @@ static void resetTimesliceInfo(STimeSliceOperatorInfo* pSliceInfo) { resetKeeperInfo(pSliceInfo); } +static void doHandleTimeslice(SOperatorInfo* pOperator, SSDataBlock* pBlock) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + STimeSliceOperatorInfo* pSliceInfo = pOperator->info; + SExprSupp* pSup = &pOperator->exprSupp; + bool ignoreNull = getIgoreNullRes(pSup); + int32_t order = TSDB_ORDER_ASC; + + int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp); + if (code != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, code); + } + + if (pSliceInfo->scalarSup.pExprInfo != NULL) { + SExprSupp* pExprSup = &pSliceInfo->scalarSup; + projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); + } + + // the pDataBlock are always the same one, no need to call this again + setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); + doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull); + copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock); +} + static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; STimeSliceOperatorInfo* pSliceInfo = pOperator->info; SSDataBlock* pResBlock = pSliceInfo->pRes; - SExprSupp* 
pSup = &pOperator->exprSupp; - bool ignoreNull = getIgoreNullRes(pSup); - int32_t order = TSDB_ORDER_ASC; - SInterval* pInterval = &pSliceInfo->interval; SOperatorInfo* downstream = pOperator->pDownstream[0]; - blockDataCleanup(pResBlock); while (1) { if (pSliceInfo->pNextGroupRes != NULL) { - setInputDataBlock(pSup, pSliceInfo->pNextGroupRes, order, MAIN_SCAN, true); - doTimesliceImpl(pOperator, pSliceInfo, pSliceInfo->pNextGroupRes, pTaskInfo, ignoreNull); - copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pSliceInfo->pNextGroupRes); + doHandleTimeslice(pOperator, pSliceInfo->pNextGroupRes); + if (checkWindowBoundReached(pSliceInfo) || checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) { + doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL); + if (pSliceInfo->pRemainRes == NULL) { + pSliceInfo->pNextGroupRes = NULL; + } + if (pResBlock->info.rows != 0) { + goto _finished; + } else { + // after fillter if result block has 0 rows, go back to + // process pNextGroupRes again for unfinished data + continue; + } + } pSliceInfo->pNextGroupRes = NULL; } while (1) { - SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); + SSDataBlock* pBlock = pSliceInfo->pRemainRes ? pSliceInfo->pRemainRes : downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { setOperatorCompleted(pOperator); break; } + pResBlock->info.scanFlag = pBlock->info.scanFlag; if (pSliceInfo->groupId == 0 && pBlock->info.id.groupId != 0) { pSliceInfo->groupId = pBlock->info.id.groupId; } else { @@ -824,21 +904,15 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { } } - if (pSliceInfo->scalarSup.pExprInfo != NULL) { - SExprSupp* pExprSup = &pSliceInfo->scalarSup; - projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); - } - - int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp); - if (code != TSDB_CODE_SUCCESS) { - T_LONG_JMP(pTaskInfo->env, code); + doHandleTimeslice(pOperator, pBlock); + if (checkWindowBoundReached(pSliceInfo) || checkThresholdReached(pSliceInfo, pOperator->resultInfo.threshold)) { + doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL); + if (pResBlock->info.rows != 0) { + goto _finished; + } } - - // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); - doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull); - copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock); } + // post work for a specific group // check if need to interpolate after last datablock // except for fill(next), fill(linear) @@ -851,11 +925,12 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { // restore initial value for next group resetTimesliceInfo(pSliceInfo); - if (pResBlock->info.rows >= 4096) { + if (pResBlock->info.rows != 0) { break; } } +_finished: // restore the value setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED); if (pResBlock->info.rows == 0) { @@ -911,6 +986,8 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode pInfo->groupId = 0; pInfo->pPrevGroupKey = NULL; pInfo->pNextGroupRes = NULL; + pInfo->pRemainRes = NULL; + pInfo->remainIndex = 0; if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) { STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 
2676e097f9340cbf200b74b641c056a4eb17ef40..f29ea5057e06619a76f0b2ea79091a1eca42d2a6 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -355,7 +355,7 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, int32_t pos, SSDataBlock* pBlock, const TSKEY* tsCols, STimeWindow* win, SExprSupp* pSup) { - bool ascQuery = (pInfo->inputOrder == TSDB_ORDER_ASC); + bool ascQuery = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC); TSKEY curTs = tsCols[pos]; @@ -385,7 +385,7 @@ static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, i static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SExprSupp* pSup, int32_t endRowIndex, SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, STimeWindow* win) { - int32_t order = pInfo->inputOrder; + int32_t order = pInfo->binfo.inputTsOrder; TSKEY actualEndKey = tsCols[endRowIndex]; TSKEY key = (order == TSDB_ORDER_ASC) ? win->ekey : win->skey; @@ -547,7 +547,7 @@ static void doWindowBorderInterpolation(SIntervalAggOperatorInfo* pInfo, SSDataB if (!done) { int32_t endRowIndex = startPos + forwardRows - 1; - TSKEY endKey = (pInfo->inputOrder == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; + TSKEY endKey = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; bool interp = setTimeWindowInterpolationEndTs(pInfo, pSup, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); @@ -885,12 +885,12 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul int32_t numOfOutput = pSup->numOfExprs; int64_t* tsCols = extractTsCol(pBlock, pInfo); uint64_t tableGroupId = pBlock->info.id.groupId; - bool ascScan = (pInfo->inputOrder == TSDB_ORDER_ASC); + bool ascScan = (pInfo->binfo.inputTsOrder == TSDB_ORDER_ASC); TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); SResultRow* pResult = NULL; STimeWindow win = - getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->inputOrder); + getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder); int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -899,7 +899,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul TSKEY ekey = ascScan ? win.ekey : win.skey; int32_t forwardRows = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder); + getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->binfo.inputTsOrder); // prev time window not interpolation yet. 
if (pInfo->timeWindowInterpo) { @@ -926,7 +926,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul STimeWindow nextWin = win; while (1) { int32_t prevEndPos = forwardRows - 1 + startPos; - startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->inputOrder); + startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder); if (startPos < 0) { break; } @@ -939,7 +939,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul ekey = ascScan ? nextWin.ekey : nextWin.skey; forwardRows = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder); + getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->binfo.inputTsOrder); // window start(end) key interpolation doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup); // TODO: add to open window? how to close the open windows after input blocks exhausted? @@ -1032,7 +1032,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { break; } - getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag, true); + pInfo->binfo.pRes->info.scanFlag = scanFlag = pBlock->info.scanFlag; if (pInfo->scalarSupp.pExprInfo != NULL) { SExprSupp* pExprSup = &pInfo->scalarSupp; @@ -1040,11 +1040,11 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again - setInputDataBlock(pSup, pBlock, pInfo->inputOrder, scanFlag, true); + setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true); hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag); } - initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->resultTsOrder); + initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder); OPTR_SET_OPENED(pOperator); pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; @@ -1158,7 +1158,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExprSupp* pSup = &pOperator->exprSupp; - int32_t order = TSDB_ORDER_ASC; + int32_t order = pInfo->binfo.inputTsOrder; int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; @@ -1168,6 +1168,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { break; } + pInfo->binfo.pRes->info.scanFlag = pBlock->info.scanFlag; setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId); @@ -1650,8 +1651,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh }; pInfo->win = pTaskInfo->window; - pInfo->inputOrder = (pPhyNode->window.inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; - pInfo->resultTsOrder = (pPhyNode->window.outputTsOrder == ORDER_ASC) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; + pInfo->binfo.inputTsOrder = pPhyNode->window.node.inputTsOrder; + pInfo->binfo.outputTsOrder = pPhyNode->window.node.outputTsOrder; pInfo->interval = interval; pInfo->twAggSup = as; pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock; @@ -1802,7 +1803,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { } int64_t st = taosGetTimestampUs(); - int32_t order = TSDB_ORDER_ASC; + int32_t order = pInfo->binfo.inputTsOrder; SOperatorInfo* downstream = pOperator->pDownstream[0]; @@ -1812,6 +1813,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { break; } + pBInfo->pRes->info.scanFlag = pBlock->info.scanFlag; // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId); @@ -1872,6 +1874,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi if (pInfo->stateKey.pData == NULL) { goto _error; } + pInfo->binfo.inputTsOrder = pStateNode->window.node.inputTsOrder; + pInfo->binfo.outputTsOrder = pStateNode->window.node.outputTsOrder; int32_t code = filterInitFromNode((SNode*)pStateNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); if (code != TSDB_CODE_SUCCESS) { @@ -1970,6 +1974,8 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW pInfo->binfo.pRes = pResBlock; pInfo->winSup.prevTs = INT64_MIN; pInfo->reptScan = false; + pInfo->binfo.inputTsOrder = pSessionNode->window.node.inputTsOrder; + pInfo->binfo.outputTsOrder = pSessionNode->window.node.outputTsOrder; code = filterInitFromNode((SNode*)pSessionNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -4318,7 +4324,6 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { SSDataBlock* pRes = pIaInfo->binfo.pRes; SResultRowInfo* pResultRowInfo = &pIaInfo->binfo.resultRowInfo; SOperatorInfo* downstream = pOperator->pDownstream[0]; - int32_t scanFlag = MAIN_SCAN; while (1) { SSDataBlock* pBlock = NULL; @@ -4365,8 +4370,8 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { } } - getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag, false); - setInputDataBlock(pSup, pBlock, pIaInfo->inputOrder, scanFlag, true); + pRes->info.scanFlag = pBlock->info.scanFlag; + setInputDataBlock(pSup, pBlock, pIaInfo->binfo.inputTsOrder, pBlock->info.scanFlag, true); doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes); doFilter(pRes, pOperator->exprSupp.pFilterInfo, NULL); @@ -4439,7 +4444,8 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, miaInfo->curTs = INT64_MIN; iaInfo->win = pTaskInfo->window; - iaInfo->inputOrder = TSDB_ORDER_ASC; + iaInfo->binfo.inputTsOrder = pNode->window.node.inputTsOrder; + iaInfo->binfo.outputTsOrder = pNode->window.node.outputTsOrder; iaInfo->interval = interval; iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId; iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock; @@ -4516,7 +4522,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t STimeWindow* newWin) { SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info; SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; - bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC); + bool ascScan = (iaInfo->binfo.inputTsOrder == TSDB_ORDER_ASC); SGroupTimeWindow groupTimeWindow = 
{.groupId = tableGroupId, .window = *newWin}; tdListAppend(miaInfo->groupIntervals, &groupTimeWindow); @@ -4551,12 +4557,12 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* int32_t numOfOutput = pExprSup->numOfExprs; int64_t* tsCols = extractTsCol(pBlock, iaInfo); uint64_t tableGroupId = pBlock->info.id.groupId; - bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC); + bool ascScan = (iaInfo->binfo.inputTsOrder == TSDB_ORDER_ASC); TSKEY blockStartTs = getStartTsKey(&pBlock->info.window, tsCols); SResultRow* pResult = NULL; STimeWindow win = getActiveTimeWindow(iaInfo->aggSup.pResultBuf, pResultRowInfo, blockStartTs, &iaInfo->interval, - iaInfo->inputOrder); + iaInfo->binfo.inputTsOrder); int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, @@ -4567,7 +4573,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* TSKEY ekey = ascScan ? win.ekey : win.skey; int32_t forwardRows = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder); + getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->binfo.inputTsOrder); ASSERT(forwardRows > 0); // prev time window not interpolation yet. @@ -4598,7 +4604,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* while (1) { int32_t prevEndPos = forwardRows - 1 + startPos; startPos = - getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->inputOrder); + getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->binfo.inputTsOrder); if (startPos < 0) { break; } @@ -4613,7 +4619,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* ekey = ascScan ? 
nextWin.ekey : nextWin.skey; forwardRows = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder); + getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->binfo.inputTsOrder); // window start(end) key interpolation doWindowBorderInterpolation(iaInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pExprSup); @@ -4649,7 +4655,6 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { if (!miaInfo->inputBlocksFinished) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - int32_t scanFlag = MAIN_SCAN; while (1) { SSDataBlock* pBlock = NULL; if (miaInfo->prefetchedBlock == NULL) { @@ -4674,9 +4679,9 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { break; } - getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag, false); - setInputDataBlock(pExpSupp, pBlock, iaInfo->inputOrder, scanFlag, true); - doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes); + pRes->info.scanFlag = pBlock->info.scanFlag; + setInputDataBlock(pExpSupp, pBlock, iaInfo->binfo.inputTsOrder, pBlock->info.scanFlag, true); + doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, pBlock->info.scanFlag, pRes); if (pRes->info.rows >= pOperator->resultInfo.threshold) { break; @@ -4726,10 +4731,11 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo; pIntervalInfo->win = pTaskInfo->window; - pIntervalInfo->inputOrder = TSDB_ORDER_ASC; + pIntervalInfo->binfo.inputTsOrder = pIntervalPhyNode->window.node.inputTsOrder; pIntervalInfo->interval = interval; pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock; pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; + pIntervalInfo->binfo.outputTsOrder = pIntervalPhyNode->window.node.outputTsOrder; SExprSupp* pExprSupp = &pOperator->exprSupp; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 783597df6705af50017312095ede318707a2dd01..f26aa8a97cafbeeb6f80248bb84b6b01f4ad0be4 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -101,7 +101,11 @@ static int32_t sortComparCleanup(SMsortComparParam* cmpParam) { for (int32_t i = 0; i < cmpParam->numOfSources; ++i) { SSortSource* pSource = cmpParam->pSources[i]; blockDataDestroy(pSource->src.pBlock); + if (pSource->pageIdList) { + taosArrayDestroy(pSource->pageIdList); + } taosMemoryFreeClear(pSource); + cmpParam->pSources[i] = NULL; } cmpParam->numOfSources = 0; @@ -123,9 +127,11 @@ void tsortClearOrderdSource(SArray* pOrderedSource, int64_t *fetchUs, int64_t *f // release pageIdList if ((*pSource)->pageIdList) { taosArrayDestroy((*pSource)->pageIdList); + (*pSource)->pageIdList = NULL; } if ((*pSource)->param && !(*pSource)->onlyRef) { taosMemoryFree((*pSource)->param); + (*pSource)->param = NULL; } if (!(*pSource)->onlyRef && (*pSource)->src.pBlock) { @@ -881,6 +887,7 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) { } uint64_t tsortGetGroupId(STupleHandle* pVHandle) { return pVHandle->pBlock->info.id.groupId; } +void* tsortGetBlockInfo(STupleHandle* pVHandle) { return &pVHandle->pBlock->info; } SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) { SSortExecInfo info = {0}; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 
1265c64c8c1d832d1ce2916ec923eb6db44460d0..4365cd8b959752cdd47ce3488ef3519eaf58dacb 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2660,7 +2660,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { } else { pDiffInfo->ignoreNegative = false; } - pDiffInfo->includeNull = false; + pDiffInfo->includeNull = true; pDiffInfo->firstOutput = false; return true; } diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index a8e4f692abcfbebd42390b2d8b41e6aaf7055f4c..6e4dde4ec1752f4bb3349c9c41658ab0140264ae 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -110,6 +110,7 @@ static int32_t columnNodeCopy(const SColumnNode* pSrc, SColumnNode* pDst) { COPY_SCALAR_FIELD(tableId); COPY_SCALAR_FIELD(tableType); COPY_SCALAR_FIELD(colId); + COPY_SCALAR_FIELD(projIdx); COPY_SCALAR_FIELD(colType); COPY_SCALAR_FIELD(hasIndex); COPY_CHAR_ARRAY_FIELD(dbName); @@ -358,6 +359,8 @@ static int32_t logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) { COPY_SCALAR_FIELD(requireDataOrder); COPY_SCALAR_FIELD(resultDataOrder); COPY_SCALAR_FIELD(groupAction); + COPY_SCALAR_FIELD(inputTsOrder); + COPY_SCALAR_FIELD(outputTsOrder); return TSDB_CODE_SUCCESS; } @@ -404,7 +407,6 @@ static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) { CLONE_NODE_FIELD(pOnConditions); CLONE_NODE_FIELD(pColEqualOnConditions); COPY_SCALAR_FIELD(isSingleTableJoin); - COPY_SCALAR_FIELD(inputTsOrder); return TSDB_CODE_SUCCESS; } @@ -482,8 +484,6 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p COPY_SCALAR_FIELD(igExpired); COPY_SCALAR_FIELD(igCheckUpdate); COPY_SCALAR_FIELD(windowAlgo); - COPY_SCALAR_FIELD(inputTsOrder); - COPY_SCALAR_FIELD(outputTsOrder); return TSDB_CODE_SUCCESS; } @@ -495,7 +495,6 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) { CLONE_NODE_FIELD(pWStartTs); CLONE_NODE_FIELD(pValues); COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); - COPY_SCALAR_FIELD(inputTsOrder); return TSDB_CODE_SUCCESS; } @@ -544,6 +543,8 @@ static int32_t physiNodeCopy(const SPhysiNode* pSrc, SPhysiNode* pDst) { CLONE_NODE_FIELD_EX(pOutputDataBlockDesc, SDataBlockDescNode*); CLONE_NODE_FIELD(pConditions); CLONE_NODE_LIST_FIELD(pChildren); + COPY_SCALAR_FIELD(inputTsOrder); + COPY_SCALAR_FIELD(outputTsOrder); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 6bf1ad01a8d3b9aef17257658fcb4c9d749305ea..0b449c5bfe7e7543ba824a4c128f055c7aa011e0 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1904,9 +1904,6 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType); } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder); - } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition); } @@ -1929,9 +1926,6 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code); } - if (TSDB_CODE_SUCCESS == code) { - tjsonGetNumberValue(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder, code); 
- } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions); } @@ -2150,7 +2144,6 @@ static const char* jkWindowPhysiPlanWatermark = "Watermark"; static const char* jkWindowPhysiPlanDeleteMark = "DeleteMark"; static const char* jkWindowPhysiPlanIgnoreExpired = "IgnoreExpired"; static const char* jkWindowPhysiPlanInputTsOrder = "InputTsOrder"; -static const char* jkWindowPhysiPlanOutputTsOrder = "outputTsOrder"; static const char* jkWindowPhysiPlanMergeDataBlock = "MergeDataBlock"; static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { @@ -2181,12 +2174,6 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanIgnoreExpired, pNode->igExpired); } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder); - } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkWindowPhysiPlanMergeDataBlock, pNode->mergeDataBlock); } @@ -2222,12 +2209,6 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetTinyIntValue(pJson, jkWindowPhysiPlanIgnoreExpired, &pNode->igExpired); } - if (TSDB_CODE_SUCCESS == code) { - tjsonGetNumberValue(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder, code); - } - if (TSDB_CODE_SUCCESS == code) { - tjsonGetNumberValue(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder, code); - } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkWindowPhysiPlanMergeDataBlock, &pNode->mergeDataBlock); } @@ -2294,7 +2275,6 @@ static const char* jkFillPhysiPlanWStartTs = "WStartTs"; static const char* jkFillPhysiPlanValues = "Values"; static const char* jkFillPhysiPlanStartTime = "StartTime"; static const char* jkFillPhysiPlanEndTime = "EndTime"; -static const char* jkFillPhysiPlanInputTsOrder = "inputTsOrder"; static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj; @@ -2321,9 +2301,6 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey); } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder); - } return code; } @@ -2353,9 +2330,6 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, &pNode->timeRange.ekey); } - if (TSDB_CODE_SUCCESS == code) { - tjsonGetNumberValue(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder, code); - } return code; } @@ -3053,6 +3027,7 @@ static const char* jkColumnTableId = "TableId"; static const char* jkColumnTableType = "TableType"; static const char* jkColumnColId = "ColId"; static const char* jkColumnColType = "ColType"; +static const char* jkColumnProjId = "ProjId"; static const char* jkColumnDbName = "DbName"; static const char* jkColumnTableName = "TableName"; static const char* jkColumnTableAlias = "TableAlias"; @@ -3073,6 +3048,9 @@ static int32_t columnNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, 
jkColumnColId, pNode->colId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkColumnProjId, pNode->projIdx); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkColumnColType, pNode->colType); } @@ -3111,6 +3089,9 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetSmallIntValue(pJson, jkColumnColId, &pNode->colId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetSmallIntValue(pJson, jkColumnProjId, &pNode->projIdx); + } if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code); } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 0631b91323146ed6dc37c2417971d5d49beafba4..1ca37defa4a76a6b679e85facae10a6cd758fb80 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -1851,7 +1851,9 @@ enum { PHY_NODE_CODE_CONDITIONS, PHY_NODE_CODE_CHILDREN, PHY_NODE_CODE_LIMIT, - PHY_NODE_CODE_SLIMIT + PHY_NODE_CODE_SLIMIT, + PHY_NODE_CODE_INPUT_TS_ORDER, + PHY_NODE_CODE_OUTPUT_TS_ORDER }; static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -1870,6 +1872,12 @@ static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_SLIMIT, nodeToMsg, pNode->pSlimit); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeEnum(pEncoder, PHY_NODE_CODE_INPUT_TS_ORDER, pNode->inputTsOrder); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeEnum(pEncoder, PHY_NODE_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder); + } return code; } @@ -1896,6 +1904,12 @@ static int32_t msgToPhysiNode(STlvDecoder* pDecoder, void* pObj) { case PHY_NODE_CODE_SLIMIT: code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSlimit); break; + case PHY_NODE_CODE_INPUT_TS_ORDER: + code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder)); + break; + case PHY_NODE_CODE_OUTPUT_TS_ORDER: + code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder)); + break; default: break; } @@ -2339,9 +2353,6 @@ static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TARGETS, nodeListToMsg, pNode->pTargets); } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER, pNode->inputTsOrder); - } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS, nodeToMsg, pNode->pColEqualOnConditions); } @@ -2370,9 +2381,6 @@ static int32_t msgToPhysiJoinNode(STlvDecoder* pDecoder, void* pObj) { case PHY_SORT_MERGE_JOIN_CODE_TARGETS: code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets); break; - case PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER: - code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder)); - break; case PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS: code = msgToNodeFromTlv(pTlv, (void**)&pNode->pColEqualOnConditions); break; @@ -2675,12 +2683,6 @@ static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_IG_EXPIRED, pNode->igExpired); } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_INPUT_TS_ORDER, pNode->inputTsOrder); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeEnum(pEncoder, 
PHY_WINDOW_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder); - } if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_WINDOW_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock); } @@ -2722,12 +2724,6 @@ static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) { case PHY_WINDOW_CODE_IG_EXPIRED: code = tlvDecodeI8(pTlv, &pNode->igExpired); break; - case PHY_WINDOW_CODE_INPUT_TS_ORDER: - code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder)); - break; - case PHY_WINDOW_CODE_OUTPUT_TS_ORDER: - code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder)); - break; case PHY_WINDOW_CODE_MERGE_DATA_BLOCK: code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock); break; @@ -2846,9 +2842,6 @@ static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange); } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_INPUT_TS_ORDER, pNode->inputTsOrder); - } return code; } @@ -2881,9 +2874,6 @@ static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) { case PHY_FILL_CODE_TIME_RANGE: code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, (void**)&pNode->timeRange); break; - case PHY_FILL_CODE_INPUT_TS_ORDER: - code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder)); - break; default: break; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index ee2bc7e4423e72ead6d96354bb48914f0601e734..15232b95b6dcb584ee22d708ca42e3d6a359ad82 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -1956,9 +1956,9 @@ static uint32_t funcNodeHash(const char* pKey, uint32_t len) { } static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) { - if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) { - return 1; - } + // if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) { + // return 1; + // } return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 
0 : 1; } diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 4fecb1cd33affcf6cf1e737282f217eae583630c..ff394467f6f660054f7eaf1768be3c1fcc2a4483 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -210,7 +210,7 @@ SNode* createCreateTopicStmtUseDb(SAstCreateContext* pCxt, bool ignoreExists, ST SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SNode* pRealTable, bool withMeta, SNode* pWhere); SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pTopicName); -SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, SToken* pTopicName); +SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId, SToken* pTopicName); SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue); SNode* createDefaultExplainOptions(SAstCreateContext* pCxt); SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal); diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e5a0fc3d76c9163abd2880233e551048ff6a11cf..e08153c34121c541fab57a3a03b66c8fb9b7152d 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -210,6 +210,15 @@ static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) { return true; } +static bool checkCGroupName(SAstCreateContext* pCxt, SToken* pCGroup) { + trimEscape(pCGroup); + if (pCGroup->n >= TSDB_CGROUP_LEN) { + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pCGroup->z); + return false; + } + return true; +} + static bool checkStreamName(SAstCreateContext* pCxt, SToken* pStreamName) { trimEscape(pStreamName); if (pStreamName->n >= TSDB_STREAM_NAME_LEN) { @@ -1751,12 +1760,15 @@ SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken return (SNode*)pStmt; } -SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, +SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId, SToken* pTopicName) { CHECK_PARSER_STATUS(pCxt); if (!checkTopicName(pCxt, pTopicName)) { return NULL; } + if (!checkCGroupName(pCxt, pCGroupId)) { + return NULL; + } SDropCGroupStmt* pStmt = (SDropCGroupStmt*)nodesMakeNode(QUERY_NODE_DROP_CGROUP_STMT); CHECK_OUT_OF_MEM(pStmt); pStmt->ignoreNotExists = ignoreNotExists; diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 01b62a90511fd933239f806160ca6760299b7f95..49d27f60836c5a9289faa86faf98847e72fc084c 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -311,6 +311,9 @@ static int32_t calcConstDelete(SCalcConstContext* pCxt, SDeleteStmt* pDelete) { if (TSDB_CODE_SUCCESS == code) { code = calcConstStmtCondition(pCxt, &pDelete->pWhere, &pDelete->deleteZeroRows); } + if (code == TSDB_CODE_SUCCESS && pDelete->timeRange.skey > pDelete->timeRange.ekey) { + pDelete->deleteZeroRows = true; + } return code; } @@ -465,6 +468,9 @@ static bool isEmptyResultQuery(SNode* pStmt) { } break; } + case QUERY_NODE_DELETE_STMT: + isEmptyResult = ((SDeleteStmt*)pStmt)->deleteZeroRows; + break; default: break; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 
43c4a5389638384b76cc1e737cc91899b7cc9c60..b3a043fe12c0d6328c6b506ab7a327bea8cba54c 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1384,13 +1384,33 @@ static bool isCountStar(SFunctionNode* pFunc) { return (QUERY_NODE_COLUMN == nodeType(pPara) && 0 == strcmp(((SColumnNode*)pPara)->colName, "*")); } +static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { + int32_t code = TSDB_CODE_SUCCESS; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == pVal) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pVal->node.resType.type = TSDB_DATA_TYPE_INT; + pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes; + const int32_t val = 1; + nodesSetValueNodeValue(pVal, (void*)&val); + pVal->translate = true; + nodesListErase(pCount->pParameterList, nodesListGetCell(pCount->pParameterList, 0)); + code = nodesListAppend(pCount->pParameterList, (SNode*)pVal); + return code; +} + // count(*) is rewritten as count(ts) for scannning optimization static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) { SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pCount->pParameterList, 0); STableNode* pTable = NULL; int32_t code = findTable(pCxt, ('\0' == pCol->tableAlias[0] ? NULL : pCol->tableAlias), &pTable); - if (TSDB_CODE_SUCCESS == code && QUERY_NODE_REAL_TABLE == nodeType(pTable)) { - setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol); + if (TSDB_CODE_SUCCESS == code) { + if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { + setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol); + } else { + code = rewriteCountStarAsCount1(pCxt, pCount); + } } return code; } @@ -2341,7 +2361,7 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) int32_t code = TSDB_CODE_SUCCESS; if (NULL == getGroupByList(pCxt) && NULL == pSelect->pPartitionByList && NULL == pSelect->pWindow) { return code; - } + } if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } @@ -2352,7 +2372,7 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); } -*/ +*/ return code; } @@ -2660,7 +2680,7 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; - + nodesRewriteExprPostOrder(&pSelect->pWhere, doTranslateTbName, &pRewriteCxt); return pRewriteCxt.errCode; @@ -3055,13 +3075,13 @@ static bool needFill(SNode* pNode) { static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) { SListCell* pCell = nodesListGetCell(pValues, index); - if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) { + if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType) && (QUERY_NODE_VALUE == nodeType(pCell->pNode))) { return TSDB_CODE_SUCCESS; } - SNode* pCaseFunc = NULL; - int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc); + SNode* pCastFunc = NULL; + int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCastFunc); if (TSDB_CODE_SUCCESS == code) { - code = scalarCalculateConstants(pCaseFunc, &pCell->pNode); + code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != 
nodeType(pCell->pNode)) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); @@ -3157,7 +3177,7 @@ static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect } if (TSDB_CODE_SUCCESS == code) { code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } + } if (TSDB_CODE_SUCCESS == code) { code = translateFillValues(pCxt, pSelect); } @@ -4945,6 +4965,14 @@ static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt if (TSDB_CODE_SUCCESS == code) { code = checkTableSchema(pCxt, pStmt); } + if (TSDB_CODE_SUCCESS == code) { + if (createStable && pStmt->pOptions->ttl != 0) { + code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, + "Only supported for create non-super table in databases " + "configured with the 'TTL' option"); + } + } + return code; } @@ -4979,6 +5007,7 @@ static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { } snprintf(pTable->table.dbName, sizeof(pTable->table.dbName), "%s", pInfo->pDbName); snprintf(pTable->table.tableName, sizeof(pTable->table.tableName), "%s", pInfo->pTableName); + snprintf(pTable->table.tableAlias, sizeof(pTable->table.tableAlias), "%s", pInfo->pTableName); TSWAP(pTable->pMeta, pInfo->pRollupTableMeta); *pOutput = (SNode*)pTable; return TSDB_CODE_SUCCESS; diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index f6dfa93ab27b358b32b6602ce78c64f1cc7eb0b4..6d27bb0d29d3525386c1b0d1cdf91e0ad8490210 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -765,7 +765,7 @@ TEST_F(ParserInitialCTest, createStable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, " "a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED, a11 TINYINT, " "a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) MAX_DELAY 100s,10m WATERMARK 10a,1m " + "COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) MAX_DELAY 100s,10m WATERMARK 10a,1m " "DELETE_MARK 1000s,200m"); clearCreateStbReq(); } @@ -1005,7 +1005,7 @@ TEST_F(ParserInitialCTest, createTable) { ASSERT_EQ(pReq->flags, pExpect->flags); ASSERT_EQ(std::string(pReq->name), std::string(pExpect->name)); ASSERT_EQ(pReq->uid, pExpect->uid); - ASSERT_EQ(pReq->ctime, pExpect->ctime); + ASSERT_EQ(pReq->btime, pExpect->btime); ASSERT_EQ(pReq->ttl, pExpect->ttl); ASSERT_EQ(pReq->commentLen, pExpect->commentLen); ASSERT_EQ(std::string(pReq->comment), std::string(pExpect->comment)); @@ -1038,7 +1038,7 @@ TEST_F(ParserInitialCTest, createTable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), " "a9 SMALLINT, a10 SMALLINT UNSIGNED, a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, " "a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN)"); + "COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN)"); run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy', NOW)"); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 5bbc9acdadfe671d54cbe8ce534797ed580146ed..713f12e2294c49bb1327728a7fa162d6313e31f2 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -436,7 +436,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* 
pCxt, SSelectStmt* pSelect pJoin->joinType = pJoinTable->joinType; pJoin->isSingleTableJoin = pJoinTable->table.singleTable; - pJoin->inputTsOrder = ORDER_ASC; + pJoin->node.inputTsOrder = ORDER_ASC; pJoin->node.groupAction = GROUP_ACTION_CLEAR; pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL; pJoin->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL; @@ -741,8 +741,8 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm pWindow->igExpired = pCxt->pPlanCxt->igExpired; pWindow->igCheckUpdate = pCxt->pPlanCxt->igCheckUpdate; } - pWindow->inputTsOrder = ORDER_ASC; - pWindow->outputTsOrder = ORDER_ASC; + pWindow->node.inputTsOrder = ORDER_ASC; + pWindow->node.outputTsOrder = ORDER_ASC; int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_WINDOW, fmIsWindowClauseFunc, &pWindow->pFuncs); if (TSDB_CODE_SUCCESS == code) { @@ -972,7 +972,7 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pFill->node.groupAction = getGroupAction(pCxt, pSelect); pFill->node.requireDataOrder = getRequireDataOrder(true, pSelect); pFill->node.resultDataOrder = pFill->node.requireDataOrder; - pFill->inputTsOrder = ORDER_ASC; + pFill->node.inputTsOrder = 0; int32_t code = partFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs); if (TSDB_CODE_SUCCESS == code) { @@ -1045,6 +1045,19 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect if (NULL == pSort->pSortKeys) { code = TSDB_CODE_OUT_OF_MEMORY; } + SNode* pNode = NULL; + SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0); + if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) { + SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr; + int16_t projIdx = 1; + FOREACH(pNode, pSelect->pProjectionList) { + SExprNode* pExpr = (SExprNode*)pNode; + if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) { + pCol->projIdx = projIdx; break; + } + projIdx++; + } + } } if (TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 8b75fe6b33b4821bd813c73a92507e7bda3e05f2..2d1a758f3342f5d4fea5a31d8bc578ca4960d94e 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -116,25 +116,33 @@ static EDealRes optRebuildTbanme(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } -static void optSetParentOrder(SLogicNode* pNode, EOrder order) { +static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNodeForcePropagate) { if (NULL == pNode) { return; } + pNode->inputTsOrder = order; switch (nodeType(pNode)) { - case QUERY_NODE_LOGIC_PLAN_WINDOW: - ((SWindowLogicNode*)pNode)->inputTsOrder = order; - // window has a sorting function, and the operator behind it uses its output order - return; + // for those nodes that will change the order, stop propagating //case QUERY_NODE_LOGIC_PLAN_WINDOW: case QUERY_NODE_LOGIC_PLAN_JOIN: - ((SJoinLogicNode*)pNode)->inputTsOrder = order; - break; - case QUERY_NODE_LOGIC_PLAN_FILL: - ((SFillLogicNode*)pNode)->inputTsOrder = order; + case QUERY_NODE_LOGIC_PLAN_AGG: + case QUERY_NODE_LOGIC_PLAN_SORT: + if (pNode == pNodeForcePropagate) { + pNode->outputTsOrder = order; + break; + } else + return; + case QUERY_NODE_LOGIC_PLAN_WINDOW: + // Window output ts order defaults to asc, and is changed by the sort-by-primary-key optimization. + // We stop propagating the original order to parents. + // Use the window output ts order instead.
+ order = pNode->outputTsOrder; break; default: + pNode->outputTsOrder = order; break; } - optSetParentOrder(pNode->pParent, order); + optSetParentOrder(pNode->pParent, order, pNodeForcePropagate); } EDealRes scanPathOptHaveNormalColImpl(SNode* pNode, void* pContext) { @@ -159,25 +167,25 @@ static bool scanPathOptMayBeOptimized(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode)) { return false; } - if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) && - QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode->pParent) && - QUERY_NODE_LOGIC_PLAN_PARTITION != nodeType(pNode->pParent))) { - return false; - } - if ((QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) && - WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType) || - (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode->pParent) && pNode->pParent->pParent && - QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent->pParent) && + return true; +} + +static bool scanPathOptShouldGetFuncs(SLogicNode* pNode) { + if ((QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode) && + WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode)->winType) || + (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) && pNode->pParent && + QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode->pParent) && WINDOW_TYPE_INTERVAL == ((SWindowLogicNode*)pNode->pParent)->winType)) { return true; } - if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode->pParent)) { - return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode->pParent)->pGroupKeys); + if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode)) { + return !scanPathOptHaveNormalCol(((SAggLogicNode*)pNode)->pGroupKeys); } return false; } static SNodeList* scanPathOptGetAllFuncs(SLogicNode* pNode) { + if (!scanPathOptShouldGetFuncs(pNode)) return NULL; switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_WINDOW: return ((SWindowLogicNode*)pNode)->pFuncs; @@ -339,12 +347,12 @@ static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan) case SCAN_ORDER_ASC: pScan->scanSeq[0] = 1; pScan->scanSeq[1] = 0; - optSetParentOrder(pScan->node.pParent, ORDER_ASC); + optSetParentOrder(pScan->node.pParent, ORDER_ASC, NULL); break; case SCAN_ORDER_DESC: pScan->scanSeq[0] = 0; pScan->scanSeq[1] = 1; - optSetParentOrder(pScan->node.pParent, ORDER_DESC); + optSetParentOrder(pScan->node.pParent, ORDER_DESC, NULL); break; case SCAN_ORDER_BOTH: pScan->scanSeq[0] = 1; @@ -1239,6 +1247,7 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS if ((ORDER_DESC == order && pScan->scanSeq[0] > 0) || (ORDER_ASC == order && pScan->scanSeq[1] > 0)) { TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]); } + pScan->node.outputTsOrder = order; if (TSDB_SUPER_TABLE == pScan->tableType) { pScan->scanType = SCAN_TYPE_TABLE_MERGE; pScan->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL; @@ -1246,9 +1255,9 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS } pScan->sortPrimaryKey = true; } else if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pSequencingNode)) { - ((SWindowLogicNode*)pSequencingNode)->outputTsOrder = order; + ((SLogicNode*)pSequencingNode)->outputTsOrder = order; } - optSetParentOrder(((SLogicNode*)pSequencingNode)->pParent, order); + optSetParentOrder(((SLogicNode*)pSequencingNode)->pParent, order, (SLogicNode*)pSort); } SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0); @@ -2881,10 +2890,62 @@ static int32_t tableCountScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLo return code; } +static 
SSortLogicNode* sortNonPriKeySatisfied(SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_SORT != nodeType(pNode)) { + return NULL; + } + SSortLogicNode* pSort = (SSortLogicNode*)pNode; + if (sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys)) { + return NULL; + } + SNode* pSortKeyNode = NULL, *pSortKeyExpr = NULL; + FOREACH(pSortKeyNode, pSort->pSortKeys) { + pSortKeyExpr = ((SOrderByExprNode*)pSortKeyNode)->pExpr; + switch (nodeType(pSortKeyExpr)) { + case QUERY_NODE_COLUMN: + break; + case QUERY_NODE_VALUE: + continue; + default: + return NULL; + } + } + + if (!pSortKeyExpr || ((SColumnNode*)pSortKeyExpr)->projIdx != 1 || + ((SColumnNode*)pSortKeyExpr)->node.resType.type != TSDB_DATA_TYPE_TIMESTAMP) { + return NULL; + } + return pSort; +} + +static bool sortNonPriKeyShouldOptimize(SLogicNode* pNode, void* pInfo) { + SSortLogicNode* pSort = sortNonPriKeySatisfied(pNode); + if (!pSort) return false; + SNodeList* pSortNodeList = pInfo; + nodesListAppend(pSortNodeList, (SNode*)pSort); + return false; +} + +static int32_t sortNonPriKeyOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { + SNodeList* pNodeList = nodesMakeList(); + optFindEligibleNode(pLogicSubplan->pNode, sortNonPriKeyShouldOptimize, pNodeList); + SNode* pNode = NULL; + FOREACH(pNode, pNodeList) { + SSortLogicNode* pSort = (SSortLogicNode*)pNode; + SOrderByExprNode* pOrderByExpr = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0); + pSort->node.outputTsOrder = pOrderByExpr->order; + optSetParentOrder(pSort->node.pParent, pOrderByExpr->order, NULL); + } + pCxt->optimized = false; + nodesClearList(pNodeList); + return TSDB_CODE_SUCCESS; +} + // clang-format off static const SOptimizeRule optimizeRuleSet[] = { {.pName = "ScanPath", .optimizeFunc = scanPathOptimize}, {.pName = "PushDownCondition", .optimizeFunc = pushDownCondOptimize}, + {.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize}, {.pName = "SortPrimaryKey", .optimizeFunc = sortPrimaryKeyOptimize}, {.pName = "SmaIndex", .optimizeFunc = smaIndexOptimize}, {.pName = "PartitionTags", .optimizeFunc = partTagsOptimize}, diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 4f57193856ea5016aad37731fc342e9bd1b5b9f2..b3d94a5e47aef06960954b88c612b9568f0e45d4 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -366,6 +366,8 @@ static SPhysiNode* makePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode TSWAP(pPhysiNode->pLimit, pLogicNode->pLimit); TSWAP(pPhysiNode->pSlimit, pLogicNode->pSlimit); + pPhysiNode->inputTsOrder = pLogicNode->inputTsOrder; + pPhysiNode->outputTsOrder = pLogicNode->outputTsOrder; int32_t code = createDataBlockDesc(pCxt, pLogicNode->pTargets, &pPhysiNode->pOutputDataBlockDesc); if (TSDB_CODE_SUCCESS != code) { @@ -676,7 +678,7 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren int32_t code = TSDB_CODE_SUCCESS; pJoin->joinType = pJoinLogicNode->joinType; - pJoin->inputTsOrder = pJoinLogicNode->inputTsOrder; + pJoin->node.inputTsOrder = pJoinLogicNode->node.inputTsOrder; setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition, &pJoin->pMergeCondition); if (TSDB_CODE_SUCCESS == code) { @@ -939,6 +941,11 @@ static int32_t createIndefRowsFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* SNodeList* pFuncs = NULL; int32_t code = rewritePrecalcExprs(pCxt, pFuncLogicNode->pFuncs, &pPrecalcExprs, &pFuncs); + if (pIdfRowsFunc->node.inputTsOrder 
== 0) { + // default to asc + pIdfRowsFunc->node.inputTsOrder = TSDB_ORDER_ASC; + } + SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc); // push down expression to pOutputDataBlockDesc of child node if (TSDB_CODE_SUCCESS == code && NULL != pPrecalcExprs) { @@ -1156,9 +1163,12 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pWindow->watermark = pWindowLogicNode->watermark; pWindow->deleteMark = pWindowLogicNode->deleteMark; pWindow->igExpired = pWindowLogicNode->igExpired; - pWindow->inputTsOrder = pWindowLogicNode->inputTsOrder; - pWindow->outputTsOrder = pWindowLogicNode->outputTsOrder; pWindow->mergeDataBlock = (GROUP_ACTION_KEEP == pWindowLogicNode->node.groupAction ? false : true); + pWindow->node.inputTsOrder = pWindowLogicNode->node.inputTsOrder; + pWindow->node.outputTsOrder = pWindowLogicNode->node.outputTsOrder; + if (nodeType(pWindow) == QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL) { + pWindow->node.inputTsOrder = pWindowLogicNode->node.outputTsOrder; + } SNodeList* pPrecalcExprs = NULL; SNodeList* pFuncs = NULL; @@ -1492,7 +1502,7 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren pFill->mode = pFillNode->mode; pFill->timeRange = pFillNode->timeRange; - pFill->inputTsOrder = pFillNode->inputTsOrder; + pFill->node.inputTsOrder = pFillNode->node.inputTsOrder; SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc); int32_t code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pFillExprs, &pFill->pFillExprs); diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 504db0d07b8ce4988b3cd40b4089d24dc88dc03f..246ee13fb00aa7d30857e63a03f18262ffb10510 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -534,7 +534,9 @@ static int32_t stbSplGetNumOfVgroups(SLogicNode* pNode) { static int32_t stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* pNode) { int32_t code = TSDB_CODE_SUCCESS; - + pMerge->node.inputTsOrder = pNode->outputTsOrder; + pMerge->node.outputTsOrder = pNode->outputTsOrder; + switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_PROJECT: { SProjectLogicNode *pLogicNode = (SProjectLogicNode*)pNode; @@ -631,7 +633,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_MERGE; SNodeList* pMergeKeys = NULL; code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk, - ((SWindowLogicNode*)pInfo->pSplitNode)->outputTsOrder, &pMergeKeys); + ((SWindowLogicNode*)pInfo->pSplitNode)->node.outputTsOrder, &pMergeKeys); if (TSDB_CODE_SUCCESS == code) { code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, true); } @@ -721,7 +723,7 @@ static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSpl SNodeList* pMergeKeys = NULL; int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk, - ((SWindowLogicNode*)pWindow)->inputTsOrder, &pMergeKeys); + ((SWindowLogicNode*)pWindow)->node.inputTsOrder, &pMergeKeys); if (TSDB_CODE_SUCCESS == code) { code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild, true); diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index fa2ddd9163c8309c3853e5c189dd1f9124c64307..38d7c9da3bb5a01848c2055d28a982272173a289 100644 
--- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -141,7 +141,7 @@ void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { if (NULL == pMsgBody) { return; } - + taosMemoryFreeClear(pMsgBody->target.dbFName); taosMemoryFreeClear(pMsgBody->msgInfo.pData); if (pMsgBody->paramFreeFp) { @@ -173,7 +173,7 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra .contLen = pInfo->msgInfo.len, .info.ahandle = (void*)pInfo, .info.handle = pInfo->msgInfo.handle, - .info.persistHandle = persistHandle, + .info.persistHandle = persistHandle, .code = 0 }; TRACE_SET_ROOTID(&rpcMsg.info.traceId, pInfo->requestId); @@ -252,7 +252,7 @@ void destroyQueryExecRes(SExecResult* pRes) { taosMemoryFreeClear(pRes->res); break; } - case TDMT_SCH_QUERY: + case TDMT_SCH_QUERY: case TDMT_SCH_MERGE_QUERY: { taosArrayDestroy((SArray*)pRes->res); break; @@ -532,7 +532,7 @@ int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst) { (*pDst)->name = taosStrdup(pSrc->name); } (*pDst)->uid = pSrc->uid; - (*pDst)->ctime = pSrc->ctime; + (*pDst)->btime = pSrc->btime; (*pDst)->ttl = pSrc->ttl; (*pDst)->commentLen = pSrc->commentLen; if (pSrc->comment) { diff --git a/source/libs/qworker/CMakeLists.txt b/source/libs/qworker/CMakeLists.txt index 8ba8b79ab80430395131ad10d7c7912dc17879c2..7a984cd000caf314f59e74baad09d898800bf19d 100644 --- a/source/libs/qworker/CMakeLists.txt +++ b/source/libs/qworker/CMakeLists.txt @@ -7,15 +7,9 @@ target_include_directories( PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(qworker - PRIVATE os util transport nodes planner qcom executor index grant - ) -ELSE () - TARGET_LINK_LIBRARIES(qworker - PRIVATE os util transport nodes planner qcom executor index - ) -ENDIF() +TARGET_LINK_LIBRARIES(qworker + PRIVATE os util transport nodes planner qcom executor index + ) if(${BUILD_TEST}) ADD_SUBDIRECTORY(test) diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 231e597724f7bb03c4e1a726bb9e59e18660d532..508e957e265d295b51db83aedb22ac92575bec9c 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -366,7 +366,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } - if (chkGrant && (!TEST_SHOW_REWRITE_MASK(msg.msgMask)) && (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS)) { + if (chkGrant && (!TEST_SHOW_REWRITE_MASK(msg.msgMask)) && !taosGranted()) { QW_ELOG("query failed cause of grant expired, msgMask:%d", msg.msgMask); tFreeSSubQueryMsg(&msg); QW_ERR_RET(TSDB_CODE_GRANT_EXPIRED); diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 9db049508126d2665726f79a56456b2c09e45656..6f641e677a7edacbf862b095854772567431a7ba 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -427,6 +427,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes pRes->suid = pDelRes->suid; pRes->uidList = pDelRes->uidList; + pRes->ctimeMs = taosGetTimestampMs(); pRes->skey = pDelRes->skey; pRes->ekey = pDelRes->ekey; pRes->affectedRows = pDelRes->affectedRows; @@ -824,6 +825,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { break; } QW_UNLOCK(QW_WRITE, &ctx->lock); + queryStop = false; } while (true); input.code = code; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 207753ae2571fe292ca009cb6af51030bf0667f0..78e28bce4958f91a6802ea93798886d02c34af23 
100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -765,7 +765,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { if (SCH_IS_DATA_BIND_TASK(pTask)) { SCH_TASK_ELOG("no execNode specifed for data src task, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps); - SCH_ERR_RET(TSDB_CODE_APP_ERROR); + SCH_ERR_RET(TSDB_CODE_MND_INVALID_SCHEMA_VER); } SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt index d1ef7fe3c1c7d3f988549be6940a0ba55e61f815..5c24db5051f9b9e8e7a400d9e58dfb9714848f70 100644 --- a/source/libs/stream/CMakeLists.txt +++ b/source/libs/stream/CMakeLists.txt @@ -9,6 +9,14 @@ target_include_directories( if(${BUILD_WITH_ROCKSDB}) IF (TD_LINUX) + target_include_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + target_link_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) target_link_libraries( stream PUBLIC rocksdb tdb diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ed9f99cf7828111c2c1cf89ade3eb6cb7bae192b..5c31b1dd602595a264693aa965fdba9b7448ae13 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -89,6 +89,9 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF } pMeta->streamBackend = streamBackendInit(streamPath); + if (pMeta->streamBackend == NULL) { + goto _err; + } pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend); taosMemoryFree(streamPath); diff --git a/source/libs/stream/test/CMakeLists.txt b/source/libs/stream/test/CMakeLists.txt index 049bfbbb3aadedbfc59e4b587e3fc9630028b0f1..629b04ae51435c83b6556a87636929ca732bbec3 100644 --- a/source/libs/stream/test/CMakeLists.txt +++ b/source/libs/stream/test/CMakeLists.txt @@ -8,20 +8,9 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) # bloomFilterTest ADD_EXECUTABLE(streamUpdateTest "tstreamUpdateTest.cpp") -#TARGET_LINK_LIBRARIES( -# streamUpdateTest -# PUBLIC os util common gtest gtest_main stream executor -#) - -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(streamUpdateTest - PUBLIC os util common gtest gtest_main stream executor index grant - ) -ELSE () - TARGET_LINK_LIBRARIES(streamUpdateTest - PUBLIC os util common gtest gtest_main stream executor index - ) -ENDIF() +TARGET_LINK_LIBRARIES(streamUpdateTest + PUBLIC os util common gtest gtest_main stream executor index + ) TARGET_INCLUDE_DIRECTORIES( streamUpdateTest diff --git a/source/libs/tdb/inc/tdb.h b/source/libs/tdb/inc/tdb.h index 0e20941b3ae472986d5116616eda05af93a83dc1..4dd47e97ac94bdba2b80bd6f5815a38e987bc9d8 100644 --- a/source/libs/tdb/inc/tdb.h +++ b/source/libs/tdb/inc/tdb.h @@ -46,12 +46,16 @@ int32_t tdbAlter(TDB *pDb, int pages); int32_t tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprFn, TDB *pEnv, TTB **ppTb, int8_t rollback); int32_t tdbTbClose(TTB *pTb); +bool tdbTbExist(const char *tbname, TDB *pEnv); +int tdbTbDropByName(const char *tbname, TDB *pEnv, TXN* pTxn); int32_t tdbTbDrop(TTB *pTb); int32_t tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn); int32_t tdbTbDelete(TTB *pTb, const void *pKey, int kLen, TXN *pTxn); int32_t tdbTbUpsert(TTB *pTb, const void *pKey, int kLen, const void *pVal, int vLen, TXN *pTxn); int32_t tdbTbGet(TTB *pTb, const void *pKey, int kLen, void **ppVal, int *vLen); int32_t tdbTbPGet(TTB 
*pTb, const void *pKey, int kLen, void **ppKey, int *pkLen, void **ppVal, int *vLen); +int32_t tdbTbTraversal(TTB *pTb, void *data, + int32_t (*func)(const void *pKey, int keyLen, const void *pVal, int valLen, void *data)); // TBC int32_t tdbTbcOpen(TTB *pTb, TBC **ppTbc, TXN *pTxn); diff --git a/source/libs/tdb/src/db/tdbTable.c b/source/libs/tdb/src/db/tdbTable.c index 18d14fa474f51104442cf689bd52649598b9c396..446a21f312ba4e3118744ae2f8837126c9ff3fcf 100644 --- a/source/libs/tdb/src/db/tdbTable.c +++ b/source/libs/tdb/src/db/tdbTable.c @@ -134,11 +134,67 @@ int tdbTbClose(TTB *pTb) { return 0; } +bool tdbTbExist(const char *tbname, TDB *pEnv) { + bool exist = false; + SPager *pPager; + char fFullName[TDB_FILENAME_LEN]; + +#ifdef USE_MAINDB + + snprintf(fFullName, TDB_FILENAME_LEN, "%s/%s", pEnv->dbName, TDB_MAINDB_NAME); + + if (strcmp(TDB_MAINDB_NAME, tbname)) { + pPager = tdbEnvGetPager(pEnv, fFullName); + + exist = tdbTbGet(pPager->pEnv->pMainDb, tbname, strlen(tbname) + 1, NULL, NULL) == 0; + } else { + exist = taosCheckExistFile(fFullName); + } + +#else + + snprintf(fFullName, TDB_FILENAME_LEN, "%s/%s", pEnv->dbName, tbname); + + exist = taosCheckExistFile(fFullName); + +#endif + + return exist; +} + int tdbTbDrop(TTB *pTb) { // TODO return 0; } +int tdbTbDropByName(const char *tbname, TDB *pEnv, TXN *pTxn) { + int ret; + SPager *pPager; + char fFullName[TDB_FILENAME_LEN]; + +#ifdef USE_MAINDB + + snprintf(fFullName, TDB_FILENAME_LEN, "%s/%s", pEnv->dbName, TDB_MAINDB_NAME); + + if (strcmp(TDB_MAINDB_NAME, tbname)) { + pPager = tdbEnvGetPager(pEnv, fFullName); + + ret = tdbTbDelete(pPager->pEnv->pMainDb, tbname, strlen(tbname) + 1, pTxn); + } else { + ret = taosRemoveFile(fFullName); + } + +#else + + snprintf(fFullName, TDB_FILENAME_LEN, "%s/%s", pEnv->dbName, tbname); + + ret = taosRemoveFile(fFullName); + +#endif + + return ret; +} + int tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn) { return tdbBtreeInsert(pTb->pBt, pKey, keyLen, pVal, valLen, pTxn); } @@ -173,6 +229,38 @@ int tdbTbcOpen(TTB *pTb, TBC **ppTbc, TXN *pTxn) { return 0; } +int32_t tdbTbTraversal(TTB *pTb, void *data, + int32_t (*func)(const void *pKey, int keyLen, const void *pVal, int valLen, void *data)) { + TBC *pCur; + int ret = tdbTbcOpen(pTb, &pCur, NULL); + if (ret < 0) { + return ret; + } + + tdbTbcMoveToFirst(pCur); + + void *pKey = NULL; + int kLen = 0; + void *pValue = NULL; + int vLen = 0; + + while (1) { + ret = tdbTbcNext(pCur, &pKey, &kLen, &pValue, &vLen); + if (ret < 0) { + ret = 0; + break; + } + + ret = func(pKey, kLen, pValue, vLen, data); + if (ret < 0) break; + } + tdbFree(pKey); + tdbFree(pValue); + tdbTbcClose(pCur); + + return 0; +} + int tdbTbcMoveTo(TBC *pTbc, const void *pKey, int kLen, int *c) { return tdbBtcMoveTo(&pTbc->btc, pKey, kLen, c); } int tdbTbcMoveToFirst(TBC *pTbc) { return tdbBtcMoveToFirst(&pTbc->btc); } diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index db066a82b6647462eb3b17e7723411477f7fa1e1..46e28b529db1769d4e47c2694d6a9f19037997bf 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -64,7 +64,7 @@ else() endif() IF (JEMALLOC_ENABLED) - target_link_libraries(os PUBLIC -ljemalloc) + target_link_libraries(os PUBLIC -L${CMAKE_BINARY_DIR}/build/lib -ljemalloc) ENDIF () if(${BUILD_TEST}) diff --git a/source/util/src/terror.c b/source/util/src/terror.c index a66af6e73229904eb0c5430ddc40a43e618bd047..d7571b928388944a01118ae027a4ae8418e4edce 100644 --- a/source/util/src/terror.c +++ 
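Usage note on the new tdb entry points above (tdbTbExist, tdbTbDropByName, tdbTbTraversal): they let a caller probe, walk, and remove a table by name without keeping a TTB handle open for its whole lifetime. The sketch below only uses the signatures declared in this patch; the table name "state.db", the dropIfStale wrapper, and the already-open pEnv/pTxn are assumptions for illustration, not code from the patch.

```c
#include <stdint.h>
#include <stdio.h>

#include "tdb.h"   /* the header extended above */

/* Traversal callback: counts entries.  tdbTbTraversal stops the walk early
 * when the callback returns a negative value. */
static int32_t countEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *data) {
  (void)pKey; (void)keyLen; (void)pVal; (void)valLen;
  int32_t *pCount = data;
  (*pCount)++;
  return 0;
}

/* Usage sketch only: assumes pEnv was opened and pTxn begun elsewhere. */
static int32_t dropIfStale(TDB *pEnv, TXN *pTxn) {
  if (!tdbTbExist("state.db", pEnv)) {
    return 0;                                   /* nothing to clean up */
  }

  TTB *pTb = NULL;
  if (tdbTbOpen("state.db", -1, -1, NULL, pEnv, &pTb, 0) < 0) {
    return -1;
  }

  int32_t nEntry = 0;
  int32_t code = tdbTbTraversal(pTb, &nEntry, countEntry);
  tdbTbClose(pTb);
  if (code < 0) return code;

  printf("state.db holds %d entries\n", nEntry);
  if (nEntry == 0) {
    /* empty table: drop it by name inside the caller's transaction */
    return tdbTbDropByName("state.db", pEnv, pTxn);
  }
  return 0;
}
```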
b/source/util/src/terror.c @@ -261,8 +261,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STB_ALTER_OPTION, "Invalid stable alter TAOS_DEFINE_ERROR(TSDB_CODE_MND_STB_OPTION_UNCHNAGED, "STable option unchanged") TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC,"Field used by topic") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB, "Database is single stable mode") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SCHEMA_VER, "Invalid schema version while alter stb") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH, "Invalid stable uid while alter stb") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SCHEMA_VER, "Invalid schema version") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH, "Invalid stable uid") TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA, "Field used by tsma") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_CREATING, "Dnode in creating status") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_IN_DROPPING, "Dnode in dropping status") diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 801e6aa1bdad471313c9f26d7332561f2fb60a9a..9bb3f75b3b3d6e7685613b37a8e37bf690417689 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -5,423 +5,6 @@ #unit-test ,,y,unit-test,bash test.sh -#tsim test -,,y,script,./test.sh -f tsim/user/basic.sim -,,y,script,./test.sh -f tsim/user/password.sim -,,y,script,./test.sh -f tsim/user/privilege_db.sim -,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim -,,y,script,./test.sh -f tsim/user/privilege_topic.sim -,,y,script,./test.sh -f tsim/db/alter_option.sim -,,y,script,./test.sh -f tsim/db/alter_replica_13.sim -,,y,script,./test.sh -f tsim/db/alter_replica_31.sim -,,y,script,./test.sh -f tsim/db/basic1.sim -,,y,script,./test.sh -f tsim/db/basic2.sim -,,y,script,./test.sh -f tsim/db/basic3.sim -,,y,script,./test.sh -f tsim/db/basic4.sim -,,y,script,./test.sh -f tsim/db/basic5.sim -,,y,script,./test.sh -f tsim/db/basic6.sim -,,y,script,./test.sh -f tsim/db/commit.sim -,,y,script,./test.sh -f tsim/db/create_all_options.sim -,,y,script,./test.sh -f tsim/db/delete_reuse1.sim -,,y,script,./test.sh -f tsim/db/delete_reuse2.sim -,,y,script,./test.sh -f tsim/db/delete_reusevnode.sim -,,y,script,./test.sh -f tsim/db/delete_reusevnode2.sim -,,y,script,./test.sh -f tsim/db/delete_writing1.sim -,,y,script,./test.sh -f tsim/db/delete_writing2.sim -,,y,script,./test.sh -f tsim/db/error1.sim -,,y,script,./test.sh -f tsim/db/keep.sim -,,y,script,./test.sh -f tsim/db/len.sim -,,y,script,./test.sh -f tsim/db/repeat.sim -,,y,script,./test.sh -f tsim/db/show_create_db.sim -,,y,script,./test.sh -f tsim/db/show_create_table.sim -,,y,script,./test.sh -f tsim/db/tables.sim -,,y,script,./test.sh -f tsim/db/taosdlog.sim -,,y,script,./test.sh -f tsim/dnode/balance_replica1.sim -,,y,script,./test.sh -f tsim/dnode/balance_replica3.sim -,,y,script,./test.sh -f tsim/dnode/balance1.sim -,,y,script,./test.sh -f tsim/dnode/balance2.sim -,,y,script,./test.sh -f tsim/dnode/balance3.sim -,,y,script,./test.sh -f tsim/dnode/balancex.sim -,,y,script,./test.sh -f tsim/dnode/create_dnode.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim -,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim -,,y,script,./test.sh -f 
tsim/dnode/drop_dnode_force.sim -,,y,script,./test.sh -f tsim/dnode/offline_reason.sim -,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim -,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim -,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim -,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim -,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim -,,y,script,./test.sh -f tsim/dnode/vnode_clean.sim -,,y,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim -,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim -,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim -,,y,script,./test.sh -f tsim/import/basic.sim -,,y,script,./test.sh -f tsim/import/commit.sim -,,y,script,./test.sh -f tsim/import/large.sim -,,y,script,./test.sh -f tsim/import/replica1.sim -,,y,script,./test.sh -f tsim/insert/backquote.sim -,,y,script,./test.sh -f tsim/insert/basic.sim -,,y,script,./test.sh -f tsim/insert/basic0.sim -,,y,script,./test.sh -f tsim/insert/basic1.sim -,,y,script,./test.sh -f tsim/insert/basic2.sim -,,y,script,./test.sh -f tsim/insert/commit-merge0.sim -,,y,script,./test.sh -f tsim/insert/insert_drop.sim -,,y,script,./test.sh -f tsim/insert/insert_select.sim -,,y,script,./test.sh -f tsim/insert/null.sim -,,y,script,./test.sh -f tsim/insert/query_block1_file.sim -,,y,script,./test.sh -f tsim/insert/query_block1_memory.sim -,,y,script,./test.sh -f tsim/insert/query_block2_file.sim -,,y,script,./test.sh -f tsim/insert/query_block2_memory.sim -,,y,script,./test.sh -f tsim/insert/query_file_memory.sim -,,y,script,./test.sh -f tsim/insert/query_multi_file.sim -,,y,script,./test.sh -f tsim/insert/tcp.sim -,,y,script,./test.sh -f tsim/insert/update0.sim -,,y,script,./test.sh -f tsim/insert/update1_sort_merge.sim -,,y,script,./test.sh -f tsim/insert/update2.sim -,,y,script,./test.sh -f tsim/parser/alter__for_community_version.sim -,,y,script,./test.sh -f tsim/parser/alter_column.sim -,,y,script,./test.sh -f tsim/parser/alter_stable.sim -,,y,script,./test.sh -f tsim/parser/alter.sim -,,y,script,./test.sh -f tsim/parser/alter1.sim -,,y,script,./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim -,,y,script,./test.sh -f tsim/parser/auto_create_tb.sim -,,y,script,./test.sh -f tsim/parser/between_and.sim -,,y,script,./test.sh -f tsim/parser/binary_escapeCharacter.sim -,,y,script,./test.sh -f tsim/parser/col_arithmetic_operation.sim -,,y,script,./test.sh -f tsim/parser/columnValue_bigint.sim -,,y,script,./test.sh -f tsim/parser/columnValue_bool.sim -,,y,script,./test.sh -f tsim/parser/columnValue_double.sim -,,y,script,./test.sh -f tsim/parser/columnValue_float.sim -,,y,script,./test.sh -f tsim/parser/columnValue_int.sim -,,y,script,./test.sh -f tsim/parser/columnValue_smallint.sim -,,y,script,./test.sh -f tsim/parser/columnValue_tinyint.sim -,,y,script,./test.sh -f tsim/parser/columnValue_unsign.sim -,,y,script,./test.sh -f tsim/parser/commit.sim -,,y,script,./test.sh -f tsim/parser/condition.sim -,,y,script,./test.sh -f tsim/parser/constCol.sim -,,y,script,./test.sh -f tsim/parser/create_db.sim -,,y,script,./test.sh -f tsim/parser/create_mt.sim -,,y,script,./test.sh -f tsim/parser/create_tb_with_tag_name.sim -,,y,script,./test.sh -f tsim/parser/create_tb.sim -,,y,script,./test.sh -f tsim/parser/dbtbnameValidate.sim -,,y,script,./test.sh -f tsim/parser/distinct.sim -,,y,script,./test.sh -f tsim/parser/fill_us.sim -,,y,script,./test.sh -f tsim/parser/fill.sim -,,y,script,./test.sh -f 
tsim/parser/first_last.sim -,,y,script,./test.sh -f tsim/parser/fill_stb.sim -,,y,script,./test.sh -f tsim/parser/interp.sim -,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim -,,y,script,./test.sh -f tsim/parser/function.sim -,,y,script,./test.sh -f tsim/parser/groupby-basic.sim -,,y,script,./test.sh -f tsim/parser/groupby.sim -,,y,script,./test.sh -f tsim/parser/having_child.sim -,,y,script,./test.sh -f tsim/parser/having.sim -,,y,script,./test.sh -f tsim/parser/import_commit1.sim -,,y,script,./test.sh -f tsim/parser/import_commit2.sim -,,y,script,./test.sh -f tsim/parser/import_commit3.sim -,,y,script,./test.sh -f tsim/parser/import_file.sim -,,y,script,./test.sh -f tsim/parser/import.sim -,,y,script,./test.sh -f tsim/parser/insert_multiTbl.sim -,,y,script,./test.sh -f tsim/parser/insert_tb.sim -,,y,script,./test.sh -f tsim/parser/join_manyblocks.sim -,,y,script,./test.sh -f tsim/parser/join_multitables.sim -,,y,script,./test.sh -f tsim/parser/join_multivnode.sim -,,y,script,./test.sh -f tsim/parser/join.sim -,,y,script,./test.sh -f tsim/parser/last_cache.sim -,,y,script,./test.sh -f tsim/parser/last_groupby.sim -,,y,script,./test.sh -f tsim/parser/lastrow.sim -,,y,script,./test.sh -f tsim/parser/lastrow2.sim -,,y,script,./test.sh -f tsim/parser/like.sim -,,y,script,./test.sh -f tsim/parser/limit.sim -,,y,script,./test.sh -f tsim/parser/limit1.sim -,,y,script,./test.sh -f tsim/parser/mixed_blocks.sim -,,y,script,./test.sh -f tsim/parser/nchar.sim -,,y,script,./test.sh -f tsim/parser/nestquery.sim -,,y,script,./test.sh -f tsim/parser/null_char.sim -,,y,script,./test.sh -f tsim/parser/precision_ns.sim -,,y,script,./test.sh -f tsim/parser/projection_limit_offset.sim -,,y,script,./test.sh -f tsim/parser/regex.sim -,,y,script,./test.sh -f tsim/parser/regressiontest.sim -,,y,script,./test.sh -f tsim/parser/select_across_vnodes.sim -,,y,script,./test.sh -f tsim/parser/select_distinct_tag.sim -,,y,script,./test.sh -f tsim/parser/select_from_cache_disk.sim -,,y,script,./test.sh -f tsim/parser/select_with_tags.sim -,,y,script,./test.sh -f tsim/parser/selectResNum.sim -,,y,script,./test.sh -f tsim/parser/set_tag_vals.sim -,,y,script,./test.sh -f tsim/parser/single_row_in_tb.sim -,,y,script,./test.sh -f tsim/parser/sliding.sim -,,y,script,./test.sh -f tsim/parser/slimit_alter_tags.sim -,,y,script,./test.sh -f tsim/parser/slimit.sim -,,y,script,./test.sh -f tsim/parser/slimit1.sim -,,y,script,./test.sh -f tsim/parser/stableOp.sim -,,y,script,./test.sh -f tsim/parser/tags_dynamically_specifiy.sim -,,y,script,./test.sh -f tsim/parser/tags_filter.sim -,,y,script,./test.sh -f tsim/parser/tbnameIn.sim -,,y,script,./test.sh -f tsim/parser/timestamp.sim -,,y,script,./test.sh -f tsim/parser/top_groupby.sim -,,y,script,./test.sh -f tsim/parser/topbot.sim -,,y,script,./test.sh -f tsim/parser/union.sim -,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim -,,y,script,./test.sh -f tsim/parser/where.sim -,,y,script,./test.sh -f tsim/query/tagLikeFilter.sim -,,y,script,./test.sh -f tsim/query/charScalarFunction.sim -,,y,script,./test.sh -f tsim/query/explain.sim -,,y,script,./test.sh -f tsim/query/interval-offset.sim -,,y,script,./test.sh -f tsim/query/interval.sim -,,y,script,./test.sh -f tsim/query/scalarFunction.sim -,,y,script,./test.sh -f tsim/query/scalarNull.sim -,,y,script,./test.sh -f tsim/query/session.sim -,,y,script,./test.sh -f tsim/query/udf.sim -,,y,script,./test.sh -f tsim/query/udf_with_const.sim -,,y,script,./test.sh -f tsim/query/sys_tbname.sim -,,y,script,./test.sh -f 
tsim/query/groupby.sim -,,y,script,./test.sh -f tsim/query/event.sim -,,y,script,./test.sh -f tsim/query/forceFill.sim -,,y,script,./test.sh -f tsim/query/emptyTsRange.sim -,,y,script,./test.sh -f tsim/query/partitionby.sim -,,y,script,./test.sh -f tsim/qnode/basic1.sim -,,y,script,./test.sh -f tsim/snode/basic1.sim -,,y,script,./test.sh -f tsim/mnode/basic1.sim -,,y,script,./test.sh -f tsim/mnode/basic2.sim -,,y,script,./test.sh -f tsim/mnode/basic3.sim -,,y,script,./test.sh -f tsim/mnode/basic4.sim -,,y,script,./test.sh -f tsim/mnode/basic5.sim -,,y,script,./test.sh -f tsim/show/basic.sim -,,y,script,./test.sh -f tsim/table/autocreate.sim -,,y,script,./test.sh -f tsim/table/basic1.sim -,,y,script,./test.sh -f tsim/table/basic2.sim -,,y,script,./test.sh -f tsim/table/basic3.sim -,,y,script,./test.sh -f tsim/table/bigint.sim -,,y,script,./test.sh -f tsim/table/binary.sim -,,y,script,./test.sh -f tsim/table/bool.sim -,,y,script,./test.sh -f tsim/table/column_name.sim -,,y,script,./test.sh -f tsim/table/column_num.sim -,,y,script,./test.sh -f tsim/table/column_value.sim -,,y,script,./test.sh -f tsim/table/column2.sim -,,y,script,./test.sh -f tsim/table/createmulti.sim -,,y,script,./test.sh -f tsim/table/date.sim -,,y,script,./test.sh -f tsim/table/db.table.sim -,,y,script,./test.sh -f tsim/table/delete_reuse1.sim -,,y,script,./test.sh -f tsim/table/delete_reuse2.sim -,,y,script,./test.sh -f tsim/table/delete_writing.sim -,,y,script,./test.sh -f tsim/table/describe.sim -,,y,script,./test.sh -f tsim/table/double.sim -,,y,script,./test.sh -f tsim/table/float.sim -,,y,script,./test.sh -f tsim/table/hash.sim -,,y,script,./test.sh -f tsim/table/int.sim -,,y,script,./test.sh -f tsim/table/limit.sim -,,y,script,./test.sh -f tsim/table/smallint.sim -,,y,script,./test.sh -f tsim/table/table_len.sim -,,y,script,./test.sh -f tsim/table/table.sim -,,y,script,./test.sh -f tsim/table/tinyint.sim -,,y,script,./test.sh -f tsim/table/vgroup.sim -,,n,script,./test.sh -f tsim/stream/basic0.sim -g -,,y,script,./test.sh -f tsim/stream/basic1.sim -,,y,script,./test.sh -f tsim/stream/basic2.sim -,,y,script,./test.sh -f tsim/stream/basic3.sim -,,y,script,./test.sh -f tsim/stream/basic4.sim -,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim -,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim -,,y,script,./test.sh -f tsim/stream/deleteInterval.sim -,,y,script,./test.sh -f tsim/stream/deleteSession.sim -,,y,script,./test.sh -f tsim/stream/deleteState.sim -,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim -,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim -,,y,script,./test.sh -f tsim/stream/distributeSession0.sim -,,y,script,./test.sh -f tsim/stream/drop_stream.sim -,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim -,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim -,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalDelete0.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalDelete1.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalLinear.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalPartitionBy.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext1.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalRange.sim -,,y,script,./test.sh -f tsim/stream/fillIntervalValue.sim -,,y,script,./test.sh -f tsim/stream/ignoreCheckUpdate.sim -,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim -,,y,script,./test.sh -f 
tsim/stream/partitionby1.sim -,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim -,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim -,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim -,,y,script,./test.sh -f tsim/stream/partitionby.sim -,,y,script,./test.sh -f tsim/stream/pauseAndResume.sim -,,y,script,./test.sh -f tsim/stream/schedSnode.sim -,,y,script,./test.sh -f tsim/stream/session0.sim -,,y,script,./test.sh -f tsim/stream/session1.sim -,,y,script,./test.sh -f tsim/stream/sliding.sim -,,y,script,./test.sh -f tsim/stream/state0.sim -,,y,script,./test.sh -f tsim/stream/state1.sim -,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim -,,y,script,./test.sh -f tsim/stream/triggerSession0.sim -,,y,script,./test.sh -f tsim/stream/udTableAndTag0.sim -,,y,script,./test.sh -f tsim/stream/udTableAndTag1.sim -,,y,script,./test.sh -f tsim/stream/udTableAndTag2.sim -,,y,script,./test.sh -f tsim/stream/windowClose.sim -,,y,script,./test.sh -f tsim/trans/lossdata1.sim -,,y,script,./test.sh -f tsim/trans/create_db.sim -,,y,script,./test.sh -f tsim/tmq/basic1.sim -,,y,script,./test.sh -f tsim/tmq/basic2.sim -,,y,script,./test.sh -f tsim/tmq/basic3.sim -,,y,script,./test.sh -f tsim/tmq/basic4.sim -,,y,script,./test.sh -f tsim/tmq/basic1Of2Cons.sim -,,y,script,./test.sh -f tsim/tmq/basic2Of2Cons.sim -,,y,script,./test.sh -f tsim/tmq/basic3Of2Cons.sim -,,y,script,./test.sh -f tsim/tmq/basic4Of2Cons.sim -,,y,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim -,,y,script,./test.sh -f tsim/tmq/topic.sim -,,y,script,./test.sh -f tsim/tmq/snapshot.sim -,,y,script,./test.sh -f tsim/tmq/snapshot1.sim -,,y,script,./test.sh -f tsim/stable/alter_comment.sim -,,y,script,./test.sh -f tsim/stable/alter_count.sim -,,y,script,./test.sh -f tsim/stable/alter_import.sim -,,y,script,./test.sh -f tsim/stable/alter_insert1.sim -,,y,script,./test.sh -f tsim/stable/alter_insert2.sim -,,y,script,./test.sh -f tsim/stable/alter_metrics.sim -,,y,script,./test.sh -f tsim/stable/column_add.sim -,,y,script,./test.sh -f tsim/stable/column_drop.sim -,,y,script,./test.sh -f tsim/stable/column_modify.sim -,,y,script,./test.sh -f tsim/stable/disk.sim -,,y,script,./test.sh -f tsim/stable/dnode3.sim -,,y,script,./test.sh -f tsim/stable/metrics.sim -,,y,script,./test.sh -f tsim/stable/refcount.sim -,,y,script,./test.sh -f tsim/stable/tag_add.sim -,,y,script,./test.sh -f tsim/stable/tag_drop.sim -,,y,script,./test.sh -f tsim/stable/tag_filter.sim -,,y,script,./test.sh -f tsim/stable/tag_modify.sim -,,y,script,./test.sh -f tsim/stable/tag_rename.sim -,,y,script,./test.sh -f tsim/stable/values.sim -,,y,script,./test.sh -f tsim/stable/vnode3.sim -,,y,script,./test.sh -f tsim/stable/metrics_idx.sim -,,n,script,./test.sh -f tsim/sma/drop_sma.sim -,,y,script,./test.sh -f tsim/sma/sma_leak.sim -,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim -,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim -,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim -,,n,script,./test.sh -f tsim/valgrind/checkError1.sim -,,n,script,./test.sh -f tsim/valgrind/checkError2.sim -,,n,script,./test.sh -f tsim/valgrind/checkError3.sim -,,n,script,./test.sh -f tsim/valgrind/checkError4.sim -,,n,script,./test.sh -f tsim/valgrind/checkError5.sim -,,n,script,./test.sh -f tsim/valgrind/checkError6.sim -,,n,script,./test.sh -f tsim/valgrind/checkError7.sim -,,n,script,./test.sh -f tsim/valgrind/checkError8.sim -,,n,script,./test.sh -f tsim/valgrind/checkUdf.sim -,,y,script,./test.sh -f 
tsim/vnode/replica3_basic.sim -,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim -,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim -,,y,script,./test.sh -f tsim/vnode/replica3_many.sim -,,y,script,./test.sh -f tsim/vnode/replica3_import.sim -,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim -,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim -,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim -,,y,script,./test.sh -f tsim/vnode/stable_dnode3.sim -,,y,script,./test.sh -f tsim/vnode/stable_replica3_dnode6.sim -,,y,script,./test.sh -f tsim/vnode/stable_replica3_vnode3.sim -,,y,script,./test.sh -f tsim/sync/3Replica1VgElect.sim -,,y,script,./test.sh -f tsim/sync/3Replica5VgElect.sim -,,y,script,./test.sh -f tsim/sync/oneReplica1VgElect.sim -,,y,script,./test.sh -f tsim/sync/oneReplica5VgElect.sim -,,y,script,./test.sh -f tsim/catalog/alterInCurrent.sim -,,y,script,./test.sh -f tsim/scalar/in.sim -,,y,script,./test.sh -f tsim/scalar/scalar.sim -,,y,script,./test.sh -f tsim/scalar/filter.sim -,,y,script,./test.sh -f tsim/scalar/caseWhen.sim -,,y,script,./test.sh -f tsim/scalar/tsConvert.sim -,,y,script,./test.sh -f tsim/alter/cached_schema_after_alter.sim -,,y,script,./test.sh -f tsim/alter/dnode.sim -,,y,script,./test.sh -f tsim/alter/table.sim -,,y,script,./test.sh -f tsim/cache/new_metrics.sim -,,y,script,./test.sh -f tsim/cache/restart_table.sim -,,y,script,./test.sh -f tsim/cache/restart_metrics.sim -,,y,script,./test.sh -f tsim/column/commit.sim -,,y,script,./test.sh -f tsim/column/metrics.sim -,,y,script,./test.sh -f tsim/column/table.sim -,,y,script,./test.sh -f tsim/compress/commitlog.sim -,,y,script,./test.sh -f tsim/compress/compress2.sim -,,y,script,./test.sh -f tsim/compress/compress.sim -,,y,script,./test.sh -f tsim/compress/uncompress.sim -,,y,script,./test.sh -f tsim/compute/avg.sim -,,y,script,./test.sh -f tsim/compute/block_dist.sim -,,y,script,./test.sh -f tsim/compute/bottom.sim -,,y,script,./test.sh -f tsim/compute/count.sim -,,y,script,./test.sh -f tsim/compute/diff.sim -,,y,script,./test.sh -f tsim/compute/diff2.sim -,,y,script,./test.sh -f tsim/compute/first.sim -,,y,script,./test.sh -f tsim/compute/interval.sim -,,y,script,./test.sh -f tsim/compute/last_row.sim -,,y,script,./test.sh -f tsim/compute/last.sim -,,y,script,./test.sh -f tsim/compute/leastsquare.sim -,,y,script,./test.sh -f tsim/compute/max.sim -,,y,script,./test.sh -f tsim/compute/min.sim -,,y,script,./test.sh -f tsim/compute/null.sim -,,y,script,./test.sh -f tsim/compute/percentile.sim -,,y,script,./test.sh -f tsim/compute/stddev.sim -,,y,script,./test.sh -f tsim/compute/sum.sim -,,y,script,./test.sh -f tsim/compute/top.sim -,,y,script,./test.sh -f tsim/field/2.sim -,,y,script,./test.sh -f tsim/field/3.sim -,,y,script,./test.sh -f tsim/field/4.sim -,,y,script,./test.sh -f tsim/field/5.sim -,,y,script,./test.sh -f tsim/field/6.sim -,,y,script,./test.sh -f tsim/field/binary.sim -,,y,script,./test.sh -f tsim/field/bigint.sim -,,y,script,./test.sh -f tsim/field/bool.sim -,,y,script,./test.sh -f tsim/field/double.sim -,,y,script,./test.sh -f tsim/field/float.sim -,,y,script,./test.sh -f tsim/field/int.sim -,,y,script,./test.sh -f tsim/field/single.sim -,,y,script,./test.sh -f tsim/field/smallint.sim -,,y,script,./test.sh -f tsim/field/tinyint.sim -,,y,script,./test.sh -f tsim/field/unsigined_bigint.sim -,,y,script,./test.sh -f tsim/vector/metrics_field.sim -,,y,script,./test.sh -f tsim/vector/metrics_mix.sim -,,y,script,./test.sh -f tsim/vector/metrics_query.sim 
-,,y,script,./test.sh -f tsim/vector/metrics_tag.sim -,,y,script,./test.sh -f tsim/vector/metrics_time.sim -,,y,script,./test.sh -f tsim/vector/multi.sim -,,y,script,./test.sh -f tsim/vector/single.sim -,,y,script,./test.sh -f tsim/vector/table_field.sim -,,y,script,./test.sh -f tsim/vector/table_mix.sim -,,y,script,./test.sh -f tsim/vector/table_query.sim -,,y,script,./test.sh -f tsim/vector/table_time.sim -,,y,script,./test.sh -f tsim/wal/kill.sim -,,y,script,./test.sh -f tsim/tag/3.sim -,,y,script,./test.sh -f tsim/tag/4.sim -,,y,script,./test.sh -f tsim/tag/5.sim -,,y,script,./test.sh -f tsim/tag/6.sim -,,y,script,./test.sh -f tsim/tag/add.sim -,,y,script,./test.sh -f tsim/tag/bigint.sim -,,y,script,./test.sh -f tsim/tag/binary_binary.sim -,,y,script,./test.sh -f tsim/tag/binary.sim -,,y,script,./test.sh -f tsim/tag/bool_binary.sim -,,y,script,./test.sh -f tsim/tag/bool_int.sim -,,y,script,./test.sh -f tsim/tag/bool.sim -,,y,script,./test.sh -f tsim/tag/change.sim -,,y,script,./test.sh -f tsim/tag/column.sim -,,y,script,./test.sh -f tsim/tag/commit.sim -,,y,script,./test.sh -f tsim/tag/create.sim -,,y,script,./test.sh -f tsim/tag/delete.sim -,,y,script,./test.sh -f tsim/tag/double.sim -,,y,script,./test.sh -f tsim/tag/filter.sim -,,y,script,./test.sh -f tsim/tag/float.sim -,,y,script,./test.sh -f tsim/tag/int_binary.sim -,,y,script,./test.sh -f tsim/tag/int_float.sim -,,y,script,./test.sh -f tsim/tag/int.sim -,,y,script,./test.sh -f tsim/tag/set.sim -,,y,script,./test.sh -f tsim/tag/smallint.sim -,,y,script,./test.sh -f tsim/tag/tinyint.sim -,,y,script,./test.sh -f tsim/tag/drop_tag.sim -,,y,script,./test.sh -f tsim/tag/tbNameIn.sim -,,y,script,./test.sh -f tmp/monitor.sim - #system test ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_str.py @@ -449,6 +32,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb2.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py @@ -568,6 +152,7 @@ ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py +,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py @@ -822,7 +407,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tagFilter.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/projectionDesc.py -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3398.py -N 3 -n 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3405_3398_3423.py -N 3 -n 3 ,,n,system-test,python3 ./test.py -f 2-query/queryQnode.py ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode1mnode.py @@ -932,7 +517,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py 
-Q 2 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 2 @@ -1027,7 +612,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 @@ -1123,7 +708,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 4 -,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4 +#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 4 #,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 4 #,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 4 @@ -1595,6 +1180,7 @@ ,,y,script,./test.sh -f tsim/tag/drop_tag.sim ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim + #develop test ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py ,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py diff --git a/tests/script/tsim/parser/projection_limit_offset.sim b/tests/script/tsim/parser/projection_limit_offset.sim index 2d99b0a296621ca0b29f908dba8714afde47076c..cab46a93d3cb5600d4f6c9a0ec196874ded8e93c 100644 --- a/tests/script/tsim/parser/projection_limit_offset.sim +++ b/tests/script/tsim/parser/projection_limit_offset.sim @@ -380,10 +380,10 @@ if $row != 8 then endi sql select diff(k) from tm0 -if $row != 3 then +if $row != 4 then return -1 endi -if $data20 != -1 then +if $data20 != NULL then return -1 endi diff --git a/tests/script/tsim/query/delete_and_query.sim b/tests/script/tsim/query/delete_and_query.sim new file mode 100644 index 0000000000000000000000000000000000000000..3004ababa1c79d1cbc90925da0af8a0f3b1fb427 --- /dev/null +++ b/tests/script/tsim/query/delete_and_query.sim @@ -0,0 +1,25 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database if not exists test +sql use test +sql create table t1 (ts timestamp, c2 int) +sql insert into t1 values(now, 1) + +sql delete from t1 where ts is null +sql delete from t1 where ts < now +sql select ts from t1 order by ts asc + +print ----------rows: $rows +if $rows != 0 then + return -1 +endi + +sql select ts from t1 order by ts desc +print ----------rows: $rows +if $rows != 0 then + return -1 +endi + diff --git a/tests/script/tsim/query/explain_tsorder.sim b/tests/script/tsim/query/explain_tsorder.sim new file mode 100644 index 0000000000000000000000000000000000000000..202f85bcf0745e883b6ea1e58094ea3dbed64a3c --- 
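Note on the new delete_and_query.sim case above: after every row of t1 has been deleted, `select ts from t1 order by ts` must return zero rows in both sort directions. The same scenario can be reproduced from the TDengine C client; the sketch below is not part of the patch and assumes a local server with default credentials.

```c
#include <stdio.h>

#include "taos.h"

/* Run one statement; returns NULL (after freeing the result) on error. */
static TAOS_RES *run(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "`%s` failed: %s\n", sql, taos_errstr(res));
    taos_free_result(res);
    return NULL;
  }
  return res;
}

/* Count the rows produced by a query; -1 on error. */
static int countRows(TAOS *conn, const char *sql) {
  TAOS_RES *res = run(conn, sql);
  if (res == NULL) return -1;
  int n = 0;
  while (taos_fetch_row(res) != NULL) n++;
  taos_free_result(res);
  return n;
}

int main(void) {
  /* Assumed local deployment and default credentials. */
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  const char *setup[] = {
      "create database if not exists test", "use test",
      "create table if not exists t1 (ts timestamp, c2 int)",
      "insert into t1 values(now, 1)",
      "delete from t1 where ts < now",
  };
  for (int i = 0; i < (int)(sizeof(setup) / sizeof(setup[0])); i++) {
    TAOS_RES *res = run(conn, setup[i]);
    if (res == NULL) { taos_close(conn); return 1; }
    taos_free_result(res);
  }

  int asc  = countRows(conn, "select ts from t1 order by ts asc");
  int desc = countRows(conn, "select ts from t1 order by ts desc");
  printf("rows asc=%d desc=%d (both expected to be 0)\n", asc, desc);

  taos_close(conn);
  return (asc == 0 && desc == 0) ? 0 : 1;
}
```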
/dev/null +++ b/tests/script/tsim/query/explain_tsorder.sim @@ -0,0 +1,37 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test +sql use test +sql CREATE STABLE `meters` (`ts` TIMESTAMP, `c2` INT) TAGS (`cc` VARCHAR(3)) + + +sql insert into d1 using meters tags("MY") values("2022-05-15 00:01:08.000 ",234) +sql insert into d1 using meters tags("MY") values("2022-05-16 00:01:08.000 ",136) +sql insert into d1 using meters tags("MY") values("2022-05-17 00:01:08.000 ", 59) +sql insert into d1 using meters tags("MY") values("2022-05-18 00:01:08.000 ", 58) +sql insert into d1 using meters tags("MY") values("2022-05-19 00:01:08.000 ",243) +sql insert into d1 using meters tags("MY") values("2022-05-20 00:01:08.000 ",120) +sql insert into d1 using meters tags("MY") values("2022-05-21 00:01:08.000 ", 11) +sql insert into d1 using meters tags("MY") values("2022-05-22 00:01:08.000 ",196) +sql insert into d1 using meters tags("MY") values("2022-05-23 00:01:08.000 ",116) +sql insert into d1 using meters tags("MY") values("2022-05-24 00:01:08.000 ",210) + +sql insert into d2 using meters tags("HT") values("2022-05-15 00:01:08.000", 234) +sql insert into d2 using meters tags("HT") values("2022-05-16 00:01:08.000", 136) +sql insert into d2 using meters tags("HT") values("2022-05-17 00:01:08.000", 59) +sql insert into d2 using meters tags("HT") values("2022-05-18 00:01:08.000", 58) +sql insert into d2 using meters tags("HT") values("2022-05-19 00:01:08.000", 243) +sql insert into d2 using meters tags("HT") values("2022-05-20 00:01:08.000", 120) +sql insert into d2 using meters tags("HT") values("2022-05-21 00:01:08.000", 11) +sql insert into d2 using meters tags("HT") values("2022-05-22 00:01:08.000", 196) +sql insert into d2 using meters tags("HT") values("2022-05-23 00:01:08.000", 116) +sql insert into d2 using meters tags("HT") values("2022-05-24 00:01:08.000", 210) + +#sleep 10000000 +system taos -P7100 -s 'source tsim/query/t/explain_tsorder.sql' | grep -v 'Query OK' | grep -v 'Client Version' > /tmp/explain_tsorder.result +system echo ----------------------diff start----------------------- +system git diff --exit-code --color tsim/query/r/explain_tsorder.result /tmp/explain_tsorder.result +system echo ----------------------diff succeed----------------------- diff --git a/tests/script/tsim/query/r/explain_tsorder.result b/tests/script/tsim/query/r/explain_tsorder.result new file mode 100644 index 0000000000000000000000000000000000000000..6c63a343de1b945ce681aaceadedfabb612f2136 --- /dev/null +++ b/tests/script/tsim/query/r/explain_tsorder.result @@ -0,0 +1,2560 @@ +Copyright (c) 2022 by TDengine, all rights reserved. + +taos> source tsim/query/t/explain_tsorder.sql +taos> use test; +Database changed. 
+ +taos> explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 7.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 8.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 9.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 10.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 11.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 12.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 13.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 14.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 15.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 16.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 17.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) 
+*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 7.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 8.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 9.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 10.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 11.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 12.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 13.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 14.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 15.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 16.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 17.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 4.row *************************** +QUERY_PLAN: Merge 
ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 7.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 8.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 9.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 10.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 11.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=asc output_order=asc ) +*************************** 12.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 13.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 14.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 15.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 16.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 17.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=asc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 7.row 
*************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 8.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 9.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 10.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 11.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=asc output_order=desc ) +*************************** 12.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 13.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 14.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 15.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 16.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 17.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=asc output_order=desc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row 
*************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row 
*************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** 
+QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] 
+*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=desc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=desc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 
width=108 input_order=desc output_order=desc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row 
*************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=desc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=desc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: 
columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 14.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 16.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 17.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 20.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 23.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 24.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 25.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 26.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 27.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 32.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 33.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d\G; +*************************** 
1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row 
*************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 
width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 
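(Reviewer note.) The recorded plans contrast ordering the subquery by the window-start alias `a` (`_wstart`) with ordering by `b` (`last(ts)`): with `order by a` the interval output order is preserved and the outer aggregate node reports `input_order=asc` or `desc`, while with `order by b` an explicit Sort node appears inside the subquery and the outer operators fall back to `input_order=unknown`. A minimal pair, copied from the statements in this hunk:

```sql
-- Subquery ordered by the window start: order is kept, no extra Sort in the plan.
explain verbose true select last(a) as d
  from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a)
  order by d;

-- Subquery ordered by last(ts): the plan adds a Sort node and order becomes unknown.
explain verbose true select last(a) as d
  from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b)
  order by d;
```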
+*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=8 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row 
*************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row 
*************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=16 groups=1 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters 
(columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=16 groups=1 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 
width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=16 groups=1 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row 
*************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from 
meters interval(10s) order by b desc) group by c order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Aggragate (functions=1 width=16 groups=1 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 5.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 6.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 8.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 10.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 14.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 15.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 16.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 18.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 19.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 20.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 22.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 24.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 29.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 30.row 
*************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 31.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 33.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 34.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 35.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=null width=24 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [10001, 19999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column a (functions=2 width=16 input_order=asc output_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=unknown ) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 16.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 18.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 19.row *************************** +QUERY_PLAN: Filter: ((a > 10000) AND (a < 20000)) +*************************** 20.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 21.row 
*************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 22.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 23.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 24.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 25.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 30.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 31.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 33.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 34.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 37.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 38.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 39.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 40.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 41.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 42.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=null width=24 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [10001, 19999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column a (functions=2 width=16 input_order=desc output_order=asc ) 
+*************************** 7.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=desc ) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 16.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 17.row *************************** +QUERY_PLAN: Filter: ((a > 10000) AND (a < 20000)) +*************************** 18.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 19.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 21.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 22.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 23.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 24.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 25.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 28.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 29.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 31.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 32.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 33.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 34.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s 
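(Reviewer note.) The last group of cases layers a range predicate, an outer `interval(10s)`, and `fill(NULL)` on top of the subquery. In the recorded plans the predicate is pushed down into the subquery's Merge Aligned Interval node (`Filter: ((a > 10000) AND (a < 20000))`, or the equivalent on `b`), the outer window runs as `Interval on Column a` (or `b`) rather than on `ts`, and the Fill node's `Time Range: [10001, 19999]` is derived from the filter bounds. One such statement, copied from this hunk:

```sql
explain verbose true select last(a) as d
  from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc)
  where a > 10000 and a < 20000
  interval(10s) fill(NULL)
  order by d;
```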
+*************************** 37.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 38.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 39.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 40.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=asc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=null width=24 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [10001, 19999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column b (functions=2 width=16 input_order=desc output_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** +QUERY_PLAN: -> Projection (columns=2 width=16 input_order=desc ) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=desc (columns=2 width=16) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 16.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=2 width=16 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 18.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 19.row *************************** +QUERY_PLAN: Filter: ((b > 10000) AND (b < 20000)) +*************************** 20.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 21.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 22.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 23.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 24.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 25.row 
*************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 30.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 31.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 33.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 34.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 37.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 38.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 39.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 40.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 41.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 42.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=desc (columns=1 width=8) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=1 width=8 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=null width=24 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [10001, 19999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column b (functions=2 width=16 input_order=desc output_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** +QUERY_PLAN: -> Projection (columns=2 width=16 input_order=desc ) +*************************** 
11.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=desc (columns=2 width=16) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 16.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=2 width=16 input_order=asc output_order=asc) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=2 width=16 +*************************** 18.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 19.row *************************** +QUERY_PLAN: Filter: ((b > 10000) AND (b < 20000)) +*************************** 20.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 21.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 22.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 23.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 24.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 25.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 28.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 29.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 30.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 31.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 33.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 34.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 37.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 38.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 39.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 40.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) 
+*************************** 41.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 42.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc; + _wstart | last(ts) | avg(c2) | +================================================================================ + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + +taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc; + _wstart | last(ts) | avg(c2) | +================================================================================ + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + +taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc; + _wstart | first(ts) | avg(c2) | +================================================================================ + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + +taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc; + _wstart | first(ts) | avg(c2) | +================================================================================ + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 
00:01:08.000 | 243.000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d + d | +========================== + 2022-05-15 00:01:00.000 | + 2022-05-16 00:01:00.000 | + 2022-05-17 00:01:00.000 | + 2022-05-18 00:01:00.000 | + 2022-05-19 00:01:00.000 | + 2022-05-20 00:01:00.000 | + 2022-05-21 00:01:00.000 | + 2022-05-22 00:01:00.000 | + 2022-05-23 00:01:00.000 | + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d; + d | +========================== + 2022-05-15 00:01:00.000 | + 2022-05-16 00:01:00.000 | + 2022-05-17 00:01:00.000 | + 2022-05-18 00:01:00.000 | + 2022-05-19 00:01:00.000 | + 2022-05-20 00:01:00.000 | + 2022-05-21 00:01:00.000 | + 2022-05-22 00:01:00.000 | + 2022-05-23 00:01:00.000 | + 2022-05-24 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + 2022-05-23 00:01:00.000 | + 2022-05-22 00:01:00.000 | + 2022-05-21 00:01:00.000 | + 2022-05-20 00:01:00.000 | + 2022-05-19 
00:01:00.000 | + 2022-05-18 00:01:00.000 | + 2022-05-17 00:01:00.000 | + 2022-05-16 00:01:00.000 | + 2022-05-15 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d desc; + d | +========================== + 2022-05-24 00:01:00.000 | + 2022-05-23 00:01:00.000 | + 2022-05-22 00:01:00.000 | + 2022-05-21 00:01:00.000 | + 2022-05-20 00:01:00.000 | + 2022-05-19 00:01:00.000 | + 2022-05-18 00:01:00.000 | + 2022-05-17 00:01:00.000 | + 2022-05-16 00:01:00.000 | + 2022-05-15 00:01:00.000 | + +taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-19 00:01:08.000' interval(10s) order by d; + d | +========================== + 2022-05-15 00:01:00.000 | + 2022-05-16 00:01:00.000 | + 2022-05-17 00:01:00.000 | + 2022-05-18 00:01:00.000 | + 2022-05-19 00:01:00.000 | + +taos> select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d; + d | +========================== + 2022-05-15 00:01:08.000 | + 2022-05-16 00:01:08.000 | + 2022-05-17 00:01:08.000 | + 2022-05-18 00:01:08.000 | + 2022-05-24 00:01:08.000 | + +taos> select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d desc; + d | +========================== + 2022-05-24 00:01:08.000 | + 2022-05-18 00:01:08.000 | + 2022-05-17 00:01:08.000 | + 2022-05-16 00:01:08.000 | + 2022-05-15 00:01:08.000 | + +taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc; + _wstart | d | avg(c) | +================================================================================ + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | + 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.800000000 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | + 2022-05-16 16:00:00.000 | 
2022-05-16 19:13:00.000 | 74.400000000 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + +taos> explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=linear width=32 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=4 width=32 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [1652544060001, 1653062467999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column a (functions=4 width=32 input_order=desc output_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=4 width=32 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=5h offset=0a sliding=5h +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=24 input_order=desc ) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=desc output_order=desc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 16.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 17.row *************************** +QUERY_PLAN: Filter: ((a > 1652544060000) AND (a < 1653062468000)) +*************************** 18.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 19.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=desc output_order=desc) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 21.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 22.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, desc +*************************** 23.row 
*************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 24.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 25.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 28.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 29.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 31.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 32.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 33.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 34.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=desc ) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 37.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 38.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 39.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 40.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a asc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=3 width=24) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 3.row *************************** +QUERY_PLAN: -> Fill (mode=linear width=32 input_order=asc ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=4 width=32 +*************************** 5.row *************************** +QUERY_PLAN: Time Range: [1652544060001, 1653062467999] +*************************** 6.row *************************** +QUERY_PLAN: -> Interval on Column a (functions=4 width=32 input_order=asc output_order=asc ) +*************************** 7.row *************************** +QUERY_PLAN: Output: columns=4 width=32 +*************************** 8.row *************************** +QUERY_PLAN: Time Window: interval=5h offset=0a sliding=5h +*************************** 9.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 10.row *************************** 
+QUERY_PLAN: -> Projection (columns=3 width=24 input_order=asc ) +*************************** 11.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 12.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 13.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 14.row *************************** +QUERY_PLAN: -> Merge Aligned Interval on Column (functions=3 width=24 input_order=asc output_order=asc) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=3 width=24 +*************************** 16.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 17.row *************************** +QUERY_PLAN: Filter: ((a > 1652544060000) AND (a < 1653062468000)) +*************************** 18.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 19.row *************************** +QUERY_PLAN: -> SortMerge (columns=3 width=108 input_order=asc output_order=asc) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 21.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 22.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, asc +*************************** 23.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 24.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 25.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 26.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 27.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 28.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 29.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 30.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 31.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 32.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=108) +*************************** 33.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 34.row *************************** +QUERY_PLAN: -> Interval on Column ts (functions=3 width=108 input_order=desc output_order=asc ) +*************************** 35.row *************************** +QUERY_PLAN: Output: columns=3 width=108 +*************************** 36.row *************************** +QUERY_PLAN: Time Window: interval=10s offset=0a sliding=10s +*************************** 37.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 38.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 39.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 40.row 
*************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc)\G; +*************************** 1.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=unknown output_order=unknown) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 3.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 4.row *************************** +QUERY_PLAN: Merge Key: b desc +*************************** 5.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 7.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=2 width=12) +*************************** 8.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 9.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 10.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 11.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 12.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 14.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=2 width=12) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 16.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 17.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 18.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> select * from (select ts as a, c2 as b from meters order by c2 desc); + a | b | +======================================== + 2022-05-19 00:01:08.000 | 243 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-21 00:01:08.000 | 11 | + +taos> explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Sort input_order=unknown output_order=desc (columns=2 width=12) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 3.row 
*************************** +QUERY_PLAN: -> Projection (columns=2 width=12 input_order=unknown ) +*************************** 4.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 5.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 6.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 7.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=unknown output_order=unknown) +*************************** 8.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 9.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 10.row *************************** +QUERY_PLAN: Merge Key: b desc +*************************** 11.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 12.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 13.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=2 width=12) +*************************** 14.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 15.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 16.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 17.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 18.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 19.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 20.row *************************** +QUERY_PLAN: -> Sort input_order=asc output_order=unknown (columns=2 width=12) +*************************** 21.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 22.row *************************** +QUERY_PLAN: -> Table Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 24.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc; + a | b | +======================================== + 2022-05-24 00:01:08.000 | 210 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-15 00:01:08.000 | 234 | + +taos> explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts\G; +*************************** 1.row *************************** +QUERY_PLAN: -> 
Projection (columns=3 width=16 input_order=unknown ) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=16 +*************************** 3.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> Inner join (columns=4 width=24 input_order=asc ) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=4 width=24 +*************************** 7.row *************************** +QUERY_PLAN: Join Cond: (`test`.`a`.`ts` = `test`.`b`.`ts`) +*************************** 8.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=unknown output_order=unknown) +*************************** 9.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 10.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 11.row *************************** +QUERY_PLAN: Merge Key: ts asc +*************************** 12.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 14.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 16.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 17.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 18.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 19.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 21.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 22.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=unknown output_order=unknown) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 24.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 25.row *************************** +QUERY_PLAN: Merge Key: ts asc +*************************** 26.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 29.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 30.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 31.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) 
+*************************** 32.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 33.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=16 input_order=unknown ) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=16 +*************************** 3.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> Inner join (columns=4 width=24 input_order=asc ) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=4 width=24 +*************************** 7.row *************************** +QUERY_PLAN: Join Cond: (`test`.`a`.`ts` = `test`.`b`.`ts`) +*************************** 8.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=asc output_order=asc) +*************************** 9.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 10.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 11.row *************************** +QUERY_PLAN: Merge Key: ts asc +*************************** 12.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 14.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 16.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 17.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 18.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 19.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 21.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 22.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=asc output_order=asc) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 24.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 25.row *************************** +QUERY_PLAN: Merge Key: ts asc +*************************** 26.row 
*************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 29.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 30.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 31.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 32.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 33.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 34.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 35.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts; + ts | c2 | c2 | +====================================================== + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + +taos> select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts desc; + ts | c2 | c2 | +====================================================== + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 
2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + +taos> explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts desc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=16 input_order=unknown ) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=16 +*************************** 3.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> Inner join (columns=4 width=24 input_order=desc ) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=4 width=24 +*************************** 7.row *************************** +QUERY_PLAN: Join Cond: (`test`.`a`.`ts` = `b`.`ts`) +*************************** 8.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=desc output_order=desc) +*************************** 9.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 10.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 11.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, ts desc +*************************** 12.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 14.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 16.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 17.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 18.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 19.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 20.row *************************** +QUERY_PLAN: Output: 
columns=2 width=12 +*************************** 21.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 22.row *************************** +QUERY_PLAN: -> Projection (columns=2 width=12 input_order=desc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 24.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=desc output_order=desc) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 29.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, ts desc +*************************** 30.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 31.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 32.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 33.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 34.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 35.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|0 desc|1]) +*************************** 38.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 39.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts asc\G; +*************************** 1.row *************************** +QUERY_PLAN: -> Projection (columns=3 width=16 input_order=unknown ) +*************************** 2.row *************************** +QUERY_PLAN: Output: columns=3 width=16 +*************************** 3.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 4.row *************************** +QUERY_PLAN: Merge ResBlocks: True +*************************** 5.row *************************** +QUERY_PLAN: -> Inner join (columns=4 width=24 input_order=asc ) +*************************** 6.row *************************** +QUERY_PLAN: Output: columns=4 width=24 +*************************** 7.row *************************** +QUERY_PLAN: Join Cond: (`test`.`a`.`ts` = `b`.`ts`) +*************************** 8.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=asc output_order=asc) +*************************** 9.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 10.row *************************** +QUERY_PLAN: Output: Ignore Group Id: 
false +*************************** 11.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, ts asc +*************************** 12.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 13.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 14.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 15.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 16.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 17.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 18.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 19.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 20.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 21.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 22.row *************************** +QUERY_PLAN: -> Projection (columns=2 width=12 input_order=asc ) +*************************** 23.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 24.row *************************** +QUERY_PLAN: Output: Ignore Group Id: true +*************************** 25.row *************************** +QUERY_PLAN: Merge ResBlocks: False +*************************** 26.row *************************** +QUERY_PLAN: -> SortMerge (columns=2 width=12 input_order=asc output_order=asc) +*************************** 27.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 28.row *************************** +QUERY_PLAN: Output: Ignore Group Id: false +*************************** 29.row *************************** +QUERY_PLAN: Merge Key: _group_id asc, ts asc +*************************** 30.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 31.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 32.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 33.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 34.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] +*************************** 35.row *************************** +QUERY_PLAN: -> Data Exchange 1:1 (width=12) +*************************** 36.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 37.row *************************** +QUERY_PLAN: -> Table Merge Scan on meters (columns=2 width=12 order=[asc|1 desc|0]) +*************************** 38.row *************************** +QUERY_PLAN: Output: columns=2 width=12 +*************************** 39.row *************************** +QUERY_PLAN: Time Range: [-9223372036854775808, 9223372036854775807] + +taos> select a.ts, a.c2, b.c2 from meters as a join (select * from meters order by ts desc) b on a.ts = b.ts order by a.ts asc; + ts 
| c2 | c2 | +====================================================== + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-15 00:01:08.000 | 234 | 234 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-16 00:01:08.000 | 136 | 136 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-17 00:01:08.000 | 59 | 59 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-18 00:01:08.000 | 58 | 58 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-19 00:01:08.000 | 243 | 243 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-20 00:01:08.000 | 120 | 120 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-21 00:01:08.000 | 11 | 11 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-22 00:01:08.000 | 196 | 196 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-23 00:01:08.000 | 116 | 116 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + 2022-05-24 00:01:08.000 | 210 | 210 | + diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index 849aeb2ac5dabc46e6770de956e4e885e988e2ab..f49a8e0a7d7e4c0780e77bb3ad3f1ae8ef5a3b65 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -131,4 +131,8 @@ print $rows if $rows != 9 then return -1 endi + +print =========================== td-24781 +sql select DISTINCT (`precision`) from `information_schema`.`ins_databases` PARTITION BY `precision` + #system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/t/explain_tsorder.sql b/tests/script/tsim/query/t/explain_tsorder.sql new file mode 100644 index 0000000000000000000000000000000000000000..d3264d8895246a4151a56b28cdc7968b5969a4e0 --- /dev/null +++ b/tests/script/tsim/query/t/explain_tsorder.sql @@ -0,0 +1,73 @@ +use test; +explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc\G; +explain verbose true select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc\G; +explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc\G; +explain verbose true select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d\G; +explain verbose true select last(a) as d 
from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc\G; + + +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc\G; + +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d desc\G; + + +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G; +explain verbose true select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > 10000 and a < 20000 interval(10s) fill(NULL) order by d\G; +explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d\G; +explain verbose true select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > 10000 and b < 20000 interval(10s) fill(NULL) order by d desc\G; + + +select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc; +select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc; +select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc; +select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d desc; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a) order by d desc; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) order by d desc; + + +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from 
meters interval(10s) order by b) order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) order by d desc; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) order by d desc; + +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b) group by c order by d desc; +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) group by c order by d desc; + +select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-19 00:01:08.000' interval(10s) order by d; +select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d; +select last(b) as d from (select last(ts) as b, avg(c2) as c from meters interval(10s) order by b desc) where b > '2022-05-15 00:01:00.000' and b < '2022-05-19 00:01:08.000' interval(10s) order by d desc; +select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc; + +explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; +explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a asc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; + +explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc)\G; +select * from (select ts as a, c2 as b from meters order by c2 desc); + +explain verbose true select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc\G; +select * from (select ts as a, c2 as b from meters order by c2 desc) order by a desc; + +explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts\G; +explain verbose true select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts\G; +select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts; +select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order by a.ts desc; +explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts desc\G; +explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts asc\G; +select a.ts, a.c2, b.c2 from meters as a join (select * from meters 
order by ts desc) b on a.ts = b.ts order by a.ts asc; diff --git a/tests/script/tsim/query/unionall_as_table.sim b/tests/script/tsim/query/unionall_as_table.sim index 4d8f99071829050a5da6de2c6b1ea2feb41d27bb..f8145d4e9763621a37719fb5e6c0f44728610aea 100644 --- a/tests/script/tsim/query/unionall_as_table.sim +++ b/tests/script/tsim/query/unionall_as_table.sim @@ -42,4 +42,12 @@ endi if $data00 != 4 then return -1 endi + +sql create table ctcount(ts timestamp, f int); +sql insert into ctcount(ts) values(now)(now+1s); +sql select count(*) from (select f from ctcount); +print $data00 +if $data00 != 2 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim index 242231e40864462be62a0206ad3aba4de9205e4b..692212d511160033e909836a12e2f4747251f574 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim @@ -340,6 +340,95 @@ if $data05 != 30.000000000 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT +print =============== select with _wstart/order by _wstart from stb from file in designated vgroup +sql select _wstart, _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart; +print $data00 $data01 $data02 $data03 $data04 +if $rows != 1 then + print rows $rows != 1 + return -1 +endi + +if $data02 != -13 then + print data02 $data02 != -13 + return -1 +endi + +if $data03 != 20.00000 then + print data03 $data03 != 20.00000 + return -1 +endi + +if $data04 != 20 then + print data04 $data04 != 20 + return -1 +endi + +print =============== select without _wstart/with order by _wstart from stb from file in designated vgroup +sql select _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart; +print $data00 $data01 $data02 $data03 +if $rows != 1 then + print rows $rows != 1 + return -1 +endi + +if $data01 != -13 then + print data01 $data01 != -13 + return -1 +endi + +if $data02 != 20.00000 then + print data02 $data02 != 20.00000 + return -1 +endi + +if $data03 != 20 then + print data03 $data03 != 20 + return -1 +endi + +print =============== select * from stb from file in common vgroups +sql select _wstart, _wend, min(c1),max(c2),max(c1),max(c3) from stb interval(5m,10s) sliding(5m) order by _wstart; +print $data00 $data01 $data02 $data03 $data04 $data05 +if $rows != 1 then + print rows $rows != 1 + return -1 +endi + +if $data02 != -13 then + print data02 $data02 != -13 + return -1 +endi + +if $data03 != 20.00000 then + print data03 $data03 != 20.00000 + return -1 +endi + +if $data04 != 20 then + print data04 $data04 != 20 + return -1 +endi +if $data05 != 30.000000000 then + print data05 $data05 != 30.000000000 + return -1 +endi + +sql delete from stb; + +print =============== query after delete in common vgroups +sql select _wstart, _wend, min(c1),max(c2),max(c1),max(c3) from stb interval(5m,10s) sliding(5m) order by _wstart; +if $rows != 0 then + print rows $rows != 0 + return -1 +endi + +sleep 2000 +print =============== query after delete in designated vgroups +sql select _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart; +if $rows != 0 then + print rows $rows != 0 + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/system-test/0-others/multilevel.py b/tests/system-test/0-others/multilevel.py index 
7ad4eba6453877149e2be6a01f76d6cf50b906bc..f086dcb735e73037866e2a382322c9c4046b00bb 100644 --- a/tests/system-test/0-others/multilevel.py +++ b/tests/system-test/0-others/multilevel.py @@ -116,7 +116,7 @@ class TDTestCase: tdSql.checkRows(1000) tdLog.info("================= step3") tdSql.execute('drop database test') - for i in range(50): + for i in range(10): tdSql.execute("create database test%d duration 1" %(i)) tdSql.execute("use test%d" %(i)) tdSql.execute("create table tb (ts timestamp,i int)") diff --git a/tests/system-test/0-others/splitVGroup.py b/tests/system-test/0-others/splitVGroup.py new file mode 100644 index 0000000000000000000000000000000000000000..32001f34b09566acd5f1faa6d7358dc1ce1ac150 --- /dev/null +++ b/tests/system-test/0-others/splitVGroup.py @@ -0,0 +1,377 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +import time +import copy +import string + +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + + # random string + def random_string(self, count): + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(count)) + + # get col value and total max min ... + def getColsValue(self, i, j): + # c1 value + if random.randint(1, 10) == 5: + c1 = None + else: + c1 = 1 + + # c2 value + if j % 3200 == 0: + c2 = 8764231 + elif random.randint(1, 10) == 5: + c2 = None + else: + c2 = random.randint(-87654297, 98765321) + + + value = f"({self.ts}, " + + # c1 + if c1 is None: + value += "null," + else: + self.c1Cnt += 1 + value += f"{c1}," + # c2 + if c2 is None: + value += "null," + else: + value += f"{c2}," + # total count + self.c2Cnt += 1 + # max + if self.c2Max is None: + self.c2Max = c2 + else: + if c2 > self.c2Max: + self.c2Max = c2 + # min + if self.c2Min is None: + self.c2Min = c2 + else: + if c2 < self.c2Min: + self.c2Min = c2 + # sum + if self.c2Sum is None: + self.c2Sum = c2 + else: + self.c2Sum += c2 + + # c3 same with ts + value += f"{self.ts})" + + # move next + self.ts += 1 + + return value + + # insert data + def insertData(self): + tdLog.info("insert data ....") + sqls = "" + for i in range(self.childCnt): + # insert child table + values = "" + pre_insert = f"insert into @db_name.t{i} values " + for j in range(self.childRow): + if values == "": + values = self.getColsValue(i, j) + else: + values += "," + self.getColsValue(i, j) + + # batch insert + if j % self.batchSize == 0 and values != "": + sql = pre_insert + values + self.exeDouble(sql) + values = "" + # append last + if values != "": + sql = pre_insert + values + self.exeDouble(sql) + values = "" + + # insert nomal talbe + for i in range(20): + self.ts += 1000 + name = self.random_string(20) + sql = f"insert into @db_name.ta values({self.ts}, {i}, {self.ts%100000}, '{name}', false)" + self.exeDouble(sql) + + # insert finished + tdLog.info(f"insert data successfully.\n" + f" inserted child table = {self.childCnt}\n" + f" inserted child rows = {self.childRow}\n" + f" total inserted rows = {self.childCnt*self.childRow}\n") + return + + def 
exeDouble(self, sql): + # dbname replace + sql1 = sql.replace("@db_name", self.db1) + + if len(sql1) > 100: + tdLog.info(sql1[:100]) + else: + tdLog.info(sql1) + tdSql.execute(sql1) + + sql2 = sql.replace("@db_name", self.db2) + if len(sql2) > 100: + tdLog.info(sql2[:100]) + else: + tdLog.info(sql2) + tdSql.execute(sql2) + + + # prepareEnv + def prepareEnv(self): + # init + self.ts = 1680000000000 + self.childCnt = 10 + self.childRow = 10000 + self.batchSize = 5000 + self.vgroups1 = 4 + self.vgroups2 = 4 + self.db1 = "db1" + self.db2 = "db2" + + # total + self.c1Cnt = 0 + self.c2Cnt = 0 + self.c2Max = None + self.c2Min = None + self.c2Sum = None + + # create database db + sql = f"create database @db_name vgroups {self.vgroups1} replica 3" + self.exeDouble(sql) + + # create super table st + sql = f"create table @db_name.st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)" + self.exeDouble(sql) + + # create child table + for i in range(self.childCnt): + sql = f"create table @db_name.t{i} using @db_name.st tags({i}) " + self.exeDouble(sql) + + # create normal table + sql = f"create table @db_name.ta(ts timestamp, c1 int, c2 bigint, c3 binary(32), c4 bool)" + self.exeDouble(sql) + + # insert data + self.insertData() + + # check data correct + def checkExpect(self, sql, expectVal): + tdSql.query(sql) + rowCnt = tdSql.getRows() + for i in range(rowCnt): + val = tdSql.getData(i,0) + if val != expectVal: + tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}") + return False + + tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}") + return True + + # init + def init(self, conn, logSql, replicaVar=1): + seed = time.clock_gettime(time.CLOCK_REALTIME) + random.seed(seed) + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + + # check query result same + def queryDouble(self, sql): + # sql + sql1 = sql.replace('@db_name', self.db1) + tdLog.info(sql1) + start1 = time.time() + rows1 = tdSql.query(sql1) + spend1 = time.time() - start1 + res1 = copy.copy(tdSql.queryResult) + + sql2 = sql.replace('@db_name', self.db2) + tdLog.info(sql2) + start2 = time.time() + tdSql.query(sql2) + spend2 = time.time() - start2 + res2 = tdSql.queryResult + + rowlen1 = len(res1) + rowlen2 = len(res2) + + if rowlen1 != rowlen2: + tdLog.exit(f"rowlen1={rowlen1} rowlen2={rowlen2} both not equal.") + return False + + for i in range(rowlen1): + row1 = res1[i] + row2 = res2[i] + collen1 = len(row1) + collen2 = len(row2) + if collen1 != collen2: + tdLog.exit(f"collen1={collen1} collen2={collen2} both not equal.") + return False + for j in range(collen1): + if row1[j] != row2[j]: + tdLog.exit(f"col={j} col1={row1[j]} col2={row2[j]} both col not equal.") + return False + + # warning performance + diff = (spend2 - spend1)*100/spend1 + tdLog.info("spend1=%.6fs spend2=%.6fs diff=%.1f%%"%(spend1, spend2, diff)) + if spend2 > spend1 and diff > 20: + tdLog.info("warning: the performance diff after splitting is over 20%") + + return True + + + # check result + def checkResult(self): + # check vgroupid + sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{self.db2}'" + tdSql.query(sql) + tdSql.checkRows(self.vgroups2) + + # check child table count same + sql = "select table_name from information_schema.ins_tables where db_name='@db_name' order by table_name" + self.queryDouble(sql) + + # check row value is ok + sql = "select * from @db_name.st order by ts" + self.queryDouble(sql) + + # where +
sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts" + self.queryDouble(sql) + + # max + sql = "select max(c1) from @db_name.st" + self.queryDouble(sql) + + # min + sql = "select min(c2) from @db_name.st" + self.queryDouble(sql) + + # sum + sql = "select sum(c1) from @db_name.st" + self.queryDouble(sql) + + # normal table + + # all rows + sql = "select * from @db_name.ta" + self.queryDouble(sql) + + # count + sql = "select count(*) from @db_name.ta" + self.queryDouble(sql) + + # sum + sql = "select sum(c1) from @db_name.ta" + self.queryDouble(sql) + + + # get vgroup list + def getVGroup(self, db_name): + vgidList = [] + sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{db_name}'" + res = tdSql.getResult(sql) + rows = len(res) + for i in range(rows): + vgidList.append(res[i][0]) + + return vgidList; + + # split vgroup on db2 + def splitVGroup(self, db_name): + vgids = self.getVGroup(db_name) + selid = random.choice(vgids) + sql = f"split vgroup {selid}" + tdLog.info(sql) + tdSql.execute(sql) + + # wait end + for i in range(100): + sql ="show transactions;" + rows = tdSql.query(sql) + if rows == 0: + tdLog.info("split vgroup finished.") + return True + #tdLog.info(f"i={i} wait split vgroup ...") + time.sleep(1) + + tdLog.exit("split vgroup transaction is not finished after executing 50s") + return False + + # split empty database + def splitEmptyDB(self): + + dbName = "emptydb" + vgNum = 2 + # create database + sql = f"create database {dbName} vgroups {vgNum}" + tdLog.info(sql) + tdSql.execute(sql) + + # split vgroup + self.splitVGroup(dbName) + vgList = self.getVGroup(dbName) + vgNum1 = len(vgList) + vgNum2 = vgNum + 1 + if vgNum1 != vgNum2: + tdLog.exit(f" vglist len={vgNum1} is not same for expect {vgNum2}") + return + + # run + def run(self): + + # prepare env + self.prepareEnv() + + for i in range(5): + # split vgroup on db2 + self.splitVGroup(self.db2) + self.vgroups2 += 1 + + # check two db query result same + self.checkResult() + + tdLog.info(f"split vgroup i={i} passed.") + + # split empty db + self.splitEmptyDB() + + + # stop + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/user_privilege_show.py b/tests/system-test/0-others/user_privilege_show.py new file mode 100644 index 0000000000000000000000000000000000000000..9f49778ba87d039c81ec99761c161e1a7e965fa2 --- /dev/null +++ b/tests/system-test/0-others/user_privilege_show.py @@ -0,0 +1,267 @@ +from itertools import product +import taos +from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + + +class TDTestCase: + """This test case is used to veirfy the show create stable/table command for + the different user privilege(TS-3469) + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + # init the tdsql + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + # user info + self.username = 'test' + self.password = 'test' + # db info + self.dbname = "user_privilege_show" + self.stbname = 'stb' + self.common_tbname = "tb" + self.ctbname_list = ["ct1", "ct2"] + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'float', + 'col2': 'int', + } + self.tag_dict = { + 'ctbname': 'binary(10)' + } + + # privilege check scenario info + self.privilege_check_dic = {} 
+ self.senario_type = ["stable", "table", "ctable"] + self.priv_type = ["read", "write", "all", "none"] + # stable senarios + # include the show stable xxx command test senarios and expect result, true as have privilege, false as no privilege + # the list element is (db_privilege, stable_privilege, expect_res) + st_senarios_list = [] + for senario in list(product(self.priv_type, repeat=2)): + expect_res = True + if senario == ("write", "write") or senario == ("none", "none") or senario == ("none", "write") or senario == ("write", "none"): + expect_res = False + st_senarios_list.append(senario + (expect_res,)) + # self.privilege_check_dic["stable"] = st_senarios_list + + # table senarios + # the list element is (db_privilege, table_privilege, expect_res) + self.privilege_check_dic["table"] = st_senarios_list + + # child table senarios + # the list element is (db_privilege, stable_privilege, ctable_privilege, expect_res) + ct_senarios_list = [] + for senario in list(product(self.priv_type, repeat=3)): + expect_res = True + if senario[2] == "write" or (senario[2] == "none" and senario[1] == "write") or (senario[2] == "none" and senario[1] == "none" and senario[0] == "write"): + expect_res = False + ct_senarios_list.append(senario + (expect_res,)) + self.privilege_check_dic["ctable"] = ct_senarios_list + + def prepare_data(self, senario_type): + """Create the db and data for test + """ + if senario_type == "stable": + # db name + self.dbname = self.dbname + '_stable' + elif senario_type == "table": + # db name + self.dbname = self.dbname + '_table' + else: + # db name + self.dbname = self.dbname + '_ctable' + + # create datebase + tdSql.execute(f"create database {self.dbname}") + tdLog.debug("sql:" + f"create database {self.dbname}") + tdSql.execute(f"use {self.dbname}") + tdLog.debug("sql:" + f"use {self.dbname}") + + # create tables + if "_stable" in self.dbname: + # create stable + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + elif "_table" in self.dbname: + # create common table + tdSql.execute(f"create table {self.common_tbname}(ts timestamp, col1 float, col2 int)") + tdLog.debug("sql:" + f"create table {self.common_tbname}(ts timestamp, col1 float, col2 int)") + else: + # create stable and child table + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + for ctname in self.ctbname_list: + tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')") + tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')") + + def create_user(self): + """Create the user for test + """ + tdSql.execute(f'create user {self.username} pass "{self.password}"') + tdLog.debug("sql:" + f'create user {self.username} pass "{self.password}"') + + def grant_privilege(self, username, privilege, privilege_obj, ctable_include=False, tag_condition=None): + """Add the privilege for the user + """ + try: + if ctable_include and tag_condition: + tdSql.execute(f'grant {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} to {username}') + tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} to {username}') + else: + tdSql.execute(f'grant {privilege} on {self.dbname}.{privilege_obj} to {username}') + tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{privilege_obj} to {username}') + except 
Exception as ex: + tdLog.exit(ex) + + def remove_privilege(self, username, privilege, privilege_obj, ctable_include=False, tag_condition=None): + """Remove the privilege for the user + """ + try: + if ctable_include and tag_condition: + tdSql.execute(f'revoke {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} from {username}') + tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} from {username}') + else: + tdSql.execute(f'revoke {privilege} on {self.dbname}.{privilege_obj} from {username}') + tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{privilege_obj} from {username}') + except Exception as ex: + tdLog.exit(ex) + + def run(self): + """Currently, the test case can't be executed for all of the privilege combinations because + the table privilege feature isn't finished by the dev team; only one scenario is left: + db read privilege for the user and the show create table command. Will update the test case once + the table privilege function is finished + """ + self.create_user() + + # temp solution only for the db read privilege verification + self.prepare_data("table") + # grant db read privilege + self.grant_privilege(self.username, "read", "*") + # create the taos connection with -utest -ptest + testconn = taos.connect(user=self.username, password=self.password) + testconn.execute("use %s;" % self.dbname) + # show the user privileges + res = testconn.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(res.fetch_all())) + # query execution + sql = "show create table " + self.common_tbname + ";" + tdLog.debug("sql: %s" % sql) + res = testconn.query(sql) + # query result + tdLog.debug("sql res:" + str(res.fetch_all())) + # remove the privilege + self.remove_privilege(self.username, "read", "*") + # clear env + testconn.close() + tdSql.execute(f"drop database {self.dbname}") + + """ + for senario_type in self.privilege_check_dic.keys(): + tdLog.debug(f"---------check the {senario_type} privilege----------") + self.prepare_data(senario_type) + for senario in self.privilege_check_dic[senario_type]: + # grant db privilege + if senario[0] != "none": + self.grant_privilege(self.username, senario[0], "*") + # grant stable privilege + if senario[1] != "none": + self.grant_privilege(self.username, senario[1], self.stbname if senario_type == "stable" or senario_type == "ctable" else self.common_tbname) + if senario_type == "stable" or senario_type == "table": + tdLog.debug(f"check the db privilege: {senario[0]}, (s)table privilege: {senario[1]}") + else: + if senario[2] != "none": + # grant child table privilege + self.grant_privilege(self.username, senario[2], self.stbname, True, "ctbname='ct1'") + tdLog.debug(f"check the db privilege: {senario[0]}, (s)table privilege: {senario[1]}, ctable privilege: {senario[2]}") + testconn = taos.connect(user=self.username, password=self.password) + tdLog.debug("Create taos connection with user: {}, password: {}".format(self.username, self.password)) + try: + testconn.execute("use %s;" % self.dbname) + except BaseException as ex: + if (senario_type in ["stable", "table"] and senario[0] == "none" and senario[1] == "none") or (senario_type == "ctable" and senario[0] == "none" and senario[1] == "none" and senario[2] == "none"): + continue + else: + tdLog.exit(ex) + + # query privileges for user + res = testconn.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current 
information_schema.ins_user_privileges values: {}".format(res.fetch_all())) + + if senario_type == "stable" or senario_type == "table": + sql = "show create " + (("stable " + self.stbname) if senario_type == "stable" else (f"table {self.dbname}." + self.common_tbname + ";")) + if senario[2]: + tdLog.debug("sql: %s" % sql) + tdLog.debug(f"expected result: {senario[2]}") + res = testconn.query(sql) + tdLog.debug("sql res:" + res.fetch_all()) + else: + exception_flag = False + try: + tdLog.debug("sql: %s" % sql) + tdLog.debug(f"expected result: {senario[2]}") + res = testconn.query(sql) + tdLog.debug("sql res:" + res.fetch_all()) + except BaseException: + exception_flag = True + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.debug(f"{caller.filename}({caller.lineno}) failed to check the db privilege {senario[0]} and stable privilege {senario[1]} failed as expected") + if not exception_flag: + pass + # tdLog.exit("The expected exception isn't occurred") + else: + sql = f"show create table {self.dbname}.{self.ctbname_list[0]};" + if senario[3]: + tdLog.debug("sql: %s" % sql) + tdLog.debug(f"expected result: {senario[3]}") + res = testconn.query(sql) + tdLog.debug(res.fetch_all()) + else: + exception_flag = False + try: + tdLog.debug("sql: %s" % sql) + tdLog.debug(f"expected result: {senario[3]}") + res = testconn.query(sql) + tdLog.debug(res.fetch_all()) + except BaseException: + exception_flag = True + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.debug(f"{caller.filename}({caller.lineno}) failed to check the db privilege {senario[0]}, stable privilege {senario[1]} and ctable privilege {senario[2]} failed as expected") + if not exception_flag: + pass + # tdLog.exit("The expected exception isn't occurred") + + # remove db privilege + if senario[0] != "none": + self.remove_privilege(self.username, senario[0], "*") + # remove stable privilege + if senario[1] != "none": + self.remove_privilege(self.username, senario[1], self.stbname if senario_type == "stable" else self.common_tbname) + # remove child table privilege + if senario_type == "ctable": + if senario[2] != "none": + self.remove_privilege(self.username, senario[2], self.ctbname_list[0], True, "ctbname='ct1'") + testconn.close() + + # remove the database + tdSql.execute(f"drop database {self.dbname}") + # reset the dbname + self.dbname = "user_privilege_show" + """ + + def stop(self): + # remove the user + tdSql.execute(f'drop user {self.username}') + # close the connection + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py index 51d7d596cd9d94e6078fc7d448583c91a27e1780..6cc978a76ce6f71e2dc0b68fc5561bde7e9a6656 100644 --- a/tests/system-test/1-insert/table_param_ttl.py +++ b/tests/system-test/1-insert/table_param_ttl.py @@ -35,14 +35,14 @@ class TDTestCase: tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}') tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum) - sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) for i in range(self.tbnum): tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.default_ttl}') for i in 
range(int(self.tbnum/2)): tdSql.execute(f'alter table db.{self.ntbname}_{i} ttl {self.modify_ttl}') - sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval']) + sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum - int(self.tbnum/2)) tdSql.execute('drop database db') @@ -54,7 +54,7 @@ class TDTestCase: tdSql.execute(f'create table db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param}') tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum) - sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) for i in range(self.tbnum): @@ -63,7 +63,7 @@ class TDTestCase: tdSql.checkRows(self.tbnum) for i in range(int(self.tbnum/2)): tdSql.execute(f'alter table db.{self.stbname}_{i} ttl {self.modify_ttl}') - sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval']) + sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum - int(self.tbnum/2)) tdSql.execute('drop database db') @@ -75,7 +75,7 @@ class TDTestCase: tdSql.execute(f'insert into db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param} values(now,1)') tdSql.query(f'show db.tables') tdSql.checkRows(self.tbnum) - sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval']) + sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1) tdSql.query(f'show db.tables') tdSql.checkRows(0) tdSql.execute('drop database db') diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index d48a01db6af023966ec9fc8307d93cfab37c4f87..cdea8964b4608059240affb138354bf7675a9cdf 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -52,6 +52,95 @@ class TDTestCase: tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) + # handle null values + tdSql.execute( + f"create table {dbname}.ntb_null(ts timestamp,c1 int,c2 double,c3 float,c4 bool)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, 1.0, NULL, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 2.0, 2.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, 2, NULL, NULL, false)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 1.0, 1.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 3.0, NULL, true)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, 3, NULL, 3.0, NULL)") + tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, NULL, NULL, true)") + + tdSql.query(f"select diff(c1) from {dbname}.ntb_null") + tdSql.checkRows(6) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, -2) + + tdSql.query(f"select diff(c2) from {dbname}.ntb_null") + tdSql.checkRows(6) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -1) + tdSql.checkData(3, 0, 2) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + + tdSql.query(f"select diff(c3) from {dbname}.ntb_null") + tdSql.checkRows(6) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + 
tdSql.checkData(2, 0, -1) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2) + tdSql.checkData(5, 0, None) + + tdSql.query(f"select diff(c4) from {dbname}.ntb_null") + tdSql.checkRows(6) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, 0) + + tdSql.query(f"select diff(c1),diff(c2),diff(c3),diff(c4) from {dbname}.ntb_null") + tdSql.checkRows(6) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, -2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 1, -1) + tdSql.checkData(3, 1, 2) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, -1) + tdSql.checkData(3, 2, None) + tdSql.checkData(4, 2, 2) + tdSql.checkData(5, 2, None) + tdSql.checkData(0, 3, None) + tdSql.checkData(1, 3, None) + tdSql.checkData(2, 3, None) + tdSql.checkData(3, 3, 1) + tdSql.checkData(4, 3, None) + tdSql.checkData(5, 3, 0) + + tdSql.query(f"select diff(c1),diff(c2),diff(c3),diff(c4) from {dbname}.ntb_null where c1 is not null") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, -2) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(0, 3, None) + tdSql.checkData(1, 3, None) + tdSql.checkData(2, 3, 1) + tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") @@ -103,6 +192,9 @@ class TDTestCase: tdSql.error(f"select diff(col1,1.23) from {dbname}.stb_1") tdSql.error(f"select diff(col1,-1) from {dbname}.stb_1") tdSql.query(f"select ts,diff(col1),ts from {dbname}.stb_1") + tdSql.error(f"select diff(col1, 1),diff(col2) from {dbname}.stb_1") + tdSql.error(f"select diff(col1, 1),diff(col2, 0) from {dbname}.stb_1") + tdSql.error(f"select diff(col1, 1),diff(col2, 1) from {dbname}.stb_1") tdSql.query(f"select diff(ts) from {dbname}.stb_1") tdSql.checkRows(10) diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py index c3f3789a69752b204393f6355d73b480cde90e3a..493e59265e1f64398b1a84d45a39ffde7f83e7e7 100644 --- a/tests/system-test/2-query/function_diff.py +++ b/tests/system-test/2-query/function_diff.py @@ -127,22 +127,33 @@ class TDTestCase: return else: - tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + sql = f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}" + tdSql.query(sql) offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'): pre_result = np.array(pre_result, dtype = 'int64') pre_diff = np.diff(pre_result)[offset_val:] - tdSql.query(self.diff_query_form( - col=col, alias=alias, table_expr=table_expr, condition=condition - )) - - for i in 
range(tdSql.queryRows): - print(f"case in {line}: ", end='') - if isinstance(pre_diff[i] , float ): - pass - else: - tdSql.checkData(i, 0, pre_diff[i]) + if len(pre_diff) > 0: + sql =self.diff_query_form(col=col, alias=alias, table_expr=table_expr, condition=condition) + tdSql.query(sql) + j = 0 + diff_cnt = len(pre_diff) + for i in range(tdSql.queryRows): + print(f"case in {line}: i={i} j={j} pre_diff[j]={pre_diff[j]} ", end='') + if isinstance(pre_diff[j] , float ): + if j + 1 < diff_cnt: + j += 1 + pass + else: + if tdSql.getData(i,0) != None: + tdSql.checkData(i, 0, pre_diff[j]) + if j + 1 < diff_cnt: + j += 1 + else: + print(f"getData i={i} is None j={j} ") + else: + print("pre_diff len is zero.") pass @@ -354,31 +365,31 @@ class TDTestCase: tdSql.checkRows(229) tdSql.checkData(0,0,0) tdSql.query("select diff(c1) from db.stb1 partition by tbname ") - tdSql.checkRows(190) + tdSql.checkRows(220) tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname") - tdSql.checkRows(190) + tdSql.checkRows(220) tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname") - tdSql.checkRows(190) + tdSql.checkRows(220) tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname") - tdSql.checkRows(190) + tdSql.checkRows(220) # bug need fix tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname") - tdSql.checkRows(190) + tdSql.checkRows(220) # bug need fix tdSql.query("select tbname , diff(c1) from db.stb1 partition by tbname") - tdSql.checkRows(190) + tdSql.checkRows(220) tdSql.query("select tbname , diff(st1) from db.stb1 partition by tbname") tdSql.checkRows(220) # partition by tags tdSql.query("select st1 , diff(c1) from db.stb1 partition by st1") - tdSql.checkRows(190) + tdSql.checkRows(220) tdSql.query("select diff(c1) from db.stb1 partition by st1") - tdSql.checkRows(190) + tdSql.checkRows(220) def diff_test_run(self) : diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index f733a3d008cdfa278fbd24d40b9d102f91e830a6..47a4bc4dcf4cdb71c1a4ee87662f1c8af433dc84 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -226,6 +226,7 @@ class TDTestCase: tdSql.checkData(3, 0, 12) ## test fill value with scalar expression + # data types tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") tdSql.checkRows(4) tdSql.checkData(0, 0, 3) @@ -233,6 +234,49 @@ class TDTestCase: tdSql.checkData(2, 0, 3) tdSql.checkData(3, 0, 3) + tdSql.query(f"select interp(c1) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c2) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c3) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c4) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3.0) + 
tdSql.checkData(1, 0, 3.0) + tdSql.checkData(2, 0, 3.0) + tdSql.checkData(3, 0, 3.0) + + tdSql.query(f"select interp(c5) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3.0) + tdSql.checkData(1, 0, 3.0) + tdSql.checkData(2, 0, 3.0) + tdSql.checkData(3, 0, 3.0) + + tdSql.query(f"select interp(c6) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, True) + tdSql.checkData(1, 0, True) + tdSql.checkData(2, 0, True) + tdSql.checkData(3, 0, True) + + # expr types tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1.0 + 2.0)") tdSql.checkRows(4) tdSql.checkData(0, 0, 3) @@ -275,6 +319,7 @@ class TDTestCase: tdSql.checkData(2, 0, 3) tdSql.checkData(3, 0, 3) + tdLog.printNoPrefix("==========step5:fill prev") ## {. . .} diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index dec24010fcf4a43fa16863a577ad3b49fb160e23..fbd3488aabc53ad9817b4f4632f490502457785d 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -172,7 +172,7 @@ class TDTestCase: tdSql.checkRows(90) tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1") - tdSql.checkRows(90) + tdSql.checkRows(140) tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1") tdSql.checkRows(100) diff --git a/tests/system-test/2-query/ts_3398.py b/tests/system-test/2-query/ts_3398.py deleted file mode 100644 index 54d5c9180474d103f2dbf53869690c285d886f10..0000000000000000000000000000000000000000 --- a/tests/system-test/2-query/ts_3398.py +++ /dev/null @@ -1,56 +0,0 @@ -from util.log import * -from util.sql import * -from util.cases import * -from util.sqlset import * -import datetime - - -class TDTestCase: - """This test case is used to verify the aliasName of Node structure is not truncated - when sum clause is more than 65 bits. 
- """ - def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) - - def run(self): - # test case for https://jira.taosdata.com:18080/browse/TS-3405: - # create db - ret = tdSql.execute("CREATE DATABASE IF NOT EXISTS statistics1 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar)) - tdSql.execute("use statistics1;") - - # create stable - ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`g`(`day` timestamp,`run_state` tinyint) TAGS(`vin` binary(32));") - ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`b`(`day` timestamp, `total_heart` int) TAGS(`vin` binary(32));") - ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`tg`(`day` timestamp,`lt_4177` int,`f30_4177` int, `f35_4177` int) TAGS(`vin` binary(32));") - - # insert the data to table - ret = tdSql.execute("insert into d1001 using statistics1.`g` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99)) - ret = tdSql.execute("insert into d2001 using statistics1.`b` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99)) - ret = tdSql.execute("insert into d3001 using statistics1.`tg` tags('NJHYNBSAS0000061') values (%s, %d, %d, %d)" % ("'2023-05-01'", 99, 99, 99)) - - # execute the sql statements - ret = tdSql.query("SELECT b.`day` `day`,sum(CASE WHEN tg.lt_4177 IS NULL THEN 0 ELSE tg.lt_4177 END \ - + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 END) / 3600 es0,sum(CASE WHEN tg.lt_4177 \ - IS NULL THEN 0 ELSE tg.lt_4177 END + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 \ - END + CASE WHEN tg.f30_4177 IS NULL THEN 0 ELSE tg.f30_4177 END) / 3600 es1 FROM \ - statistics1.b b,statistics1.tg tg,statistics1.g g WHERE b.`day` = tg.`day` AND g.`day` = b.`day` \ - AND b.vin = tg.vin AND b.vin = g.vin AND b.`day` BETWEEN '2023-05-01' AND '2023-05-05' \ - AND b.vin = 'NJHYNBSAS0000061' AND g.vin IS NOT NULL AND b.vin IS NOT NULL AND tg.vin IS NOT NULL \ - GROUP BY b.`day`;") - # check the result - if 0.055 in tdSql.queryResult[0] and 0.0825 in tdSql.queryResult[0]: - tdLog.info("query result is correct") - else: - tdLog.info("query result is wrong") - - def stop(self): - # clear the db - tdSql.execute("drop database if exists statistics1;") - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/ts_3405_3398_3423.py b/tests/system-test/2-query/ts_3405_3398_3423.py new file mode 100644 index 0000000000000000000000000000000000000000..dcd8fb5a85711feec23f96d32c39f996e135b860 --- /dev/null +++ b/tests/system-test/2-query/ts_3405_3398_3423.py @@ -0,0 +1,162 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.sqlset import * +import datetime +import random + +class TDTestCase: + """The test cases are for TS_3398, TS_3405, TS_3423 + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), False) + + def run(self): + """This test case is used to verify the query performance for the merge scans process of + multiple tables join + """ + tdLog.info("Start the test case for ts_3405") + # test case for https://jira.taosdata.com:18080/browse/TS-3405: + # create db + tdSql.execute("CREATE DATABASE IF NOT EXISTS statistics2 
REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+        tdSql.execute("use statistics2;")
+
+        # create stable
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`pg`(`day` timestamp,`lt_3` int,`c3_3` int,`c6_3` int,`c9_3` int,`c12_3` int,`c15_3` int,`c18_3` int,`c21_3` int,`c24_3` int,`c27_3` int,`ge_3` int) TAGS(`vin` binary(32));")
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`b`(`day` timestamp, `month` int) TAGS(`group_path` binary(32),`vin` binary(32));")
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`g`(`day` timestamp,`run_state` tinyint) TAGS(`vin` binary(32));")
+
+        # insert the data to table
+        times = 10
+        insertRows = 3000
+        pg_sql = "insert into d1001 using statistics2.`pg` tags('test') values"
+        b_sql = "insert into d2001 using statistics2.`b` tags('1#%', 'test') values"
+        g_sql = "insert into d3001 using statistics2.`g` tags('test') values"
+        for t in range(times):
+            for i in range(t * insertRows, t * insertRows + insertRows):
+                ts = datetime.datetime.strptime('2023-05-01 00:00:00.000', '%Y-%m-%d %H:%M:%S.%f') + datetime.timedelta(seconds=i)
+                pg_sql += " ('{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(ts, i, i, i+1, i+2, i+3, i+4, i+5, i+6, i+7, i+8, i+9)
+                b_sql += " ('{}', {})".format(ts, 5)
+                g_sql += " ('{}', {})".format(ts, 1)
+
+            tdSql.execute(pg_sql)
+            tdSql.execute(b_sql)
+            tdSql.execute(g_sql)
+            # reset the sql statements
+            pg_sql = "insert into d1001 using statistics2.`pg` tags('test') values"
+            b_sql = "insert into d2001 using statistics2.`b` tags('1#%', 'test') values"
+            g_sql = "insert into d3001 using statistics2.`g` tags('test') values"
+        tdLog.info("insert %d rows" % (insertRows * times))
+
+        # execute the sql statements
+        ret = tdSql.query("SELECT sum(pg.lt_3) es1,sum(pg.c3_3) es2,sum(pg.c6_3) es3,sum(pg.c9_3) es4,sum(pg.c12_3) es5,sum(pg.c15_3) es6,sum(pg.c18_3) es7,sum(pg.c21_3) es8,sum(pg.c24_3) es9,sum(pg.c27_3) es10,sum(pg.ge_3) es11 FROM statistics2.b b,statistics2.pg pg,statistics2.g g WHERE b.`day` = pg.`day` AND b.`day` = g.`day` AND b.vin = pg.vin AND b.vin = g.vin AND b.vin IS NOT NULL AND b.`group_path` LIKE '1#%';")
+        # check the first query result; sum(lt_3) over the 30000 rows 0..29999 is 29999 * 30000 / 2 = 449985000
+        if (449985000, 449985000, 450015000, 450045000, 450075000, 450105000, 450135000, 450165000, 450195000, 450225000, 450255000) in tdSql.queryResult:
+            tdLog.info("first query result is correct")
+        else:
+            tdLog.info("first query result is wrong with res: {}".format(str(tdSql.queryResult)))
+
+        ret = tdSql.query("SELECT sum(pg.lt_3) es1, sum(pg.c3_3) es2, sum(pg.c6_3) es3, sum(pg.c9_3) es4, sum(pg.c12_3) es5, sum(pg.c15_3) es6, sum(pg.c18_3) es7, sum(pg.c21_3) es8, sum(pg.c24_3) es9, sum(pg.c27_3) es10, sum(pg.ge_3) es11 FROM (select * from statistics2.b order by day,month) b, (select * from statistics2.pg order by day,lt_3 ) pg, (select * from statistics2.g order by day,run_state) g WHERE b.`day` = pg.`day` AND b.`day` = g.`day` AND b.vin = pg.vin AND b.vin = g.vin AND b.vin IS NOT NULL;")
+        # check the second query result
+        if (449985000, 449985000, 450015000, 450045000, 450075000, 450105000, 450135000, 450165000, 450195000, 450225000, 450255000) in tdSql.queryResult:
+            tdLog.info("second query result is correct")
+        else:
+            tdLog.info("second query result is wrong with res: {}".format(str(tdSql.queryResult)))
+        tdLog.info("Finish the test case for ts_3405 successfully")
+
+        """This test case is used to verify the aliasName of the Node structure is not truncated
+        when the sum clause is longer than 65 characters.
+        """
+        # test case for https://jira.taosdata.com:18080/browse/TS-3398:
+        # create db
+        tdLog.info("Start the test case for ts_3398")
+        tdSql.execute("CREATE DATABASE IF NOT EXISTS statistics1 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+        tdSql.execute("use statistics1;")
+
+        # create stable
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`g`(`day` timestamp,`run_state` tinyint) TAGS(`vin` binary(32));")
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`b`(`day` timestamp, `total_heart` int) TAGS(`vin` binary(32));")
+        tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`tg`(`day` timestamp,`lt_4177` int,`f30_4177` int, `f35_4177` int) TAGS(`vin` binary(32));")
+
+        # insert the data to table
+        tdSql.execute("insert into d1001 using statistics1.`g` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99))
+        tdSql.execute("insert into d2001 using statistics1.`b` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99))
+        tdSql.execute("insert into d3001 using statistics1.`tg` tags('NJHYNBSAS0000061') values (%s, %d, %d, %d)" % ("'2023-05-01'", 99, 99, 99))
+
+        # execute the sql statements
+        tdSql.query("SELECT b.`day` `day`,sum(CASE WHEN tg.lt_4177 IS NULL THEN 0 ELSE tg.lt_4177 END \
+            + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 END) / 3600 es0,sum(CASE WHEN tg.lt_4177 \
+            IS NULL THEN 0 ELSE tg.lt_4177 END + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 \
+            END + CASE WHEN tg.f30_4177 IS NULL THEN 0 ELSE tg.f30_4177 END) / 3600 es1 FROM \
+            statistics1.b b,statistics1.tg tg,statistics1.g g WHERE b.`day` = tg.`day` AND g.`day` = b.`day` \
+            AND b.vin = tg.vin AND b.vin = g.vin AND b.`day` BETWEEN '2023-05-01' AND '2023-05-05' \
+            AND b.vin = 'NJHYNBSAS0000061' AND g.vin IS NOT NULL AND b.vin IS NOT NULL AND tg.vin IS NOT NULL \
+            GROUP BY b.`day`;")
+        # check the result; es0 = (99 + 99) / 3600 = 0.055 and es1 = (99 + 99 + 99) / 3600 = 0.0825
+        if 0.055 in tdSql.queryResult[0] and 0.0825 in tdSql.queryResult[0]:
+            tdLog.info("query result is correct")
+        else:
+            tdLog.info("query result is wrong")
+        tdLog.info("Finish the test case for ts_3398 successfully")
+
+        """This test case is used to verify that the last(*) query result is correct when the data
+        is grouped by tag for a stable
+        """
+        # test case for https://jira.taosdata.com:18080/browse/TS-3423:
+        # create db
+        tdLog.info("Start the test case for ts_3423")
+        tdSql.execute("CREATE DATABASE IF NOT EXISTS ts_3423 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+        tdSql.execute("use ts_3423;")
+
+        # create stable
+        tdSql.execute("CREATE STABLE IF NOT EXISTS ts_3423.`st_last`(`ts` timestamp,`n1` int,`n2` float) TAGS(`groupname` binary(32));")
+
+        # insert the data to table
+        insertRows = 10
+        child_table_num = 10
+        for i in range(insertRows):
+            ts = datetime.datetime.strptime('2023-05-01 00:00:00.000', '%Y-%m-%d %H:%M:%S.%f') + datetime.timedelta(seconds=i)
+            for j in range(child_table_num):
+                tdSql.execute("insert into {} using ts_3423.`st_last` tags('{}') values ('{}', {}, {})".format("d" + str(j), "group" + str(j), str(ts), str(i+1), random.random()))
+        tdLog.info("insert %d rows for every child table" % (insertRows))
+
+        # cache model list
+        cache_model = ["none", "last_row", "last_value", "both"]
+        query_res = []
+
+        # execute the sql statements first
+        tdSql.query("select `cachemodel` from information_schema.ins_databases where name='ts_3423'")
+        current_cache_model = tdSql.queryResult[0][0]
+        tdLog.info("query on cache model {}".format(current_cache_model))
+        tdSql.query("select last(*) from st_last group by groupname;")
+        # save the results
+        query_res.append(len(tdSql.queryResult))
+        # remove the current cache model
+        cache_model.remove(current_cache_model)
+
+        for item in cache_model:
+            tdSql.execute("alter database ts_3423 cachemodel '{}';".format(item))
+            # execute the sql statements
+            tdSql.query("select last(*) from st_last group by groupname;")
+            tdLog.info("query on cache model {}".format(item))
+            query_res.append(len(tdSql.queryResult))
+        # check the result
+        res = True if query_res.count(child_table_num) == 4 else False
+        if res:
+            tdLog.info("query result is correct and same among different cache model")
+        else:
+            tdLog.info("query result is wrong")
+        tdLog.info("Finish the test case for ts_3423 successfully")
+
+    def stop(self):
+        # clear the db
+        tdSql.execute("drop database if exists statistics1;")
+        tdSql.execute("drop database if exists statistics2;")
+        tdSql.execute("drop database if exists ts_3423;")
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h
index 6345647e2f24b1a87cee8a6a0c60275907946dfe..57415f833578ae9ab003fa829d70716985af1d9a 100644
--- a/tools/shell/inc/shellInt.h
+++ b/tools/shell/inc/shellInt.h
@@ -45,6 +45,8 @@
 #define SHELL_MAX_PKG_NUM    1 * 1024 * 1024
 #define SHELL_MIN_PKG_NUM    1
 #define SHELL_DEF_PKG_NUM    100
+#define SHELL_FLOAT_WIDTH    20
+#define SHELL_DOUBLE_WIDTH   25
 
 typedef struct {
   char* hist[SHELL_MAX_HISTORY_SIZE];
diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c
index 19a888fe82f6eeea23f52c7d7605bb5fc7fdd5c9..41cdb0f9286b84502da146937123c493dfb90331 100644
--- a/tools/shell/src/shellAuto.c
+++ b/tools/shell/src/shellAuto.c
@@ -91,9 +91,14 @@ SWords shellCommands[] = {
   {"create stream into as select", 0, 0, NULL},  // 26 append sub sql
   {"create topic as select", 0, 0, NULL},        // 27 append sub sql
   {"create function as outputtype language ", 0, 0, NULL},
+  {"create or replace as outputtype language ", 0, 0, NULL},
   {"create aggregate function as outputtype bufsize language ", 0, 0, NULL},
+  {"create or replace aggregate function as outputtype bufsize language ", 0, 0, NULL},
   {"create user pass sysinfo 0;", 0, 0, NULL},
   {"create user pass sysinfo 1;", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
+  {"compact database ", 0, 0, NULL},
+#endif
   {"describe ", 0, 0, NULL},
   {"delete from where ", 0, 0, NULL},
   {"drop database ", 0, 0, NULL},
@@ -117,7 +122,11 @@ SWords shellCommands[] = {
   {"kill connection ;", 0, 0, NULL},
   {"kill query ", 0, 0, NULL},
   {"kill transaction ", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
   {"merge vgroup ", 0, 0, NULL},
+#endif
+  {"pause stream ;", 0, 0, NULL},
+  {"resume stream ;", 0, 0, NULL},
   {"reset query cache;", 0, 0, NULL},
   {"restore dnode ;", 0, 0, NULL},
   {"restore vnode on dnode ;", 0, 0, NULL},
@@ -173,7 +182,9 @@ SWords shellCommands[] = {
   {"show vgroups;", 0, 0, NULL},
   {"show consumers;", 0, 0, NULL},
   {"show grants;", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
   {"split vgroup ", 0, 0, NULL},
+#endif
   {"insert into values(", 0, 0, NULL},
   {"insert into using tags(", 0, 0, NULL},
   {"insert into using values(", 0, 0, NULL},
@@ -432,9 +443,10 @@ void showHelp() {
    kill connection ; \n\
    kill query ; \n\
    kill transaction ;\n\
-   ----- M ----- \n\
-   merge vgroup ...\n\
+   ----- P ----- \n\
+   pause stream ;\n\
    ----- R ----- \n\
+   resume stream ;\n\
    reset query cache;\n\
    restore dnode ;\n\
    restore vnode on dnode ;\n\
@@ -489,14 +501,20 @@ void showHelp() {
    show vgroups;\n\
    show consumers;\n\
    show grants;\n\
-   split vgroup ...\n\
    ----- T ----- \n\
    trim database ;\n\
    ----- U ----- \n\
    use ;");
-  printf("\n\n");
+#ifdef TD_ENTERPRISE
+  printf(
+      "\n\n\
+   ----- special commands on enterprise version ----- \n\
+   compact database ; \n\
+   split vgroup ;");
+#endif
+  printf("\n\n");
 
   // define in getDuration() function
   printf(
       "\
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 7b30052659d2ec8b6d7c400f2d587c259375c472..865d4680a333386f34333a5cae87c76d6071f6ff 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -326,6 +326,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
   char quotationStr[2];
   quotationStr[0] = '\"';
   quotationStr[1] = 0;
+  int32_t width;
 
   int n;
   char buf[TSDB_MAX_BYTES_PER_ROW];
@@ -358,20 +359,27 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
       taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val));
       break;
     case TSDB_DATA_TYPE_FLOAT:
+      width = SHELL_FLOAT_WIDTH;
       if (tsEnableScience) {
-        taosFprintfFile(pFile, "%e", GET_FLOAT_VAL(val));
+        taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val));
       } else {
-        taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val));
+        n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val));
+        if (n > SHELL_FLOAT_WIDTH) {
+          taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val));
+        } else {
+          taosFprintfFile(pFile, "%s", buf);
+        }
       }
       break;
     case TSDB_DATA_TYPE_DOUBLE:
+      width = SHELL_DOUBLE_WIDTH;
       if (tsEnableScience) {
-        snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9e", 23, GET_DOUBLE_VAL(val));
-        taosFprintfFile(pFile, "%s", buf);
+        snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%.9e", GET_DOUBLE_VAL(val));
+        taosFprintfFile(pFile, "%*s", width, buf);
       } else {
-        n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val));
-        if (n > TMAX(25, length)) {
-          taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val));
+        n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val));
+        if (n > SHELL_DOUBLE_WIDTH) {
+          taosFprintfFile(pFile, "%*.15e", width, GET_DOUBLE_VAL(val));
         } else {
           taosFprintfFile(pFile, "%s", buf);
         }
@@ -607,7 +615,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
       printf("%*e", width, GET_FLOAT_VAL(val));
     } else {
       n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val));
-      if (n > TMAX(20, width)) {
+      if (n > SHELL_FLOAT_WIDTH) {
        printf("%*e", width, GET_FLOAT_VAL(val));
      } else {
        printf("%s", buf);
@@ -620,7 +628,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
       printf("%*s", width, buf);
     } else {
       n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val));
-      if (n > TMAX(25, width)) {
+      if (n > SHELL_DOUBLE_WIDTH) {
         printf("%*.15e", width, GET_DOUBLE_VAL(val));
       } else {
         printf("%s", buf);
@@ -757,10 +765,10 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
       return TMAX(21, width);  // '-9223372036854775807'
 
     case TSDB_DATA_TYPE_FLOAT:
-      return TMAX(20, width);
+      return TMAX(SHELL_FLOAT_WIDTH, width);
 
     case TSDB_DATA_TYPE_DOUBLE:
-      return TMAX(25, width);
+      return TMAX(SHELL_DOUBLE_WIDTH, width);
 
     case TSDB_DATA_TYPE_BINARY:
     case TSDB_DATA_TYPE_GEOMETRY:
diff --git a/utils/test/c/write_raw_block_test.c b/utils/test/c/write_raw_block_test.c
index 8e5dd6275217b194fa4154fdd1235fdb27e397f1..ee2594af7a89a5710336177debcaff62d6936a59 100644
--- a/utils/test/c/write_raw_block_test.c
+++ b/utils/test/c/write_raw_block_test.c
@@ -52,7 +52,7 @@ int buildStable(TAOS* pConn, TAOS_RES* pRes) {
 
   pRes = taos_query(pConn, "create table d2 using meters tags(3, 'San Francisco')");
   if (taos_errno(pRes) != 0) {
-    printf("failed to create child table d1, reason:%s\n", taos_errstr(pRes));
+    printf("failed to create child table d2, reason:%s\n", taos_errstr(pRes));
     return -1;
   }
   taos_free_result(pRes);
diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt
index c2cf7ac3c5380c4e116e96753fa78f486d918566..81737809d900a8931be71b4b7c605c9f18627d32 100644
--- a/utils/tsim/CMakeLists.txt
+++ b/utils/tsim/CMakeLists.txt
@@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC)
 add_executable(tsim ${TSIM_SRC})
 target_link_libraries(
   tsim
-  PUBLIC taos_static
+  PUBLIC taos
   PUBLIC util
   PUBLIC common
   PUBLIC os
diff --git a/utils/tsim/src/simExe.c b/utils/tsim/src/simExe.c
index c1f0f502f3176f9fd7c147fbbc6bc14e3638e09f..9a0a156717026c1fcc8c4da265199bd2c5aa4e54 100644
--- a/utils/tsim/src/simExe.c
+++ b/utils/tsim/src/simExe.c
@@ -434,7 +434,7 @@ bool simExecuteSystemCmd(SScript *script, char *option) {
   simLogSql(buf, true);
   int32_t code = system(buf);
   int32_t repeatTimes = 0;
-  while (code < 0) {
+  while (code != 0) {
     simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", script->fileName, buf, code,
              errno, strerror(errno), repeatTimes);
     taosMsleep(1000);