Commit c31b1034 authored by Hongze Cheng

Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/tsdb_optimize

......@@ -131,3 +131,4 @@ tools/BUGS
tools/taos-tools
tools/taosws-rs
tags
.clangd
......@@ -52,7 +52,7 @@ TDengine also provides a set of companion tools, taosTools, which currently includes taosBench
### Ubuntu 18.04 and Above & Debian:
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### Install build dependencies for taos-tools
......@@ -352,4 +352,4 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java
# Join the Technical Discussion Group
The official TDengine community group "IoT Big Data Group" is open to everyone. You are welcome to join the discussion: search for the WeChat ID "tdengine", add Little T as a friend, and you will be invited into the group.
The official TDengine community group "IoT Big Data Group" is open to everyone. You are welcome to join the discussion: search for the WeChat ID "tdengine1", add Little T as a friend, and you will be invited into the group.
......@@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
### Ubuntu 18.04 and above or Debian
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### Install build dependencies for taosTools
......
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
......@@ -117,17 +117,11 @@ ELSE ()
IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
IF (${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
# disable all assert
......
......@@ -162,6 +162,14 @@ ELSE ()
ENDIF ()
ENDIF ()
IF(APPLE)
set(CMAKE_THREAD_LIBS_INIT "-lpthread")
set(CMAKE_HAVE_THREADS_LIBRARY 1)
set(CMAKE_USE_WIN32_THREADS_INIT 0)
set(CMAKE_USE_PTHREADS 1)
set(THREADS_PREFER_PTHREAD_FLAG ON)
ENDIF()
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
......
......@@ -8,4 +8,4 @@ ExternalProject_Add(rocksdb
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
\ No newline at end of file
)
......@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG ffc2e6f
GIT_TAG 4378702
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -223,31 +223,53 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-error=unused-private-field -Wno-error=unused-result")
endif(${TD_LINUX})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
if(${TD_WINDOWS})
option(WITH_JNI "" ON)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
option(FAIL_ON_WARNINGS "" OFF)
#option(WITH_JEMALLOC "" ON)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
IF (${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
IF (${TD_DARWIN})
target_compile_options(
rocksdb
PRIVATE -Wno-unused-private-field
)
endif(${TD_DARWIN})
endif(${BUILD_WITH_ROCKSDB})
# lucene
......
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
......@@ -9,40 +10,307 @@
const char DBPath[] = "rocksdb_c_simple_example";
const char DBBackupPath[] = "/tmp/rocksdb_c_simple_example_backup";
static const int32_t endian_test_var = 1;
#define IS_LITTLE_ENDIAN() (*(uint8_t *)(&endian_test_var) != 0)
#define TD_RT_ENDIAN() (IS_LITTLE_ENDIAN() ? TD_LITTLE_ENDIAN : TD_BIG_ENDIAN)
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
static void *taosDecodeFixedU64(const void *buf, uint64_t *value) {
if (IS_LITTLE_ENDIAN()) {
memcpy(value, buf, sizeof(*value));
} else {
((uint8_t *)value)[7] = ((uint8_t *)buf)[0];
((uint8_t *)value)[6] = ((uint8_t *)buf)[1];
((uint8_t *)value)[5] = ((uint8_t *)buf)[2];
((uint8_t *)value)[4] = ((uint8_t *)buf)[3];
((uint8_t *)value)[3] = ((uint8_t *)buf)[4];
((uint8_t *)value)[2] = ((uint8_t *)buf)[5];
((uint8_t *)value)[1] = ((uint8_t *)buf)[6];
((uint8_t *)value)[0] = ((uint8_t *)buf)[7];
}
return POINTER_SHIFT(buf, sizeof(*value));
}
// ---- Fixed U64
static int32_t taosEncodeFixedU64(void **buf, uint64_t value) {
if (buf != NULL) {
if (IS_LITTLE_ENDIAN()) {
memcpy(*buf, &value, sizeof(value));
} else {
((uint8_t *)(*buf))[0] = value & 0xff;
((uint8_t *)(*buf))[1] = (value >> 8) & 0xff;
((uint8_t *)(*buf))[2] = (value >> 16) & 0xff;
((uint8_t *)(*buf))[3] = (value >> 24) & 0xff;
((uint8_t *)(*buf))[4] = (value >> 32) & 0xff;
((uint8_t *)(*buf))[5] = (value >> 40) & 0xff;
((uint8_t *)(*buf))[6] = (value >> 48) & 0xff;
((uint8_t *)(*buf))[7] = (value >> 56) & 0xff;
}
*buf = POINTER_SHIFT(*buf, sizeof(value));
}
return (int32_t)sizeof(value);
}
typedef struct KV {
uint64_t k1;
uint64_t k2;
} KV;
int kvSerial(KV *kv, char *buf) {
int len = 0;
len += taosEncodeFixedU64((void **)&buf, kv->k1);
len += taosEncodeFixedU64((void **)&buf, kv->k2);
return len;
}
const char *kvDBName(void *name) { return "kvDBname"; }
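/* Custom RocksDB comparator callback: decode both keys back into KV structs
 * and order them numerically by (k1, k2), instead of comparing raw key bytes. */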
int kvDBComp(void *state, const char *aBuf, size_t aLen, const char *bBuf, size_t bLen) {
KV w1, w2;
memset(&w1, 0, sizeof(w1));
memset(&w2, 0, sizeof(w2));
char *p1 = (char *)aBuf;
char *p2 = (char *)bBuf;
// p1 += 1;
// p2 += 1;
p1 = taosDecodeFixedU64(p1, &w1.k1);
p2 = taosDecodeFixedU64(p2, &w2.k1);
p1 = taosDecodeFixedU64(p1, &w1.k2);
p2 = taosDecodeFixedU64(p2, &w2.k2);
if (w1.k1 < w2.k1) {
return -1;
} else if (w1.k1 > w2.k1) {
return 1;
}
if (w1.k2 < w2.k2) {
return -1;
} else if (w1.k2 > w2.k2) {
return 1;
}
return 0;
}
int kvDeserial(KV *kv, char *buf) {
char *p1 = (char *)buf;
// p1 += 1;
p1 = taosDecodeFixedU64(p1, &kv->k1);
p1 = taosDecodeFixedU64(p1, &kv->k2);
return 0;
}
int main(int argc, char const *argv[]) {
rocksdb_t * db;
rocksdb_t *db;
rocksdb_backup_engine_t *be;
rocksdb_options_t * options = rocksdb_options_create();
rocksdb_options_set_create_if_missing(options, 1);
// open DB
char *err = NULL;
db = rocksdb_open(options, DBPath, &err);
char *err = NULL;
const char *path = "/tmp/db";
// Write
rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
rocksdb_options_t *opt = rocksdb_options_create();
rocksdb_options_set_create_if_missing(opt, 1);
rocksdb_options_set_create_missing_column_families(opt, 1);
// Read
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
//rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
char buf[256] = {0};
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
int len = 2;  // two column families are used below: cfHandle[0] and cfHandle[1]
char buf[256] = {0};
size_t vallen = 0;
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
snprintf(buf, vallen+5, "val:%s", val);
char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
snprintf(buf, vallen + 5, "val:%s", val);
printf("%ld %ld %s\n", strlen(val), vallen, buf);
// Update
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
char **cfName = calloc(len, sizeof(char *));
cfName[0] = "default";  // the default column family must always be listed when opening
for (int i = 1; i < len; i++) {
cfName[i] = "test";
}
const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
for (int i = 0; i < len; i++) {
cfOpt[i] = rocksdb_options_create_copy(opt);
if (i != 0) {
rocksdb_comparator_t *comp = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
rocksdb_options_set_comparator((rocksdb_options_t *)cfOpt[i], comp);
}
}
rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);
{
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
size_t vlen = 0;
// Delete
rocksdb_delete(db, writeoptions, "key", 3, &err);
char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
printf("Get value %s, and len = %d\n", v, (int)vlen);
}
// Read again
val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
rocksdb_writebatch_put_cf(wBatch, cfHandle[0], "key", strlen("key"), "value", strlen("value"));
rocksdb_write(db, wOpt, wBatch, &err);
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
size_t vlen = 0;
{
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
for (int i = 0; i < 100; i++) {
char buf[128] = {0};
KV kv = {.k1 = (100 - i) % 26, .k2 = i % 26};
kvSerial(&kv, buf);
rocksdb_writebatch_put_cf(wBatch, cfHandle[1], buf, sizeof(kv), "value", strlen("value"));
}
rocksdb_write(db, wOpt, wBatch, &err);
}
{
{
char buf[128] = {0};
KV kv = {.k1 = 0, .k2 = 0};
kvSerial(&kv, buf);
char *v = rocksdb_get_cf(db, rOpt, cfHandle[1], buf, sizeof(kv), &vlen, &err);
printf("Get value %s, and len = %d, xxxx\n", v, (int)vlen);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek_to_first(iter);
int i = 0;
while (rocksdb_iter_valid(iter)) {
size_t klen, vlen;
const char *key = rocksdb_iter_key(iter, &klen);
const char *value = rocksdb_iter_value(iter, &vlen);
KV kv;
kvDeserial(&kv, (char *)key);
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
i++;
rocksdb_iter_next(iter);
}
rocksdb_iter_destroy(iter);
}
{
char buf[128] = {0};
KV kv = {.k1 = 0, .k2 = 0};
int len = kvSerial(&kv, buf);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("invalid iter");
}
{
char buf[128] = {0};
KV kv = {.k1 = 100, .k2 = 0};
int len = kvSerial(&kv, buf);
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
rocksdb_iter_seek(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("invalid iter\n");
rocksdb_iter_seek_for_prev(iter, buf, len);
if (!rocksdb_iter_valid(iter)) {
printf("stay invalid iter\n");
} else {
size_t klen = 0, vlen = 0;
const char *key = rocksdb_iter_key(iter, &klen);
const char *value = rocksdb_iter_value(iter, &vlen);
KV kv;
kvDeserial(&kv, (char *)key);
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
}
}
}
}
}
// char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
// printf("Get value %s, and len = %d\n", v, (int)vlen);
rocksdb_column_family_handle_destroy(cfHandle[0]);
rocksdb_column_family_handle_destroy(cfHandle[1]);
rocksdb_close(db);
// {
// // rocksdb_options_t *Options = rocksdb_options_create();
// db = rocksdb_open(comm, path, &err);
// if (db != NULL) {
// rocksdb_options_t *cfo = rocksdb_options_create_copy(comm);
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
// rocksdb_options_set_comparator(cfo, cmp1);
// rocksdb_column_family_handle_t *handle = rocksdb_create_column_family(db, cfo, "cf1", &err);
// rocksdb_column_family_handle_destroy(handle);
// rocksdb_close(db);
// db = NULL;
// }
// }
// int ncf = 2;
// rocksdb_column_family_handle_t **pHandle = malloc(ncf * sizeof(rocksdb_column_family_handle_t *));
// {
// rocksdb_options_t *options = rocksdb_options_create_copy(comm);
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
// rocksdb_options_t *dbOpts1 = rocksdb_options_create_copy(comm);
// rocksdb_options_t *dbOpts2 = rocksdb_options_create_copy(comm);
// rocksdb_options_set_comparator(dbOpts2, cmp1);
// // rocksdb_column_family_handle_t *cf = rocksdb_create_column_family(db, dbOpts1, "cmp1", &err);
// const char *pName[] = {"default", "cf1"};
// const rocksdb_options_t **pOpts = malloc(ncf * sizeof(rocksdb_options_t *));
// pOpts[0] = dbOpts1;
// pOpts[1] = dbOpts2;
// rocksdb_options_t *allOptions = rocksdb_options_create_copy(comm);
// db = rocksdb_open_column_families(allOptions, "test", ncf, pName, pOpts, pHandle, &err);
// }
// // rocksdb_options_t *options = rocksdb_options_create();
// // rocksdb_options_set_create_if_missing(options, 1);
// // //rocksdb_open_column_families(const rocksdb_options_t *options, const char *name, int num_column_families,
// // const char *const *column_family_names,
// // const rocksdb_options_t *const *column_family_options,
// // rocksdb_column_family_handle_t **column_family_handles, char **errptr);
// for (int i = 0; i < 100; i++) {
// char buf[128] = {0};
// rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
// KV kv = {.k1 = i, .k2 = i};
// kvSerial(&kv, buf);
// rocksdb_put_cf(db, wopt, pHandle[0], buf, strlen(buf), (const char *)&i, sizeof(i), &err);
// }
// rocksdb_close(db);
// Write
// rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
// rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
//// Read
// rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
// size_t vallen = 0;
// char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
// printf("val:%s\n", val);
//// Update
//// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
//// Delete
// rocksdb_delete(db, writeoptions, "key", 3, &err);
//// Read again
// val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
// printf("val:%s\n", val);
// rocksdb_close(db);
return 0;
}
......@@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
slug: /
---
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
......
......@@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
......@@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
......
......@@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
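As a minimal sketch of this model (reusing the d1001 example and the metric names above), each DCP gets its own table whose first column is the timestamp:

```sql
-- One table per data collection point; the first column must be the timestamp,
-- which TDengine uses as the index.
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
```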
......
......@@ -102,7 +102,7 @@ sudo apt-get install tdengine
:::tip
This installation method is supported only for Debian and Ubuntu.
::::
:::
</TabItem>
<TabItem label="Windows" value="windows">
......
......@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
More configuration about connection，please refer to [Java Connector](/reference/connector/java)
More configuration about connection, please refer to [Java Connector](/reference/connector/java)
```php title="原生连接"
```php title="native"
{{#include docs/examples/php/connect.php}}
```
......@@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
Key differences
Key differences:
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
......@@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
<TabItem label="R" value="r">
1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
2. Install the dependency package `RJDBC`
2. Install the dependency package `RJDBC`:
```R
install.packages("RJDBC")
......@@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
</TabItem>
<TabItem label="PHP" value="php">
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
......@@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please check the available versions at [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine Location**
**Specify TDengine Location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
......@@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by the TDengine installation location.
> This way is useful in case the TDengine location can't be found automatically, or on macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install
......
......@@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
## Query Examples
If you want to query the data of `location=California.LosAngeles,groupid=2`，here is the query SQL:
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
```sql
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
......
......@@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
## Query Examples
If you want to query the data of `location=California.LosAngeles groupid=3`，here is the query SQL:
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
......
......@@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
## Query Examples
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}，here is the query SQL:
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
......
......@@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system capability can't be fully utilized; if it's set too high, unnecessary resource competition may result. A normal recommendation for the `vgroups` parameter is 2 times the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
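For illustration, assuming a server with 8 CPU cores, the recommendation above translates to something like the following (the database name and value are assumptions, not tuned settings):

```sql
-- 2 x 8 cores = 16 vgroups; verify against actual system resources.
CREATE DATABASE db VGROUPS 16;
```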
## Sample Programs
......@@ -98,7 +98,7 @@ The main Program is responsible for:
3. Start reading threads
4. Output writing speed every 10 seconds
The main program provides 4 parameters for tuning
The main program provides 4 parameters for tuning:
1. The number of reading threads, default value is 1
2. The number of writing threads, default value is 2
......@@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
If you want to launch the sample program on a remote server, please follow the steps below:
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
```
mvn package
```
......@@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
pip3 install faster-fifo
```
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py``sql_writer.py` and `mockdatasource.py`.
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
4. Execute the program
......
### python Kafka 客户端
### python Kafka client
For python kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
......@@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
<details>
<summary>kafka_example_consumer</summary>
`kafka_example_consumer` is `consumer`，which is responsible for consuming data from kafka and writing it to TDengine.
`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
```py
{{#include docs/examples/python/kafka_example_consumer.py}}
......
......@@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
## Introduction
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns：>, <, =, <\>, like
- Filter on tags or data columns: >, <, =, <\>, like
- Grouping of results: `Group By`
- Sorting of results: `Order By`
- Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results
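As a sketch combining several of the features above, assuming the `meters` super table used elsewhere in this documentation:

```sql
-- Filter on a tag, aggregate per 10-minute window, and cap the output.
SELECT _wstart, AVG(current), MAX(voltage)
FROM meters
WHERE groupid = 2 AND ts > NOW - 1d
INTERVAL(10m)
LIMIT 10;
```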
......@@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
:::note
1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
:::
......
......@@ -252,9 +252,9 @@ create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId
```
Create the UDF:
```bash
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in the query
Use the UDF in the query:
```bash
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
......@@ -271,9 +271,9 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
## Implement a UDF in Python
Implement the specified interface functions when implementing a UDF in Python.
- implement `process` function for the scalar UDF
- implement `start`, `reduce`, `finish` for the aggregate UDF
- implement `init` for initialization and `destroy` for termination
- implement `process` function for the scalar UDF.
- implement `start`, `reduce`, `finish` for the aggregate UDF.
- implement `init` for initialization and `destroy` for termination.
### Implement a Scalar UDF in Python
......
......@@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
- Internal function `NOW` can be used to get the current timestamp on the client side.
- The current timestamp of the client side is applied when `NOW` is used to insert data.
- Epoch Time：timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
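For example, `NOW` arithmetic can be combined with a down-sampling window (assuming the d1001 table used in other examples):

```sql
-- Average current over the last 2 hours, one value per 10 minutes.
SELECT AVG(current) FROM d1001 WHERE ts > NOW - 2h INTERVAL(10m);
```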
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
......
......@@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX：The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIX：The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
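A hypothetical `CREATE DATABASE` statement exercising a few of the options above might look as follows (the values are illustrative, not recommendations):

```sql
-- Keep WAL files for 1 hour for subscribers; ignore a 2-character
-- table-name prefix when distributing tables to vgroups.
CREATE DATABASE power WAL_RETENTION_PERIOD 3600 TABLE_PREFIX 2 STT_TRIGGER 8;
```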
......
......@@ -33,7 +33,7 @@ column_definition:
SHOW STABLES [LIKE tb_name_wildcard];
```
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
The preceding SQL statement shows all supertables in the current TDengine database.
### View the CREATE Statement for a Supertable
......
......@@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31;
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
## Automatically Create Table When Inserting
......
......@@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
For sub-table and super table
For sub-table and super table:
```sql
SELECT *
......
......@@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
## Introduction
Prior to TDengine 3.0.3.0 (excluded)，only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create index on any other tags. From version 3.0.3.0, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create an index on any other tag. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
1. The syntax of creating an index
```sql
CREATE INDEX index_name ON tbl_name (tagColName
CREATE INDEX index_name ON tbl_name (tagColName)
```
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
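For example, assuming a super table `meters` with a tag `location`, an index could be created like this (the index name is arbitrary):

```sql
CREATE INDEX idx_location ON meters (location);
```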
......
......@@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
**More explanations**:
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm] For example, TO_ISO8601(1, "+00:00").
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp.
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use.
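A minimal usage sketch, formatting a UNIX timestamp in a given time zone:

```sql
-- Returns an ISO8601 string rendered in UTC+8.
SELECT TO_ISO8601(1, '+08:00');
```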
......@@ -626,7 +626,7 @@ algo_type: {
**Applicable table types**: standard tables and supertables
**Explanations**
**Explanations**:
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
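Assuming this section describes `APERCENTILE` and the `meters` table from other examples, a t-digest based query might look like:

```sql
-- Approximate 90th percentile of current using the t-digest algorithm.
SELECT APERCENTILE(current, 90, 't-digest') FROM meters;
```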
......@@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```
**Description**：`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Return value type**: Double if the input value is not NULL;
......@@ -680,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
**Applicable tables**: table, STable, outer in nested query
**Explanations**
**Explanations**:
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
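A sketch of `ELAPSED` with an explicit time unit and a window (the table name is assumed):

```sql
-- Valid-data time length per 1-day window, expressed in hours.
SELECT ELAPSED(ts, 1h) FROM d1001 WHERE ts > NOW - 7d INTERVAL(1d);
```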
......@@ -758,7 +758,7 @@ SUM(expr)
HYPERLOGLOG(expr)
```
**Description**
**Description**:
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
However, when the data volume is very small, the result may not be accurate; in this case it's recommended to use `select count(data) from (select unique(col) as data from table)`.
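For example, an approximate distinct count over an assumed `meters` table:

```sql
SELECT HYPERLOGLOG(voltage) FROM meters;
```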
......@@ -772,10 +772,10 @@ HYPERLOGLOG(expr)
### HISTOGRAM
```sql
HISTOGRAM(expr，bin_type, bin_description, normalized)
HISTOGRAM(expr, bin_type, bin_description, normalized)
```
**Description**：Returns count of data points in user-specified ranges.
**Description**: Returns count of data points in user-specified ranges.
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
......@@ -783,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
**Applicable table types**: table, STable
**Explanations**
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"
- bin_description: parameter to describe how to generate buckets，can be in the following JSON formats for each bin_type respectively:
**Explanations**:
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
- bin_description: parameter to describe how to generate buckets. It can be in the following JSON formats for each bin_type respectively:
- "user_input": "[1, 3, 5, 7]":
User specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
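Putting the parameters together, a `user_input` histogram over an assumed `voltage` column could be written as:

```sql
-- Buckets with user-specified edges; normalized = 0 returns raw counts.
SELECT HISTOGRAM(voltage, 'user_input', '[200, 210, 220, 230, 240]', 0) FROM meters;
```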
......@@ -1107,7 +1107,7 @@ ignore_negative: {
**More explanation**:
- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
### DIFF
......@@ -1131,7 +1131,7 @@ ignore_negative: {
**More explanation**:
- The number of result rows is the number of rows minus one; no output is generated for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
### IRATE
......@@ -1183,7 +1183,7 @@ STATECOUNT(expr, oper, val)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val：Numeric types
- val: Numeric types
**Return value type**: Integer
......@@ -1210,7 +1210,7 @@ STATEDURATION(expr, oper, val, unit)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val：Numeric types
- val: Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default.
**Return value type**: Integer
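As a sketch, assuming the d1001 sub-table from other examples, the duration for which voltage stayed at or above 205, reported in seconds:

```sql
SELECT STATEDURATION(voltage, 'GE', 205, 1s) FROM d1001;
```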
......
......@@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
1. NONE: No fill (the default fill mode)
2. VALUE：Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
3. PREV：Fill with the previous non-NULL value, `FILL(PREV)`
4. NULL：Fill with NULL, `FILL(NULL)`
5. LINEAR：Fill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXT：Fill with the next non-NULL value, `FILL(NEXT)`
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled.
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
4. NULL: Fill with NULL, `FILL(NULL)`
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT` or `LINEAR`, because filling can't be performed without any data. For the filling modes `NULL` and `VALUE`, however, filling can be performed even when there is no data at all, and whether to fill depends on the user's application. To support this forced filling behavior without breaking the behavior of the existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
1. NULL_F: Fill `NULL` by force
2. VALUE_F: Fill `VALUE` by force
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly the same as what the name suggests.
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly the same as what the name suggests.
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. they don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. they don't fill by force. It's suggested that there is no filling by force in stream processing.
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. they fill by force; `VALUE` and `VALUE_F` are the same, i.e. they fill by force. It's suggested that there is always filling by force when used with `INTERP`.
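For example, forcing NULL fill even if the whole range contains no data (schema as in the `meters` examples; the time range is illustrative):

```sql
SELECT _wstart, AVG(current) FROM meters
WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
INTERVAL(10m) FILL(NULL_F);
```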
......@@ -97,7 +98,7 @@ The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
There are two kinds of time windows: sliding window and flip time/tumbling window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
![TDengine Database Time Window](./timewindow-1.webp)
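A sliding-window sketch matching the figure (names assumed): 1-hour windows that advance every 30 minutes, so consecutive windows overlap:

```sql
SELECT _wstart, MAX(voltage) FROM meters
WHERE ts > NOW - 1d
INTERVAL(1h) SLIDING(30m);
```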
......@@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
### State Window
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
![TDengine Database Status Window](./timewindow-3.webp)
......@@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
### Session Window
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
![TDengine Database Session Window](./timewindow-2.webp)
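A minimal session-window query under the 12-second tolerance described above (the table name is assumed):

```sql
SELECT _wstart, COUNT(*) FROM d1001 SESSION(ts, 12s);
```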
......@@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
### Examples
A table of intelligent meters can be created by the SQL statement below
A table of intelligent meters can be created by the SQL statement below:
```
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
......
......@@ -112,7 +112,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering，the default value is AT_ONCE:
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering; the default is AT_ONCE:
1. AT_ONCE: triggers on write
......
......@@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
- JSON format
- JSON format:
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
......
......@@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
1. If there are escape characters in identifiers (database name, table name, column name)
- Identifier without ``: An error will be returned because an identifier must consist of digits, ASCII characters or underscores and can't start with a digit
- Identifier quoted with ``：Original content is kept, no escaping
- Identifier quoted with ``: Original content is kept, no escaping
2. If there are escape characters in values
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
......@@ -184,7 +184,7 @@ Provides information about standard tables and subtables.
## INS_COLUMNS
| # | **列名** | **数据类型** | **说明** |
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
......
......@@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
description: This document describes how to use the SHOW statement in TDengine.
---
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
## SHOW APPS
......@@ -187,45 +187,45 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
Examples Below is an example of this command to display the block distribution of table `d0` in detailed format.
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
```sql
show table distributed d0\G;
show table distributed d0\G;
```
<details>
<summary> Show Example </summary>
<pre><code>
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
Total_Blocks : Table `d0` contains total 5 blocks
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Average_size: The average size of each block is 18.73 KB
Compression_Ratio: The data compression rate is 23.98%
*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: Table `d0` contains 20,000 rows
Inmem_Rows The rows still in memory, i.e. not committed in disk, is 0, i.e. none such rows
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows
MinRows: The minimum number of rows in a block is 3,616
MinRows: The minimum number of rows in a block is 3,616
MaxRows The maximum number of rows in a block is 4,096B
MaxRows: The maximum number of rows in a block is 4,096
Average_Rows The average number of rows in a block is 4,000
Average_Rows: The average number of rows in a block is 4,000
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
Total_Tables: The number of child tables, 1 in this example
Total_Tables: The number of child tables, 1 in this example
Total_Files The number of files storing the table's data, 2 in this example
Total_Files: The number of files storing the table's data, 2 in this example
*************************** 4.row ***************************
......@@ -361,7 +361,7 @@ SHOW VARIABLES;
SHOW DNODE dnode_id VARIABLES;
```
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
## SHOW VGROUPS
......@@ -369,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
SHOW [db_name.]VGROUPS;
```
Shows information about all vgroups in the current database.
Shows information about all vgroups in the current database.
## SHOW VNODES
......
......@@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
- Information that you input is given in lowercase.
- \[ \] means optional input, excluding [] itself.
- | means one of a few options, excluding | itself.
- means the item prior to it can be repeated multiple times.
- ... means the item prior to it can be repeated multiple times.
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
......
......@@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
chmod +x TDinsight.sh
```
Prepare
Prepare:
1. TDengine Server
- The URL of REST servicefor example `http://localhost:6041` if TDengine is deployed locally
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
- User name and password
2. Grafana Alert Notification
......
......@@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
Network connection diagnostics can be executed between Linux, Windows, and macOS systems.
Diagnostic steps
Diagnostic steps:
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
-l <pktlen\> The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
Please note that the package length must be the same in the above 2 commands executed on the server side and the client side respectively.
Output of the server side for the example is below:
......
......@@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
- authentication information is shown below
- authentication information is shown below:
```text
Authorization: Taosd <TOKEN>
```
- Basic authentication information is shown below
- Basic authentication information is shown below:
```text
Authorization: Basic <TOKEN>
......
......@@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
After TDengine server or client installation, `taos.h` is located at:
- Linux:`/usr/local/taos/include`
- Windows:`C:\TDengine\include`
- macOS:`/usr/local/include`
- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`
The dynamic libraries for the TDengine client driver are located in:
......@@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is determined by the number of characters contained in the timestamp.
schemaless 其他相关的接口
Other schemaless-related interfaces:
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
......
......@@ -90,46 +90,35 @@ For specific error codes, please refer to.
| 0x2306 | Batch is empty! | prepare statement Add parameters and then execute batch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use execute update(), not execute query(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use execute query(), not execute update(). |
| 0x2309 | invalid sql for executeQuery: (?) | - |
| 0x230a | Database not specified or available | - |
| 0x230b | invalid sql for executeUpdate: (?) | - |
| 0x230c | invalid sql for execute: (?) | - |
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
| 0x2311 | can't register JDBC-RESTful driver | - |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2313 | invalid sql | - |
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
| 0x2316 | unknown timestamp precision | - |
| 0x2317 | | A wrong request type was used in the REST connection. |
| 0x2318 | | A data transmission exception occurred during the REST connection. Please check the network status and try again. |
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231b | invalid json format | - |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2351 | failed to create subscription | - |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2358 | fetch to the end of resultSet | - |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
| 0x2374 | consumer config error | - |
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| - | can't create connection with server within: | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| - | failed to complete the task within the specified time : | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
......@@ -981,8 +970,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- group.id: Specifies the group that the consumer is in.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the connection type with TDengine, `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeoutWebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeoutsocket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
#### Subscribe to consume data
......@@ -1278,9 +1267,9 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
**Cause**taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Solution** Use taos-jdbcdriver 3.0.2.
**Solution**: Use taos-jdbcdriver 3.0.2.
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
......
......@@ -121,7 +121,7 @@ The parameters are described as follows:
- **username/password**: Username and password used to create connections.
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and WebSocket connections default to `localhost:6041`.
- **database**: Specify the default database to connect to. It's optional.
- **params**Optional parameters.
- **params**: Optional parameters.
A sample DSN description string is as follows:
......
......@@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified.
- `url` The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
......
......@@ -321,18 +321,18 @@ let cursor = conn.cursor();
| package name | version | TDengine version | Description |
|------------------|---------|---------------------|------------------------------------------------------------------|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
### REST Connector
| package name | version | TDengine version | Description |
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041. |
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
## API Reference
......
......@@ -165,7 +165,7 @@ The parameters are described as follows:
* **username/password**: Username and password used to create connections.
* **host/port**: Specifies the server and port to establish a connection. WebSocket connections default to `localhost:6041`.
* **database**: Specify the default database to connect to. It's optional.
* **params**Optional parameters.
* **params**: Optional parameters.
A sample DSN description string is as follows:
......@@ -279,7 +279,7 @@ ws://localhost:6041/test
| TDengine.Connector | Description |
|--------------------|--------------------------------|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
| 3.0.1 | Support WebSocket and CloudWith function query, insert, and parameter binding|
| 3.0.1 | Support WebSocket and Cloud, with function query, insert, and parameter binding|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
......
......@@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
The PHP connector relies on the TDengine client driver.
Project Repository<https://github.com/Yurunsoft/php-tdengine>
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
After TDengine client or server is installed, `taos.h` is located at:
- Linux`/usr/local/taos/include`
- Windows`C:\TDengine\include`
- macOS`/usr/local/include`
- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`
TDengine client driver is located at:
- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`
- macOS`/usr/local/lib/libtaos.dylib`
- macOS: `/usr/local/lib/libtaos.dylib`
## Supported Platforms
- Windows、Linux、MacOS
- Windows, Linux, and macOS
- PHP >= 7.4
......@@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
### Install php-tdengine
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
......@@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only an example; it can be replaced with any newer version. Please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine location**
**Specify TDengine location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
......@@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by TDengine location.
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install
......
......@@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
- **trying_interval**: Specifies the interval between retries when an insert fails. The value must be a positive number, and it only takes effect when retrying is enabled. Available with v3.0.9+.
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
 
- **continue_if_fail**: Allows the user to specify the reaction if the insertion fails.
- "continue_if_fail": "no" // means taosBenchmark will exit if it fails to insert, which is the default reaction behavior.
......
......@@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, and you can see the monitoring result.
![TDengine Database TDinsight 选择数据库](./assets/select_dashboard_db.webp)
![TDengine Database TDinsight select database](./assets/select_dashboard_db.webp)
## TDinsight dashboard details
......
......@@ -151,7 +151,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
| -------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
| Value Range | 0,1 0: Not allowed;1:allowed |
| Value Range | 0, 1 (0: Not allowed; 1: Allowed) |
| Default Value | 1 |
......@@ -183,7 +183,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
| -------- | -------------------------------- |
| Applicable | Server only |
| Meaning | Whether count()/hyperloglog() returns a value if the input data is empty or NULL |
| Vlue Range | 0:Return empty line,1:Return 0 |
| Value Range | 0: Return empty line, 1: Return 0 |
| Default | 1 |
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
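As a sketch of the behavior this parameter controls (assuming a table `t` with no rows in the queried range):

```sql
-- with the value 1 (default), this returns one row containing 0;
-- with the value 0, it returns an empty result set
SELECT COUNT(*) FROM t WHERE ts > NOW;
```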
......@@ -661,7 +661,7 @@ The charset that takes effect is UTF-8.
## 3.0 Parameters
| # | **参数** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
| 1 | firstEp | Yes | Yes | |
| 2 | secondEp | Yes | Yes | |
......
......@@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
- Group by column name(s): `group by` or `partition by` column names separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data if the SQL uses `group by` or `partition by`. For example, it can show data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
- Format to: formats the legend for `group by` or `partition by`. For example, it can display series data by `dnode_ep` if the SQL is the same as above, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
:::note
Because the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL command to specify the database name.
:::
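For example (assuming the monitoring database `log` created by taosKeeper), the database can be given inline:

```sql
-- qualify the table with the database name instead of relying on `use db`
SELECT COUNT(*) FROM log.dnodes_info;
```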
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located, as follows.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)
查询每台 TDengine 服务器指定间隔系统内存平均使用量如下.
The following example queries the average system memory usage for the specified interval on each TDengine server.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard3.webp)
......@@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
......
......@@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
### Edit SQL fields
Copy SQL bellow and paste it to the SQL edit area
Copy the SQL below and paste it into the SQL edit area:
```sql
SELECT
......@@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
### Edit "action"
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
```
Basic cm9vdDp0YW9zZGF0YQ==
```
......
......@@ -46,15 +46,14 @@ Execute in any directory:
````
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
tar xzf confluent-7.1.1.tar.gz -C /opt/test
tar xzf confluent-7.1.1.tar.gz -C /opt/
````
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1
PATH=$CONFLUENT_HOME/bin
export PATH
export PATH=$CONFLUENT_HOME/bin:$PATH
```
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile).
......@@ -329,7 +328,15 @@ DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```
Use the TDengine CLI to execute the SQL script.
......@@ -384,7 +391,7 @@ confluent local services connect connector status
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
````
confluent local services connect connector unload TDengineSourceConnector
confluent local services connect connector unload TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector
````
......
......@@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
The TDengine team immediately saw the benefits of using TDengine to process time-series data and Data Studio to analyze it, and they got to work to create a connector for Data Studio.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
![02](gds/gds-02.png.webp)
......@@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
![06](gds/gds-06.png.webp)
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data some examples are shown below.
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
![07](gds/gds-07.png.webp)
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
......@@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). mnode adopts the RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. The other two follower mnodes need to be created through SQL commands in TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Dnodes communicate with each other to automatically get the EPs of all mnodes.
**Computation node (qnode)** A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, which means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. The TDengine client driver can get the list of qnodes through communication with the mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. A qnode can get data from a vnode and send the execution result to other qnodes for further processing. By introducing qnodes, TDengine achieves the separation between storage and computing.
**Stream Processing node (snode)** A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. snode is not coupled with a specific stream, that means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, it's identified by the EP of the dnode. mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. An snode is not coupled with a specific stream, which means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, and it's identified by the EP of the dnode. The mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
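As a sketch of how these logical nodes are created from TDengine CLI (the dnode IDs are placeholders, and the statements assume the target dnodes already exist):

```sql
CREATE MNODE ON DNODE 2;  -- add a follower mnode
CREATE QNODE ON DNODE 3;  -- add a computation node
CREATE SNODE ON DNODE 4;  -- add a stream processing node
```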
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using the RAFT protocol. Write operations can only be performed on the leader vnode, and are then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system-unique ID, aka VGroup ID, to each vgroup. Virtual nodes with the same vnode group ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replicas for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
......@@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
- Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
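A sketch of Step 1 (the End Point below is a placeholder for the new node's FQDN and port):

```sql
CREATE DNODE "h2.taos.com:6030";
-- verify that the new dnode has joined the cluster
SHOW DNODES;
```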
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if its not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
### A Typical Data Writing Process
......@@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables' quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table in it. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
......@@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
<center> Figure 3: TDengine Leader writing process </center>
1. Leader vnode receives the application data insertion request, verifies it, and moves to the next step;
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `“wal_level”` is set to 1, vnode doesn't invoked fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `"wal_level"` is set to 1, vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Leader vnode Writes the data into memory and add the record to “skip list”;
4. Leader vnode writes the data into memory and adds the record to "skip list";
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will be returned directly to the application.
......@@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is the same as in the leader vnode.
3. Write into memory and add the record to “skip list”.
3. Write into memory and add the record to "skip list".
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
......@@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. Its necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
### Synchronous Replication
......@@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba
### Tiered Storage
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".
dataDir format is as follows:
......@@ -202,7 +202,7 @@ dataDir data_path [tier_level]
Where data_path is the folder path of the mount point and tier_level is the media storage tier. The higher the media storage tier, the older the data file. Multiple hard disks can be mounted at the same storage tier, and data files on the same storage tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a specified tier_level, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
......
......@@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici
### 2. Manual data migration
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then pass the time to import simultaneously through the SQL statementwritten to the database.
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then write the merged timelines into the database through SQL statements.
Manual migration of data requires attention to the following two issues:
......@@ -258,7 +258,7 @@ Equivalent function: apercentile
Example:
```sql
Select apercentile(col1, 50, t-digest) from table_name
select apercentile(col1, 50, "t-digest") from table_name
```
Remark:
......
......@@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)
:::note
1. The example code above works with connectors using either the REST connection or the native connection.
2. The only thing to note is that because the REST interface is stateless, the `use db` statement cannot be used to switch databases.
2. The only thing to note is that because the REST interface is stateless, the `use db` statement cannot be used to switch databases. Besides specifying the database in the REST parameters, you can also use `<db_name>.<table_name>` in SQL statements to specify the database.
:::
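For example (assuming a database `test` containing the table `meters`), the database can be specified inline:

```sql
SELECT COUNT(*) FROM test.meters;
```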
......
......@@ -90,46 +90,35 @@ JDBC 连接器可能报错的错误码包括 4 种:
| 0x2306 | Batch is empty! | Add parameters with prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Update operations should use executeUpdate(), not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Query operations should use executeQuery(), not executeUpdate(). |
| 0x2309 | invalid sql for executeQuery: (?) | - |
| 0x230a | Database not specified or available | - |
| 0x230b | invalid sql for executeUpdate: (?) | - |
| 0x230c | invalid sql for execute: (?) | - |
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was used again after being closed, or whether the connection is healthy. |
| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The JNI driver cannot be registered. Check whether the url is correct. |
| 0x2311 | can't register JDBC-RESTful driver | - |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2313 | invalid sql | - |
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting between TDengine and JDBC data types. |
| 0x2316 | unknown timestamp precision | - |
| 0x2317 | | A wrong request type was used in the REST connection. |
| 0x2318 | | A data transmission exception occurred in the REST connection. Check the network status and try again. |
| 0x2319 | user is required | The user name is missing when creating the connection |
| 0x231a | password is required | The password is missing when creating the connection |
| 0x231b | invalid json format | - |
| 0x231c | httpEntity is null, sql: | An execution exception occurred in the REST connection |
| 0x2350 | unknown error | Unknown exception; please return it to the developers on GitHub. |
| 0x2351 | failed to create subscription | - |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified for the native connection |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred when executing prepareStatement on the native connection. Check the taos log to locate the fault. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when the command was executed on the native connection. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set obtained by the native connection is abnormal. Check the connection status and try again. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2358 | fetch to the end of resultSet | - |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed for the native connection. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameter is empty when creating a subscription. Fill in the correct parameter. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Fill in the correct parameter. |
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Fill in the correct parameter. |
| 0x2374 | consumer config error | - |
| 0x2375 | topic reference has been destroyed | The topic reference was released while the data subscription was being created. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | The topic name is empty while the data subscription is being created. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transmission channel has been closed. Check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log according to the error message to locate the fault. |
| - | can't create connection with server within: | Increase the connection time with the httpConnectTimeout parameter, or check the connection to taosAdapter. |
| - | failed to complete the task within the specified time : | Increase the execution time with the messageWaitTimeout parameter, or check the connection to taosAdapter. |
| - | can't create connection with server within | Increase the connection time with the httpConnectTimeout parameter, or check the connection to taosAdapter. |
| - | failed to complete the task within the specified time | Increase the execution time with the messageWaitTimeout parameter, or check the connection to taosAdapter. |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
......
......@@ -197,7 +197,7 @@ SHOW TABLE DISTRIBUTED table_name;
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
Total_Blocks: Table `d0` contains a total of 5 blocks
......
......@@ -200,6 +200,12 @@ docker run -d \
- Group by column name(s): `group by` or `partition by` column names separated by **half-width** commas. For a `group by` or `partition by` query statement, setting the `Group by` column displays multi-dimensional data. For example, with the input SQL `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and the Group by column name set to `dnode_ep`, the data can be displayed by `dnode_ep`.
- Format to: the legend format for multi-dimensional data in Group by or Partition by scenarios. For example, with the input SQL above, setting Format to to `mem_system_{{dnode_ep}}` makes the displayed legend name the formatted column name.
:::note
Because the REST interface is stateless, the `use db` statement cannot be used to switch databases. In the Grafana plugin, `<db_name>.<table_name>` can be used in SQL statements to specify the database.
:::
Following the default prompt, query the average system memory usage for the specified interval on the server where the current TDengine deployment is located, as follows:
![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp)
......
......@@ -48,15 +48,14 @@ Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍
```
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
tar xzf confluent-7.1.1.tar.gz -C /opt/test
tar xzf confluent-7.1.1.tar.gz -C /opt/
```
Then add the `$CONFLUENT_HOME/bin` directory to the PATH.
```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1
PATH=$CONFLUENT_HOME/bin
export PATH
export PATH=$CONFLUENT_HOME/bin:$PATH
```
The above script can be appended to the current user's profile file (~/.profile or ~/.bash_profile).
......@@ -333,7 +332,15 @@ DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
```
Use the TDengine CLI to execute the SQL file.
......@@ -388,7 +395,7 @@ confluent local services connect connector status
If you followed the preceding steps, there should now be two active connectors. Use the following command to unload them:
```
confluent local services connect connector unload TDengineSourceConnector
confluent local services connect connector unload TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector
```
......
......@@ -77,7 +77,7 @@ description: A summary of solutions to common problems
- On Windows, use the PowerShell command Test-NetConnection -ComputerName {fqdn} -Port {port} to check whether the server-side port is accessible
11. You can also use the network connectivity test built into the taos program to verify whether the specified port between the server and the client is open: [Diagnostics and Others](https://docs.taosdata.com/3.0-preview/operation/diagnose/)
11. You can also use the network connectivity test built into the taos program to verify whether the specified port between the server and the client is open: [Diagnostics and Others](../../operation/diagnose/)
### 5. What should I do if I encounter the error "Unable to resolve FQDN"?
......
......@@ -10,7 +10,7 @@
<description>Demo project for TDengine</description>
<properties>
<spring.version>5.3.26</spring.version>
<spring.version>5.3.27</spring.version>
</properties>
<dependencies>
......
......@@ -167,7 +167,7 @@ DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name
DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
// let the stmt reclaim the TAOS_FIELD_E arrays allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
DLL_EXPORT void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields);
DLL_EXPORT void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields);
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
......@@ -230,6 +230,9 @@ DLL_EXPORT int taos_get_tables_vgId(TAOS *taos, const char *db, const char *tabl
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
// set the heartbeat thread quit mode: if quitByKill is 1, kill the thread; otherwise quit from within
DLL_EXPORT void taos_set_hb_quit(int8_t quitByKill);
DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type);
/* --------------------------schemaless INTERFACE------------------------------- */
......@@ -270,10 +273,10 @@ DLL_EXPORT const char *tmq_err2str(int32_t code);
/* ------------------------TMQ CONSUMER INTERFACE------------------------ */
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
......@@ -283,8 +286,9 @@ DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char* pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char* pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
int32_t *numOfAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
......
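A minimal sketch of how `tmq_get_topic_assignment` and `tmq_offset_seek` above might be used together, assuming an already-subscribed consumer `tmq` and a placeholder topic name `topic_meters`; how the returned array should be released is not shown in this hunk, so that step is omitted:

```c
tmq_topic_assignment *pAssign = NULL;
int32_t numOfAssignment = 0;

// fetch the vgroup/offset assignment for one subscribed topic
int32_t code = tmq_get_topic_assignment(tmq, "topic_meters", &pAssign, &numOfAssignment);
if (code == 0) {
  for (int32_t i = 0; i < numOfAssignment; ++i) {
    // rewind every vgroup of the topic to its earliest available offset
    tmq_offset_seek(tmq, "topic_meters", pAssign[i].vgId, pAssign[i].begin);
  }
}
```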
......@@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_SYSTABLE_H
#define TDENGINE_SYSTABLE_H
......
......@@ -213,14 +213,6 @@ enum {
FETCH_TYPE__NONE,
};
typedef struct {
int8_t fetchType;
union {
SSDataBlock data;
void* meta;
};
} SFetchRet;
typedef struct SVarColAttr {
int32_t* offset; // start position for each entry in the list
uint32_t length; // used buffer size that contain the valid data
......@@ -342,6 +334,8 @@ typedef struct {
float f;
};
size_t length;
bool keyEscaped;
bool valueEscaped;
} SSmlKv;
#define QUERY_ASC_FORWARD_STEP 1
......@@ -380,6 +374,8 @@ typedef struct STUidTagInfo {
#define UD_GROUPID_COLUMN_INDEX 1
#define UD_TAG_COLUMN_INDEX 2
int32_t taosGenCrashJsonMsg(int signum, char **pMsg, int64_t clusterId, int64_t startTime);
#ifdef __cplusplus
}
#endif
......
......@@ -176,7 +176,9 @@ extern int32_t tsUptimeInterval;
extern int32_t tsRpcRetryLimit;
extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
......
......@@ -416,7 +416,7 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
return pSW;
}
static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
static FORCE_INLINE void tDeleteSchemaWrapper(SSchemaWrapper* pSchemaWrapper) {
if (pSchemaWrapper) {
taosMemoryFree(pSchemaWrapper->pSchema);
taosMemoryFree(pSchemaWrapper);
......@@ -691,6 +691,7 @@ typedef struct {
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
int32_t tDeserializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
void tFreeSAlterUserReq(SAlterUserReq* pReq);
typedef struct {
char user[TSDB_USER_LEN];
......@@ -1928,7 +1929,7 @@ typedef struct {
#define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
#define STREAM_DEFAULT_IGNORE_UPDATE 0
#define STREAM_DEFAULT_IGNORE_UPDATE 1
#define STREAM_CREATE_STABLE_TRUE 1
#define STREAM_CREATE_STABLE_FALSE 0
......@@ -2121,7 +2122,6 @@ static FORCE_INLINE void* tDeserializeSMVSubscribeReq(void* buf, SMVSubscribeReq
typedef struct {
char key[TSDB_SUBSCRIBE_KEY_LEN];
SArray* lostConsumers; // SArray<int64_t>
SArray* removedConsumers; // SArray<int64_t>
SArray* newConsumers; // SArray<int64_t>
} SMqRebInfo;
......@@ -2132,10 +2132,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
return NULL;
}
tstrncpy(pRebInfo->key, key, TSDB_SUBSCRIBE_KEY_LEN);
pRebInfo->lostConsumers = taosArrayInit(0, sizeof(int64_t));
if (pRebInfo->lostConsumers == NULL) {
goto _err;
}
pRebInfo->removedConsumers = taosArrayInit(0, sizeof(int64_t));
if (pRebInfo->removedConsumers == NULL) {
goto _err;
......@@ -2146,7 +2142,6 @@ static FORCE_INLINE SMqRebInfo* tNewSMqRebSubscribe(const char* key) {
}
return pRebInfo;
_err:
taosArrayDestroy(pRebInfo->lostConsumers);
taosArrayDestroy(pRebInfo->removedConsumers);
taosArrayDestroy(pRebInfo->newConsumers);
taosMemoryFreeClear(pRebInfo);
......@@ -2931,6 +2926,42 @@ typedef struct SMqVgOffset {
int32_t tEncodeMqVgOffset(SEncoder* pEncoder, const SMqVgOffset* pOffset);
int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset);
typedef struct {
SMsgHead head;
int32_t taskId;
} SVPauseStreamTaskReq;
typedef struct {
int8_t reserved;
} SVPauseStreamTaskRsp;
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
} SMPauseStreamReq;
int32_t tSerializeSMPauseStreamReq(void* buf, int32_t bufLen, const SMPauseStreamReq* pReq);
int32_t tDeserializeSMPauseStreamReq(void* buf, int32_t bufLen, SMPauseStreamReq* pReq);
typedef struct {
SMsgHead head;
int32_t taskId;
int8_t igUntreated;
} SVResumeStreamTaskReq;
typedef struct {
int8_t reserved;
} SVResumeStreamTaskRsp;
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
int8_t igUntreated;
} SMResumeStreamReq;
int32_t tSerializeSMResumeStreamReq(void* buf, int32_t bufLen, const SMResumeStreamReq* pReq);
int32_t tDeserializeSMResumeStreamReq(void* buf, int32_t bufLen, SMResumeStreamReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
char stb[TSDB_TABLE_FNAME_LEN];
......@@ -3452,10 +3483,10 @@ typedef struct {
char data[]; // SSubmitReq2
} SSubmitReq2Msg;
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
int32_t tEncodeSubmitReq(SEncoder* pCoder, const SSubmitReq2* pReq);
int32_t tDecodeSubmitReq(SDecoder* pCoder, SSubmitReq2* pReq);
void tDestroySubmitTbData(SSubmitTbData* pTbData, int32_t flag);
void tDestroySubmitReq(SSubmitReq2* pReq, int32_t flag);
typedef struct {
int32_t affectedRows;
......
......@@ -179,6 +179,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
......@@ -256,6 +258,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MON_MSG)
TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
......
......@@ -213,140 +213,143 @@
#define TK_REPLACE 195
#define TK_STREAM 196
#define TK_INTO 197
#define TK_TRIGGER 198
#define TK_AT_ONCE 199
#define TK_WINDOW_CLOSE 200
#define TK_IGNORE 201
#define TK_EXPIRED 202
#define TK_FILL_HISTORY 203
#define TK_UPDATE 204
#define TK_SUBTABLE 205
#define TK_KILL 206
#define TK_CONNECTION 207
#define TK_TRANSACTION 208
#define TK_BALANCE 209
#define TK_VGROUP 210
#define TK_LEADER 211
#define TK_MERGE 212
#define TK_REDISTRIBUTE 213
#define TK_SPLIT 214
#define TK_DELETE 215
#define TK_INSERT 216
#define TK_NULL 217
#define TK_NK_QUESTION 218
#define TK_NK_ARROW 219
#define TK_ROWTS 220
#define TK_QSTART 221
#define TK_QEND 222
#define TK_QDURATION 223
#define TK_WSTART 224
#define TK_WEND 225
#define TK_WDURATION 226
#define TK_IROWTS 227
#define TK_ISFILLED 228
#define TK_CAST 229
#define TK_NOW 230
#define TK_TODAY 231
#define TK_TIMEZONE 232
#define TK_CLIENT_VERSION 233
#define TK_SERVER_VERSION 234
#define TK_SERVER_STATUS 235
#define TK_CURRENT_USER 236
#define TK_CASE 237
#define TK_WHEN 238
#define TK_THEN 239
#define TK_ELSE 240
#define TK_BETWEEN 241
#define TK_IS 242
#define TK_NK_LT 243
#define TK_NK_GT 244
#define TK_NK_LE 245
#define TK_NK_GE 246
#define TK_NK_NE 247
#define TK_MATCH 248
#define TK_NMATCH 249
#define TK_CONTAINS 250
#define TK_IN 251
#define TK_JOIN 252
#define TK_INNER 253
#define TK_SELECT 254
#define TK_DISTINCT 255
#define TK_WHERE 256
#define TK_PARTITION 257
#define TK_BY 258
#define TK_SESSION 259
#define TK_STATE_WINDOW 260
#define TK_EVENT_WINDOW 261
#define TK_SLIDING 262
#define TK_FILL 263
#define TK_VALUE 264
#define TK_VALUE_F 265
#define TK_NONE 266
#define TK_PREV 267
#define TK_NULL_F 268
#define TK_LINEAR 269
#define TK_NEXT 270
#define TK_HAVING 271
#define TK_RANGE 272
#define TK_EVERY 273
#define TK_ORDER 274
#define TK_SLIMIT 275
#define TK_SOFFSET 276
#define TK_LIMIT 277
#define TK_OFFSET 278
#define TK_ASC 279
#define TK_NULLS 280
#define TK_ABORT 281
#define TK_AFTER 282
#define TK_ATTACH 283
#define TK_BEFORE 284
#define TK_BEGIN 285
#define TK_BITAND 286
#define TK_BITNOT 287
#define TK_BITOR 288
#define TK_BLOCKS 289
#define TK_CHANGE 290
#define TK_COMMA 291
#define TK_CONCAT 292
#define TK_CONFLICT 293
#define TK_COPY 294
#define TK_DEFERRED 295
#define TK_DELIMITERS 296
#define TK_DETACH 297
#define TK_DIVIDE 298
#define TK_DOT 299
#define TK_EACH 300
#define TK_FAIL 301
#define TK_FILE 302
#define TK_FOR 303
#define TK_GLOB 304
#define TK_ID 305
#define TK_IMMEDIATE 306
#define TK_IMPORT 307
#define TK_INITIALLY 308
#define TK_INSTEAD 309
#define TK_ISNULL 310
#define TK_KEY 311
#define TK_MODULES 312
#define TK_NK_BITNOT 313
#define TK_NK_SEMI 314
#define TK_NOTNULL 315
#define TK_OF 316
#define TK_PLUS 317
#define TK_PRIVILEGE 318
#define TK_RAISE 319
#define TK_RESTRICT 320
#define TK_ROW 321
#define TK_SEMI 322
#define TK_STAR 323
#define TK_STATEMENT 324
#define TK_STRICT 325
#define TK_STRING 326
#define TK_TIMES 327
#define TK_VALUES 328
#define TK_VARIABLE 329
#define TK_VIEW 330
#define TK_WAL 331
#define TK_PAUSE 198
#define TK_RESUME 199
#define TK_TRIGGER 200
#define TK_AT_ONCE 201
#define TK_WINDOW_CLOSE 202
#define TK_IGNORE 203
#define TK_EXPIRED 204
#define TK_FILL_HISTORY 205
#define TK_UPDATE 206
#define TK_SUBTABLE 207
#define TK_UNTREATED 208
#define TK_KILL 209
#define TK_CONNECTION 210
#define TK_TRANSACTION 211
#define TK_BALANCE 212
#define TK_VGROUP 213
#define TK_LEADER 214
#define TK_MERGE 215
#define TK_REDISTRIBUTE 216
#define TK_SPLIT 217
#define TK_DELETE 218
#define TK_INSERT 219
#define TK_NULL 220
#define TK_NK_QUESTION 221
#define TK_NK_ARROW 222
#define TK_ROWTS 223
#define TK_QSTART 224
#define TK_QEND 225
#define TK_QDURATION 226
#define TK_WSTART 227
#define TK_WEND 228
#define TK_WDURATION 229
#define TK_IROWTS 230
#define TK_ISFILLED 231
#define TK_CAST 232
#define TK_NOW 233
#define TK_TODAY 234
#define TK_TIMEZONE 235
#define TK_CLIENT_VERSION 236
#define TK_SERVER_VERSION 237
#define TK_SERVER_STATUS 238
#define TK_CURRENT_USER 239
#define TK_CASE 240
#define TK_WHEN 241
#define TK_THEN 242
#define TK_ELSE 243
#define TK_BETWEEN 244
#define TK_IS 245
#define TK_NK_LT 246
#define TK_NK_GT 247
#define TK_NK_LE 248
#define TK_NK_GE 249
#define TK_NK_NE 250
#define TK_MATCH 251
#define TK_NMATCH 252
#define TK_CONTAINS 253
#define TK_IN 254
#define TK_JOIN 255
#define TK_INNER 256
#define TK_SELECT 257
#define TK_DISTINCT 258
#define TK_WHERE 259
#define TK_PARTITION 260
#define TK_BY 261
#define TK_SESSION 262
#define TK_STATE_WINDOW 263
#define TK_EVENT_WINDOW 264
#define TK_SLIDING 265
#define TK_FILL 266
#define TK_VALUE 267
#define TK_VALUE_F 268
#define TK_NONE 269
#define TK_PREV 270
#define TK_NULL_F 271
#define TK_LINEAR 272
#define TK_NEXT 273
#define TK_HAVING 274
#define TK_RANGE 275
#define TK_EVERY 276
#define TK_ORDER 277
#define TK_SLIMIT 278
#define TK_SOFFSET 279
#define TK_LIMIT 280
#define TK_OFFSET 281
#define TK_ASC 282
#define TK_NULLS 283
#define TK_ABORT 284
#define TK_AFTER 285
#define TK_ATTACH 286
#define TK_BEFORE 287
#define TK_BEGIN 288
#define TK_BITAND 289
#define TK_BITNOT 290
#define TK_BITOR 291
#define TK_BLOCKS 292
#define TK_CHANGE 293
#define TK_COMMA 294
#define TK_CONCAT 295
#define TK_CONFLICT 296
#define TK_COPY 297
#define TK_DEFERRED 298
#define TK_DELIMITERS 299
#define TK_DETACH 300
#define TK_DIVIDE 301
#define TK_DOT 302
#define TK_EACH 303
#define TK_FAIL 304
#define TK_FILE 305
#define TK_FOR 306
#define TK_GLOB 307
#define TK_ID 308
#define TK_IMMEDIATE 309
#define TK_IMPORT 310
#define TK_INITIALLY 311
#define TK_INSTEAD 312
#define TK_ISNULL 313
#define TK_KEY 314
#define TK_MODULES 315
#define TK_NK_BITNOT 316
#define TK_NK_SEMI 317
#define TK_NOTNULL 318
#define TK_OF 319
#define TK_PLUS 320
#define TK_PRIVILEGE 321
#define TK_RAISE 322
#define TK_RESTRICT 323
#define TK_ROW 324
#define TK_SEMI 325
#define TK_STAR 326
#define TK_STATEMENT 327
#define TK_STRICT 328
#define TK_STRING 329
#define TK_TIMES 330
#define TK_VALUES 331
#define TK_VARIABLE 332
#define TK_VIEW 333
#define TK_WAL 334
#define TK_NK_SPACE 600
......
......@@ -82,6 +82,7 @@ typedef struct SCatalogReq {
SArray* pUser; // element is SUserAuthInfo
SArray* pTableIndex; // element is SNAME
SArray* pTableCfg; // element is SNAME
SArray* pTableTag; // element is SNAME
bool qNodeRequired; // valid qnode
bool dNodeRequired; // valid dnode
bool svrVerRequired;
......@@ -105,6 +106,7 @@ typedef struct SMetaData {
SArray* pUser; // pRes = SUserAuthRes*
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
SArray* pTableCfg; // pRes = STableCfg*
SArray* pTableTag; // pRes = SArray<STagVal>*
SArray* pDnodeList; // pRes = SArray<SEpSet>*
SMetaRes* pSvrVer; // pRes = char*
} SMetaData;
......@@ -122,8 +124,8 @@ typedef struct SSTableVersion {
char stbName[TSDB_TABLE_NAME_LEN];
uint64_t dbId;
uint64_t suid;
int16_t sversion;
int16_t tversion;
int32_t sversion;
int32_t tversion;
int32_t smaVer;
} SSTableVersion;
......@@ -312,6 +314,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
int32_t catalogGetTableTag(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableCfg** pCfg);
int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp* pRsp);
......
......@@ -190,9 +190,9 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
void verifyOffset(void *pWalReader, STqOffsetVal* pOffset);
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
void qStreamSetOpen(qTaskInfo_t tinfo);
......@@ -208,8 +208,6 @@ void* qExtractReaderFromStreamScanner(void* scanner);
int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo);
int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver);
int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
......
......@@ -441,6 +441,19 @@ typedef struct SDropStreamStmt {
bool ignoreNotExists;
} SDropStreamStmt;
typedef struct SPauseStreamStmt {
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
bool ignoreNotExists;
} SPauseStreamStmt;
typedef struct SResumeStreamStmt {
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
bool ignoreNotExists;
bool ignoreUntreated;
} SResumeStreamStmt;
typedef struct SCreateFunctionStmt {
ENodeType type;
bool orReplace;
......
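Judging from the new `SPauseStreamStmt`/`SResumeStreamStmt` nodes above, the SQL surface these structures back is presumably along the following lines — a hedged sketch, where `IF EXISTS` would map to `ignoreNotExists`, `IGNORE UNTREATED` to `ignoreUntreated`, and `my_stream` is a placeholder name:

```sql
PAUSE STREAM IF EXISTS my_stream;
RESUME STREAM IF EXISTS IGNORE UNTREATED my_stream;
```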
......@@ -214,7 +214,9 @@ typedef enum ENodeType {
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
......
......@@ -379,6 +379,8 @@ typedef struct SVnodeModifyOpStmt {
SName usingTableName;
const char* pBoundCols;
struct STableMeta* pTableMeta;
SNode* pTagCond;
SArray* pTableTag;
SHashObj* pVgroupsHashObj;
SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*>
SHashObj* pSubTableHashObj;
......
......@@ -116,8 +116,8 @@ typedef struct STableMeta {
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
// info
int16_t sversion;
int16_t tversion;
int32_t sversion;
int32_t tversion;
STableComInfo tableInfo;
SSchema schema[];
} STableMeta;
......
......@@ -14,7 +14,11 @@
*/
#include "tdatablock.h"
#include "rocksdb/c.h"
#include "tdbInt.h"
#include "tsimplehash.h"
#include "tstreamFileState.h"
#ifdef __cplusplus
extern "C" {
......@@ -23,10 +27,26 @@ extern "C" {
#ifndef _STREAM_STATE_H_
#define _STREAM_STATE_H_
// void* streamBackendInit(const char* path);
// void streamBackendCleanup(void* arg);
// SListNode* streamBackendAddCompare(void* backend, void* arg);
// void streamBackendDelCompare(void* backend, void* arg);
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
typedef struct STdbState {
struct SStreamTask* pOwner;
rocksdb_t* rocksdb;
rocksdb_column_family_handle_t** pHandle;
rocksdb_writeoptions_t* writeOpts;
rocksdb_readoptions_t* readOpts;
rocksdb_options_t** cfOpts;
rocksdb_options_t* dbOpt;
struct SStreamTask* pOwner;
void* param;
void* env;
SListNode* pComparNode;
void* pBackendHandle;
char idstr[64];
void* compactFactory;
TDB* db;
TTB* pStateDb;
......@@ -40,19 +60,28 @@ typedef struct STdbState {
// incremental state storage
typedef struct {
STdbState* pTdbState;
int32_t number;
int64_t checkPointId;
STdbState* pTdbState;
SStreamFileState* pFileState;
int32_t number;
SSHashObj* parNameMap;
int64_t checkPointId;
int32_t taskId;
int64_t streamId;
} SStreamState;
SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);
void streamStateClose(SStreamState* pState);
void streamStateClose(SStreamState* pState, bool remove);
int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState);
int32_t streamStateAbort(SStreamState* pState);
void streamStateDestroy(SStreamState* pState);
void streamStateDestroy(SStreamState* pState, bool remove);
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
typedef struct {
rocksdb_iterator_t* iter;
rocksdb_snapshot_t* snapshot;
rocksdb_readoptions_t* readOpt;
rocksdb_t* db;
TBC* pCur;
int64_t number;
} SStreamStateCur;
......@@ -63,9 +92,13 @@ int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
bool streamStateCheck(SStreamState* pState, const SWinKey* key);
int32_t streamStateGetByPos(SStreamState* pState, void* pos, void** pVal);
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateClear(SStreamState* pState);
void streamStateSetNumber(SStreamState* pState, int32_t number);
int32_t streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
......@@ -89,7 +122,6 @@ int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void*
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
void streamFreeVal(void* val);
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
......@@ -109,9 +141,33 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);
int32_t streamStatePutParTag(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen);
int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen);
/***compare func **/
typedef struct SStateChekpoint {
char* taskName;
int64_t checkpointId;
} SStateChekpoint;
// todo refactor
typedef struct SStateKey {
SWinKey key;
int64_t opNum;
} SStateKey;
typedef struct SStateSessionKey {
SSessionKey key;
int64_t opNum;
} SStateSessionKey;
typedef struct SStreamValue {
int64_t unixTimestamp;
int32_t len;
char* data;
} SStreamValue;
int sessionRangeKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int sessionWinKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int stateSessionKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
#if 0
char* streamStateSessionDump(SStreamState* pState);
char* streamStateIntervalDump(SStreamState* pState);
......
......@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "executor.h"
#include "os.h"
#include "query.h"
#include "streamState.h"
#include "tdatablock.h"
......@@ -39,6 +39,7 @@ enum {
STREAM_STATUS__INIT,
STREAM_STATUS__FAILED,
STREAM_STATUS__RECOVER,
STREAM_STATUS__PAUSE,
};
enum {
......@@ -50,7 +51,7 @@ enum {
TASK_STATUS__RECOVER_PREPARE,
TASK_STATUS__RECOVER1,
TASK_STATUS__RECOVER2,
TASK_STATUS__RESTORE, // only available for source task to replay WAL from the checkpoint
TASK_STATUS__PAUSE,
};
enum {
......@@ -192,7 +193,7 @@ typedef struct {
int32_t streamInit();
void streamCleanUp();
SStreamQueue* streamQueueOpen();
SStreamQueue* streamQueueOpen(int64_t cap);
void streamQueueClose(SStreamQueue* queue);
static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {
......@@ -206,14 +207,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
}
static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
return queue->qItem;
}
void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);
......@@ -242,6 +239,7 @@ typedef struct {
void* vnode; // not available to encoder and decoder
FTbSink* tbSinkFunc;
STSchema* pTSchema;
SSHashObj* pTblInfo;
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
......@@ -272,12 +270,14 @@ typedef struct SStreamId {
typedef struct SCheckpointInfo {
int64_t id;
int64_t version; // offset in WAL
int64_t version; // offset in WAL
int64_t currentVer; // current offset in WAL, not serialize it
} SCheckpointInfo;
typedef struct SStreamStatus {
int8_t taskStatus;
int8_t schedStatus;
int8_t keepTaskStatus;
} SStreamStatus;
struct SStreamTask {
......@@ -340,12 +340,16 @@ typedef struct SStreamMeta {
TTB* pTaskDb;
TTB* pCheckpointDb;
SHashObj* pTasks;
SArray* pTaskList; // SArray<task_id*>
void* ahandle;
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
SRWLatch lock;
int8_t walScan;
int32_t walScanCounter;
void* streamBackend;
int32_t streamBackendId;
int64_t streamBackendRid;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
......@@ -537,14 +541,17 @@ void streamTaskInputFail(SStreamTask* pTask);
int32_t streamTryExec(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask);
int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock);
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);
// recover and fill history
int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version);
int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version);
int32_t streamProcessTaskCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq);
int32_t streamTaskCheckStatus(SStreamTask* pTask);
int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp, int64_t version);
// common
int32_t streamSetParamForRecover(SStreamTask* pTask);
int32_t streamRestoreParam(SStreamTask* pTask);
......@@ -564,19 +571,19 @@ int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId);
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _STREAM_FILE_STATE_H_
#define _STREAM_FILE_STATE_H_
#include "os.h"
#include "tarray.h"
#include "tdef.h"
#include "tlist.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SStreamFileState SStreamFileState;
typedef struct SRowBuffPos {
void* pRowBuff;
void* pKey;
bool beFlushed;
bool beUsed;
} SRowBuffPos;
typedef SList SStreamSnapshot;
typedef TSKEY (*GetTsFun)(void*);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, GetTsFun fp, void* pFile,
TSKEY delMark);
void streamFileStateDestroy(SStreamFileState* pFileState);
void streamFileStateClear(SStreamFileState* pFileState);
bool needClearDiskBuff(SStreamFileState* pFileState);
int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen);
int32_t deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen);
int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal);
void releaseRowBuffPos(SRowBuffPos* pBuff);
bool hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
int32_t recoverSnapshot(SStreamFileState* pFileState);
int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);
#ifdef __cplusplus
}
#endif
#endif // _STREAM_FILE_STATE_H_
......@@ -40,9 +40,7 @@ typedef struct SUpdateInfo {
TSKEY minTS;
SScalableBf *pCloseWinSBF;
SHashObj *pMap;
STimeWindow scanWindow;
uint64_t scanGroupId;
uint64_t maxVersion;
uint64_t maxDataVersion;
} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
......@@ -50,8 +48,6 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
void updateInfoDestroy(SUpdateInfo *pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
......
......@@ -127,8 +127,8 @@ typedef struct SWal {
typedef struct {
int64_t refId;
int64_t refVer;
// int64_t refFile;
SWal *pWal;
// int64_t refFile;
SWal *pWal;
} SWalRef;
typedef struct {
......@@ -138,6 +138,8 @@ typedef struct {
int8_t enableRef;
} SWalFilterCond;
typedef struct SWalReader SWalReader;
// todo hide this struct
typedef struct SWalReader {
SWal *pWal;
......@@ -147,8 +149,6 @@ typedef struct SWalReader {
int64_t curFileFirstVer;
int64_t curVersion;
int64_t capacity;
// int8_t curInvalid;
// int8_t curStopped;
TdThreadMutex mutex;
SWalFilterCond cond;
// TODO remove it
......@@ -195,10 +195,11 @@ SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond);
void walCloseReader(SWalReader *pRead);
void walReadReset(SWalReader *pReader);
int32_t walReadVer(SWalReader *pRead, int64_t ver);
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver);
int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader* pReader);
void walReaderValidVersionRange(SWalReader* pReader, int64_t *sver, int64_t *ever);
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
// only for tq usage
void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);
......
......@@ -22,19 +22,25 @@ extern "C" {
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section
#if !defined(WINDOWS)
#ifndef ALLOW_FORBID_FUNC
#define malloc MALLOC_FUNC_TAOS_FORBID
#define calloc CALLOC_FUNC_TAOS_FORBID
#define realloc REALLOC_FUNC_TAOS_FORBID
#define free FREE_FUNC_TAOS_FORBID
#ifdef strdup
#undef strdup
#define strdup STRDUP_FUNC_TAOS_FORBID
#endif
#endif // ifndef ALLOW_FORBID_FUNC
#endif // if !defined(WINDOWS)
// #if !defined(WINDOWS)
// #ifndef ALLOW_FORBID_FUNC
// #define malloc MALLOC_FUNC_TAOS_FORBID
// #define calloc CALLOC_FUNC_TAOS_FORBID
// #define realloc REALLOC_FUNC_TAOS_FORBID
// #define free FREE_FUNC_TAOS_FORBID
// #ifdef strdup
// #undef strdup
// #define strdup STRDUP_FUNC_TAOS_FORBID
// #endif
// #endif // ifndef ALLOW_FORBID_FUNC
// #endif // if !defined(WINDOWS)
// // #define taosMemoryFree malloc
// #define taosMemoryMalloc malloc
// #define taosMemoryCalloc calloc
// #define taosMemoryRealloc realloc
// #define taosMemoryFree free
int32_t taosMemoryDbgInit();
int32_t taosMemoryDbgInitRestore();
......
......@@ -147,7 +147,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_CONN_KILLED TAOS_DEF_ERROR_CODE(0, 0x0215)
#define TSDB_CODE_TSC_SQL_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x0216)
#define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217)
#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218)
//#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218)
#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219)
#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A)
#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B)
......@@ -262,6 +262,7 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) // 2.x
#define TSDB_CODE_MND_INVALID_STB_OPTION TAOS_DEF_ERROR_CODE(0, 0x036E)
#define TSDB_CODE_MND_INVALID_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F)
#define TSDB_CODE_MND_FIELD_VALUE_OVERFLOW TAOS_DEF_ERROR_CODE(0, 0x0370)
// mnode-func
......@@ -738,28 +739,21 @@ int32_t* taosGetErrno();
//tsma
#define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
#define TSDB_CODE_TSMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x3101)
#define TSDB_CODE_TSMA_NO_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x3102)
#define TSDB_CODE_TSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3103)
#define TSDB_CODE_TSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3104)
#define TSDB_CODE_TSMA_INVALID_PTR TAOS_DEF_ERROR_CODE(0, 0x3105)
#define TSDB_CODE_TSMA_INVALID_PARA TAOS_DEF_ERROR_CODE(0, 0x3106)
#define TSDB_CODE_TSMA_NO_INDEX_IN_CACHE TAOS_DEF_ERROR_CODE(0, 0x3107)
#define TSDB_CODE_TSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3102)
#define TSDB_CODE_TSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3103)
#define TSDB_CODE_TSMA_INVALID_PTR TAOS_DEF_ERROR_CODE(0, 0x3104)
#define TSDB_CODE_TSMA_INVALID_PARA TAOS_DEF_ERROR_CODE(0, 0x3105)
//rsma
#define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150)
#define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151)
#define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152)
#define TSDB_CODE_RSMA_FS_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
#define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3159)
#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3160)
#define TSDB_CODE_RSMA_FS_REF TAOS_DEF_ERROR_CODE(0, 0x3161)
#define TSDB_CODE_RSMA_FS_SYNC TAOS_DEF_ERROR_CODE(0, 0x3162)
#define TSDB_CODE_RSMA_FS_UPDATE TAOS_DEF_ERROR_CODE(0, 0x3163)
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_FS_REF TAOS_DEF_ERROR_CODE(0, 0x3156)
#define TSDB_CODE_RSMA_FS_SYNC TAOS_DEF_ERROR_CODE(0, 0x3157)
#define TSDB_CODE_RSMA_FS_UPDATE TAOS_DEF_ERROR_CODE(0, 0x3158)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
......@@ -773,11 +767,15 @@ int32_t* taosGetErrno();
// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)
#define TSDB_CODE_STREAM_BACKPRESSURE_OUT_OF_QUEUE TAOS_DEF_ERROR_CODE(0, 0x4101)
// TDLite
#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100)
#define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101)
// UTIL
#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000)
#ifdef __cplusplus
}
#endif
......
......@@ -368,11 +368,11 @@ typedef enum ELogicConditionType {
#define TSDB_MIN_STT_TRIGGER 1
#define TSDB_MAX_STT_TRIGGER 16
#define TSDB_DEFAULT_SST_TRIGGER 1
#define TSDB_MIN_HASH_PREFIX 0
#define TSDB_MAX_HASH_PREFIX 128
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
#define TSDB_DEFAULT_HASH_PREFIX 0
#define TSDB_MIN_HASH_SUFFIX 0
#define TSDB_MAX_HASH_SUFFIX 128
#define TSDB_MIN_HASH_SUFFIX (2 - TSDB_TABLE_NAME_LEN)
#define TSDB_MAX_HASH_SUFFIX (TSDB_TABLE_NAME_LEN - 2)
#define TSDB_DEFAULT_HASH_SUFFIX 0
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
......
......@@ -17,6 +17,7 @@
#define _TD_UTIL_LIST_H_
#include "os.h"
#include "talgo.h"
#ifdef __cplusplus
extern "C" {
......@@ -222,10 +223,12 @@ void tdListInit(SList *list, int32_t eleSize);
void tdListEmpty(SList *list);
SList *tdListNew(int32_t eleSize);
void *tdListFree(SList *list);
void *tdListFreeP(SList *list, FDelete fp);
void tdListPrependNode(SList *list, SListNode *node);
void tdListAppendNode(SList *list, SListNode *node);
int32_t tdListPrepend(SList *list, void *data);
int32_t tdListAppend(SList *list, const void *data);
SListNode *tdListAdd(SList *list, const void *data);
SListNode *tdListPopHead(SList *list);
SListNode *tdListPopTail(SList *list);
SListNode *tdListGetHead(SList *list);
......
......@@ -108,7 +108,6 @@ bool taosAssertRelease(bool condition);
void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo);
void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd);
void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile);
int32_t taosGenCrashJsonMsg(int signum, char **pMsg, int64_t clusterId, int64_t startTime);
// clang-format off
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
......
......@@ -84,6 +84,8 @@ struct STaosQueue {
int64_t memOfItems;
int32_t numOfItems;
int64_t threadId;
int64_t memLimit;
int64_t itemLimit;
};
struct STaosQset {
......@@ -106,12 +108,14 @@ void taosCloseQueue(STaosQueue *queue);
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
void *taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize);
void taosFreeQitem(void *pItem);
void taosWriteQitem(STaosQueue *queue, void *pItem);
int32_t taosWriteQitem(STaosQueue *queue, void *pItem);
int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
bool taosQueueEmpty(STaosQueue *queue);
void taosUpdateItemSize(STaosQueue *queue, int32_t items);
int32_t taosQueueItemSize(STaosQueue *queue);
int64_t taosQueueMemorySize(STaosQueue *queue);
void taosSetQueueCapacity(STaosQueue *queue, int64_t size);
void taosSetQueueMemoryCapacity(STaosQueue *queue, int64_t mem);
STaosQall *taosAllocateQall();
void taosFreeQall(STaosQall *qall);
......
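A hedged sketch of how the adjusted queue API above might be used — `taosWriteQitem` now returns a status instead of `void`, so a queue over its new item/memory limits can report failure. `taosOpenQueue` (the assumed counterpart of `taosCloseQueue` above), `DEF_QITEM`, and the nonzero-on-failure convention are assumptions, not shown in this hunk:

```c
STaosQueue *queue = taosOpenQueue();          // assumed constructor, paired with taosCloseQueue above
taosSetQueueCapacity(queue, 10000);           // cap the number of queued items
taosSetQueueMemoryCapacity(queue, 64 << 20);  // cap total queued memory at 64 MB

void *pItem = taosAllocateQitem(128, DEF_QITEM, 128);
if (taosWriteQitem(queue, pItem) != 0) {
  // assumed failure path: the queue is over its limit and the item was not enqueued
  taosFreeQitem(pItem);
}
```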
......@@ -81,14 +81,23 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
int32_t suffix) {
if (prefix == 0 && suffix == 0) {
if ((prefix == 0 && suffix == 0) || (tblen <= (prefix + suffix)) || (tblen <= -1 * (prefix + suffix)) ||
prefix * suffix < 0) {
return MurmurHash3_32(tbname, tblen);
} else if (prefix > 0 || suffix > 0) {
return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
} else {
if (tblen <= (prefix + suffix)) {
return MurmurHash3_32(tbname, tblen);
} else {
return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
char tbName[TSDB_TABLE_FNAME_LEN];
int32_t offset = 0;
if (prefix < 0) {
offset = -1 * prefix;
strncpy(tbName, tbname, offset);
}
if (suffix < 0) {
strncpy(tbName + offset, tbname + tblen + suffix, -1 * suffix);
offset += -1 * suffix;
}
return MurmurHash3_32(tbName, offset);
}
}
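A worked reading of the updated function: non-negative `prefix`/`suffix` still *skip* that many leading/trailing characters before hashing, while the newly supported negative values *keep only* that many leading/trailing characters. A small illustration (the table name is arbitrary):

```c
// tbname = "d1001_meters_california", tblen = 23
//
// prefix = 6,  suffix = 0   -> hash "meters_california"     (skip the first 6 chars)
// prefix = -5, suffix = 0   -> hash "d1001"                 (keep the first 5 chars)
// prefix = -2, suffix = -3  -> hash "d1" + "nia" = "d1nia"  (keep 2 head + 3 tail chars)
//
// Degenerate inputs (prefix and suffix of mixed sign, or a table name no longer
// than the kept/skipped span) fall back to hashing the whole name.
```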
......@@ -107,6 +116,8 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#define VND_CHECK_CODE(CODE, LINO, LABEL) TSDB_CHECK_CODE(CODE, LINO, LABEL)
#ifdef __cplusplus
}
#endif
......
......@@ -26,6 +26,38 @@ if pidof taosd &> /dev/null; then
sleep 1
fi
# Stop adapter service if running
if pidof taosadapter &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosadapter || :
elif $(which service &> /dev/null); then
${csudo}service taosadapter stop || :
else
pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosadapter service success!"
sleep 1
fi
# Stop keeper service if running
if pidof taoskeeper &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taoskeeper || :
elif $(which service &> /dev/null); then
${csudo}service taoskeeper stop || :
else
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taoskeeper service success!"
sleep 1
fi
# if taos.cfg already softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
......@@ -41,6 +73,11 @@ if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
if [ -f "${install_main_dir}/taoskeeper.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taoskeeper.toml || :
fi
# there must be no libtaos.so* files left, otherwise ln -s will fail
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
......@@ -32,6 +32,7 @@ else
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
......
......@@ -44,8 +44,31 @@ mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
# download taoskeeper and build
if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then
arch=amd64
elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then
arch=386
elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then
arch=arm
elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then
arch=arm64
else
arch=$cpuType
fi
echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper"
echo "$top_dir=${top_dir}"
taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper`
echo "taoskeeper_binary: ${taoskeeper_binary}"
# copy config files
cp $(dirname ${taoskeeper_binary})/config/taoskeeper.toml ${pkg_dir}${install_home_path}/cfg
cp $(dirname ${taoskeeper_binary})/taoskeeper.service ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
......@@ -53,6 +76,7 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
cp ${taoskeeper_binary} ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
......@@ -143,6 +167,7 @@ else
exit 1
fi
rm -rf ${pkg_dir}/build-taoskeeper
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
......@@ -150,4 +175,5 @@ echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
......@@ -42,8 +42,9 @@ if [ "$DISABLE_ADAPTER" = "0" ]; then
done
fi
# if has mnode ep set or the host is first ep or not for cluster, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
# if dnode has been created or has mnode ep set or the host is first ep or not for cluster, just start.
if [ -f "$DATA_DIR/dnode/dnode.json" ] ||
[ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@
# others will first wait the first ep ready.
......@@ -54,7 +55,7 @@ else
exit $?
fi
while true; do
es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check)
es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check | grep "^[0-9]*:")
echo ${es}
if [ "${es%%:*}" -eq 2 ]; then
echo "execute create dnode"
......
#!/bin/sh
es=$(taos --check)
es=$(taos --check | grep "^[0-9]*:")
code=${es%%:*}
if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then
exit 0
......
......@@ -35,14 +35,16 @@ function cp_rpm_package() {
local cur_dir
cd $1
cur_dir=$(pwd)
echo "cp_rpm_package cd: ${cur_dir}"
for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then
cd ${dirlist}
echo 'cp_rpm_package ${cur_dir}/${dirlist}'
cp_rpm_package ${cur_dir}/${dirlist}
cd ..
fi
if test -e ${dirlist}; then
echo "${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm"
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
fi
done
......@@ -54,6 +56,25 @@ fi
${csudo}mkdir -p ${pkg_dir}
cd ${pkg_dir}
# download taoskeeper and build
if [ "$cpuType" = "x64" ] || [ "$cpuType" = "x86_64" ] || [ "$cpuType" = "amd64" ]; then
arch=amd64
elif [ "$cpuType" = "x32" ] || [ "$cpuType" = "i386" ] || [ "$cpuType" = "i686" ]; then
arch=386
elif [ "$cpuType" = "arm" ] || [ "$cpuType" = "aarch32" ]; then
arch=arm
elif [ "$cpuType" = "arm64" ] || [ "$cpuType" = "aarch64" ]; then
arch=arm64
else
arch=$cpuType
fi
cd ${top_dir}
echo "${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r ${arch} -e taoskeeper"
taoskeeper_binary=`${top_dir}/../enterprise/packaging/build_taoskeeper.sh -r $arch -e taoskeeper`
echo "taoskeeper_binary: ${taoskeeper_binary}"
cd ${package_dir}
${csudo}mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo}rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
......@@ -85,3 +106,4 @@ mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo}rm -rf ${pkg_dir}
rm -rf ${top_dir}/build-taoskeeper
\ No newline at end of file
......@@ -3,6 +3,7 @@
%define cfg_install_dir /etc/taos
%define __strip /bin/true
%global __python /usr/bin/python3
%global _build_id_links none
Name: tdengine
Version: %{_version}
......@@ -62,6 +63,15 @@ fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
if [ -f %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml ]; then
cp %{_compiledir}/../build-taoskeeper/config/taoskeeper.toml %{buildroot}%{homepath}/cfg ||:
fi
if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper.service ]; then
cp %{_compiledir}/../build-taoskeeper/taoskeeper.service %{buildroot}%{homepath}/cfg ||:
fi
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
......@@ -73,8 +83,12 @@ cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/../build-taoskeeper/taoskeeper ]; then
cp %{_compiledir}/../build-taoskeeper/taoskeeper %{buildroot}%{homepath}/bin
fi
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
......@@ -119,7 +133,9 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig
fi
fi
ls -al %{buildroot}%{homepath}/bin
tree -L 5
echo "==============================copying files done"
#Scripts executed before installation
%pre
if [ -f /var/lib/taos/dnode/dnodeCfg.json ]; then
......@@ -196,6 +212,7 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
......
......@@ -197,8 +197,8 @@ if [[ $productName == "TDengine" ]]; then
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
jars=$(ls ${build_dir}/lib/*.jar 2>/dev/null|wc -l)
[ "${jars}" != "0" ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
jars=$(ls ${build_dir}/lib/*.jar 2>/dev/null|wc -l)
[ "${jars}" != "0" ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
git clone --depth 1 https://github.com/taosdata/driver-go ${install_dir}/connector/go
rm -rf ${install_dir}/connector/go/.git ||:
......
......@@ -436,7 +436,7 @@ function local_fqdn_check() {
function install_taosadapter_config() {
if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
[ ! -d %{cfg_install_dir} ] &&
[ ! -d ${cfg_install_dir} ] &&
${csudo}${csudo}mkdir -p ${cfg_install_dir}
[ -f ${cfg_dir}/taosadapter.toml ] && ${csudo}cp ${cfg_dir}/taosadapter.toml ${cfg_install_dir}
[ -f ${cfg_install_dir}/taosadapter.toml ] &&
......@@ -451,19 +451,26 @@ function install_taosadapter_config() {
}
function install_taoskeeper_config() {
if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then
[ ! -d %{cfg_install_dir} ] &&
${csudo}${csudo}mkdir -p ${cfg_install_dir}
[ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir}
[ -f ${cfg_install_dir}/keeper.toml ] &&
${csudo}chmod 644 ${cfg_install_dir}/keeper.toml
# if new environment without taoskeeper
if [[ ! -f "${cfg_install_dir}/keeper.toml" ]] && [[ ! -f "${cfg_install_dir}/taoskeeper.toml" ]]; then
[ ! -d ${cfg_install_dir} ] && ${csudo}${csudo}mkdir -p ${cfg_install_dir}
[ -f ${cfg_dir}/taoskeeper.toml ] && ${csudo}cp ${cfg_dir}/taoskeeper.toml ${cfg_install_dir}
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml
fi
# if old machine with taoskeeper.toml file
if [ -f ${cfg_install_dir}/taoskeeper.toml ]; then
${csudo}mv ${cfg_dir}/taoskeeper.toml ${cfg_dir}/taoskeeper.toml.new
fi
[ -f ${cfg_dir}/keeper.toml ] &&
${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new
if [ -f ${cfg_install_dir}/keeper.toml ]; then
echo "The file keeper.toml will be renamed to taoskeeper.toml"
${csudo}mv ${cfg_install_dir}/keeper.toml ${cfg_install_dir}/taoskeeper.toml
${csudo}mv ${cfg_dir}/taoskeeper.toml ${cfg_dir}/taoskeeper.toml.new
fi
[ -f ${cfg_install_dir}/keeper.toml ] &&
${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir}
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml ${cfg_dir}
}
function install_config() {
......@@ -655,6 +662,15 @@ function install_taosadapter_service() {
fi
}
function install_taoskeeper_service() {
if ((${service_mod}==0)); then
[ -f ${script_dir}/../cfg/taoskeeper.service ] &&\
${csudo}cp ${script_dir}/../cfg/taoskeeper.service \
${service_config_dir}/ || :
${csudo}systemctl daemon-reload
fi
}
function install_service() {
log_print "start install service"
if [ "$osType" != "Darwin" ]; then
......@@ -732,6 +748,7 @@ function install_TDengine() {
install_taosadapter_config
install_taoskeeper_config
install_taosadapter_service
install_taoskeeper_service
install_service
install_app
......
......@@ -17,7 +17,7 @@ cfg_link_dir="/usr/local/taos/cfg"
service_config_dir="/etc/systemd/system"
taos_service_name="taosd"
taoskeeper_service_name="taoskeeper"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
......@@ -57,6 +57,13 @@ function kill_taosd() {
fi
}
function kill_taoskeeper() {
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function clean_service_on_systemd() {
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet taosadapter; then
......@@ -76,6 +83,12 @@ function clean_service_on_systemd() {
[ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config}
taoskeeper_service_config="${service_config_dir}/${taoskeeper_service_name}.service"
if systemctl is-active --quiet ${taoskeeper_service_name}; then
echo "TDengine taoskeeper is running, stopping it..."
${csudo}systemctl stop ${taoskeeper_service_name} &> /dev/null || echo &> /dev/null
fi
[ -f ${taoskeeper_service_config} ] && ${csudo}rm -f ${taoskeeper_service_config}
}
function clean_service_on_sysvinit() {
......@@ -111,6 +124,7 @@ function clean_service() {
# must manually stop taosd
kill_taosadapter
kill_taosd
kill_taoskeeper
fi
}
......@@ -124,6 +138,7 @@ ${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${cfg_link_dir}/*.new || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
......
......@@ -81,6 +81,7 @@ typedef struct {
int64_t appId;
// ctl
int8_t threadStop;
int8_t quitByKill;
TdThread thread;
TdThreadMutex lock; // used when app init and cleanup
SHashObj* appSummary;
......
......@@ -70,7 +70,7 @@ extern "C" {
#define VALUE_LEN 6
#define OTD_JSON_FIELDS_NUM 4
#define MAX_RETRY_TIMES 100
#define MAX_RETRY_TIMES 10
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
......@@ -107,6 +107,7 @@ typedef struct {
int32_t colsLen;
int32_t timestampLen;
bool measureEscaped;
SArray *colArray;
} SSmlLineInfo;
......@@ -168,6 +169,7 @@ typedef struct {
int32_t uid; // used for automatic create child table
SHashObj *childTables;
SHashObj *tableUids;
SHashObj *superTables;
SHashObj *pVgHash;
......@@ -206,6 +208,19 @@ typedef struct {
#define IS_SAME_KEY (maxKV->keyLen == kv.keyLen && memcmp(maxKV->key, kv.key, kv.keyLen) == 0)
#define IS_SLASH_LETTER_IN_MEASUREMENT(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
#define PROCESS_SLASH_IN_MEASUREMENT(key, keyLen) \
for (int i = 1; i < keyLen; ++i) { \
if (IS_SLASH_LETTER_IN_MEASUREMENT(key + i)) { \
MOVE_FORWARD_ONE(key + i, keyLen - i); \
keyLen--; \
} \
}
extern int64_t smlFactorNS[3];
extern int64_t smlFactorS[3];
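A self-contained sketch of what `PROCESS_SLASH_IN_MEASUREMENT` above does: it strips the backslash from escaped commas and spaces in a measurement name, shrinking the length in place. `SLASH`, `COMMA`, and `SPACE` are assumed to be the obvious character constants; their real definitions live elsewhere in this header:

```c
#include <stdio.h>
#include <string.h>

// assumed character constants; the real definitions are elsewhere in the header
#define SLASH '\\'
#define COMMA ','
#define SPACE ' '

#define IS_SLASH_LETTER_IN_MEASUREMENT(sql) \
  (*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
#define PROCESS_SLASH_IN_MEASUREMENT(key, keyLen)  \
  for (int i = 1; i < keyLen; ++i) {               \
    if (IS_SLASH_LETTER_IN_MEASUREMENT(key + i)) { \
      MOVE_FORWARD_ONE(key + i, keyLen - i);       \
      keyLen--;                                    \
    }                                              \
  }

int main(void) {
  char measurement[] = "cpu\\ load\\,total";  // "cpu\ load\,total" on the wire
  int  len = (int)strlen(measurement);
  PROCESS_SLASH_IN_MEASUREMENT(measurement, len);
  printf("%.*s\n", len, measurement);  // prints: cpu load,total
  return 0;
}
```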
......@@ -228,6 +243,7 @@ int8_t smlGetTsTypeByLen(int32_t len);
SSmlTableInfo* smlBuildTableInfo(int numRows, const char* measure, int32_t measureLen);
SSmlSTableMeta* smlBuildSTableMeta(bool isDataFormat);
int32_t smlSetCTableName(SSmlTableInfo *oneTable);
void getTableUid(SSmlHandle *info, SSmlLineInfo *currElement, SSmlTableInfo *tinfo);
STableMeta* smlGetMeta(SSmlHandle *info, const void* measure, int32_t measureLen);
int32_t is_same_child_table_telnet(const void *a, const void *b);
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len);
......@@ -237,6 +253,7 @@ uint8_t smlGetTimestampLen(int64_t num);
void clearColValArray(SArray* pCols);
void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag);
void freeSSmlKv(void* data);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);
......
(The remaining file diffs in this commit are collapsed.)