Commit 1cf0b485 authored by K kailixu

Merge branch '3.0' into feat/TD-23643-3.0

......@@ -120,8 +120,14 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
IF (${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
ENDIF ()
# disable all assert
......
......@@ -109,7 +109,7 @@ option(
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
OFF
ON
)
option(
......
......@@ -223,17 +223,31 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
option(FAIL_ON_WARNINGS "" OFF)
#option(WITH_JEMALLOC "" ON)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
IF (${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
IF (${TD_DARWIN})
target_compile_options(
rocksdb
PRIVATE -Wno-unused-private-field
)
endif(${TD_DARWIN})
endif(${BUILD_WITH_ROCKSDB})
# lucene
......
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})
add_executable(rocksdbTest "")
target_sources(rocksdbTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
target_link_libraries(rocksdbTest rocksdb)
\ No newline at end of file
target_link_libraries(rocksdbTest rocksdb)
......@@ -25,10 +25,12 @@ int main(int argc, char const *argv[]) {
// Read
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
//rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
char buf[256] = {0};
size_t vallen = 0;
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
snprintf(buf, vallen+5, "val:%s", val);
printf("%ld %ld %s\n", strlen(val), vallen, buf);
// Update
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
......@@ -43,4 +45,4 @@ int main(int argc, char const *argv[]) {
rocksdb_close(db);
return 0;
}
\ No newline at end of file
}
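
For reference, here is a minimal standalone sketch of the same read path using the RocksDB C API, assuming `librocksdb` is linked and `/tmp/rocksdb_demo` is writable. It prints the value with an explicit length, because the buffer returned by `rocksdb_get()` is not NUL-terminated, which is the issue the `snprintf`/`vallen` change above works around:

```c
// Minimal sketch (assumptions: librocksdb is available; path is illustrative).
#include <stdio.h>
#include "rocksdb/c.h"

int main(void) {
  char *err = NULL;
  rocksdb_options_t *options = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(options, 1);

  rocksdb_t *db = rocksdb_open(options, "/tmp/rocksdb_demo", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    rocksdb_free(err);
    return 1;
  }

  rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
  rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);

  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
  size_t vallen = 0;
  char  *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
  if (val != NULL) {
    // val points to vallen bytes with no trailing '\0', so print exactly vallen bytes.
    printf("val:%.*s (len=%zu)\n", (int)vallen, val, vallen);
    rocksdb_free(val);
  }

  rocksdb_readoptions_destroy(readoptions);
  rocksdb_writeoptions_destroy(writeoptions);
  rocksdb_options_destroy(options);
  rocksdb_close(db);
  return 0;
}
```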
......@@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.
......@@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
## Comparison with other databases
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
## More readings
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
......@@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
This document describes how to install TDengine in a Docker container and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
......
......@@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
......@@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service:
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.
......
......@@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>
:::tip
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
:::
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
......@@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
......@@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />
......
......@@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
......
......@@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
......
......@@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### Install TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
......
......@@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
### 2. How can I resolve the "Unable to establish connection" error?
......
......@@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.1.0</version>
<version>3.2.1</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>
......
package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
......@@ -64,7 +65,8 @@ public class SubscribeDemo {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (Meters meter : meters) {
for (ConsumerRecord<Meters> recode : meters) {
Meters meter = recode.value();
System.out.println(meter);
}
}
......
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_json() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
// SchemalessProtocol::Json
let data = [
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(300u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.ttl(1000)
.req_id(301u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.req_id(302u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_line() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(100u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.req_id(101u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl and req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(103u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_telnet() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(200u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.ttl(1000)
.req_id(201u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.req_id(202u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
......@@ -207,6 +207,8 @@ Active: inactive (dead)
- Check the service status: `sudo launchctl list | grep taosd`
- Check detailed service information: `launchctl print system/com.tdengine.taosd`
:::info
- Managing `com.tdengine.taosd` with the `launchctl` command requires administrator privileges; always prefix it with `sudo` to enhance security.
......
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
This diff is collapsed.
......@@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
import Preparation from "./_preparation.mdx"
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
......@@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
<RustBind />
#### Schemaless Write
<RustSml />
### Query Data
<RustQuery />
......
......@@ -185,6 +185,7 @@ typedef struct SMergeLogicNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergeLogicNode;
typedef enum EWindowType {
......@@ -444,6 +445,7 @@ typedef struct SMergePhysiNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergePhysiNode;
typedef struct SWinodwPhysiNode {
......
......@@ -1342,6 +1342,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
goto _return;
}
pRequest->syncQuery = true;
STscObj *pTscObj = pRequest->pTscObj;
code = transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta);
if (code) {
......@@ -1368,7 +1370,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tsem_wait(&pParam->sem);
_return:
taosArrayDestroy(catalogReq.pTableMeta);
taosArrayDestroyEx(catalogReq.pTableMeta, destoryTablesReq);
destroyRequest(pRequest);
return code;
}
......
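
The cleanup above switches from `taosArrayDestroy` to `taosArrayDestroyEx`, which runs a per-element destructor before freeing the array itself, so memory owned by each element is released as well. A hedged sketch of the pattern follows; the element layout and destructor body are illustrative assumptions, not the actual `destoryTablesReq` implementation:

```c
// Illustrative only: per-element cleanup callback for taosArrayDestroyEx.
// The element struct below is a stand-in, not the real catalog request type.
typedef struct {
  void *pTableMeta;  // hypothetical payload owned by each element
} SDemoTablesReq;

static void demoDestroyTablesReq(void *p) {
  SDemoTablesReq *pReq = (SDemoTablesReq *)p;
  taosMemoryFreeClear(pReq->pTableMeta);  // free what the element owns; the array frees the element slot
}

// usage sketch: taosArrayDestroyEx(pArray, demoDestroyTablesReq);
```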
......@@ -1219,6 +1219,7 @@ static int32_t smlPushCols(SArray *colsArray, SArray *cols) {
SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
if (terrno == TSDB_CODE_DUP_KEY) {
taosHashCleanup(kvHash);
return terrno;
}
}
......@@ -1292,12 +1293,12 @@ static int32_t smlParseLineBottom(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat,
info->lineNum);
SSmlSTableMeta *meta = smlBuildSTableMeta(info->dataFormat);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags);
if (terrno == TSDB_CODE_DUP_KEY) {
return terrno;
}
smlInsertMeta(meta->colHash, meta->cols, elements->colArray);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
}
}
uDebug("SML:0x%" PRIx64 " smlParseLineBottom end, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum);
......
......@@ -755,7 +755,7 @@ SColVal *tRowIterNext(SRowIter *pIter) {
}
if (pIter->pRow->flag == HAS_NULL) {
pIter->cv = COL_VAL_NULL(pTColumn->type, pTColumn->colId);
pIter->cv = COL_VAL_NULL(pTColumn->colId, pTColumn->type);
goto _exit;
}
......@@ -2439,7 +2439,7 @@ _exit:
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes, int32_t nRows, char *lengthOrbitmap,
char *data) {
int32_t code = 0;
if(data == NULL){
if (data == NULL) {
for (int32_t i = 0; i < nRows; ++i) {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NONE](pColData, NULL, 0);
}
......@@ -2453,8 +2453,9 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
} else {
if(ASSERT(varDataTLen(data + offset) <= bytes)){
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), bytes);
if (ASSERT(varDataTLen(data + offset) <= bytes)) {
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset),
bytes);
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
......
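
For context on the `varDataTLen(data + offset) <= bytes` check above: TDengine stores variable-length column values as a 2-byte length header followed by the payload, and `bytes` is the declared column width including that header. A hedged illustration of the layout follows; the `demo*` macros are assumptions written for clarity, not the project's actual macro definitions:

```c
// Illustrative var-data layout: [ 2-byte payload length | payload bytes ... ]
#include <stdint.h>
#include <stddef.h>

#define DEMO_VARSTR_HEADER_SIZE  sizeof(uint16_t)
#define demoVarDataLen(v)   (*(uint16_t *)(v))                             // payload length only
#define demoVarDataTLen(v)  (DEMO_VARSTR_HEADER_SIZE + demoVarDataLen(v))  // header + payload
#define demoVarDataVal(v)   ((char *)(v) + DEMO_VARSTR_HEADER_SIZE)        // pointer to payload

// The assertion in the diff rejects any value whose total length
// (header + payload) exceeds the declared column width `bytes`.
```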
......@@ -84,6 +84,7 @@ target_include_directories(
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
target_link_libraries(
vnode
......@@ -100,6 +101,7 @@ target_link_libraries(
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index
......
......@@ -182,7 +182,7 @@ int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t n
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly);
void tsdbReaderSetId(STsdbReader* pReader, const char* idstr);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
......@@ -196,8 +196,9 @@ void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
SArray *pTableUids);
void *tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
......
......@@ -343,6 +343,16 @@ struct STsdbFS {
SArray *aDFileSet; // SArray<SDFileSet>
};
typedef struct {
rocksdb_t *db;
rocksdb_options_t *options;
rocksdb_flushoptions_t *flushoptions;
rocksdb_writeoptions_t *writeoptions;
rocksdb_readoptions_t *readoptions;
rocksdb_writebatch_t *writebatch;
TdThreadMutex rMutex;
} SRocksCache;
struct STsdb {
char *path;
SVnode *pVnode;
......@@ -355,6 +365,7 @@ struct STsdb {
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
};
struct TSDBKEY {
......@@ -777,6 +788,8 @@ typedef struct SCacheRowsReader {
uint64_t suid;
char **transferBuf; // todo remove it soon
int32_t numOfCols;
SArray *pCidList;
int32_t *pSlotIds;
int32_t type;
int32_t tableIndex; // currently returned result tables
STableKeyInfo *pTableList; // table id list
......@@ -796,6 +809,10 @@ typedef struct {
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype);
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
......
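
The new `SRocksCache` struct above bundles the RocksDB handles and a mutex used by the tsdb last/last_row cache. Below is a hedged sketch of how these members could be initialized; the function name, path argument, and error handling are assumptions (the actual `tsdbOpenCache` implementation is not part of this excerpt), and only the `rocksdb_*` C API calls are known library functions:

```c
// Hypothetical initialization of the SRocksCache members declared above.
#include "rocksdb/c.h"

static int32_t demoRocksCacheOpen(SRocksCache *pRCache, const char *path) {
  char *err = NULL;

  pRCache->options = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(pRCache->options, 1);

  pRCache->writeoptions = rocksdb_writeoptions_create();
  pRCache->readoptions  = rocksdb_readoptions_create();
  pRCache->flushoptions = rocksdb_flushoptions_create();
  pRCache->writebatch   = rocksdb_writebatch_create();

  pRCache->db = rocksdb_open(pRCache->options, path, &err);
  if (err != NULL) {  // open failed (cleanup of the option objects is omitted in this sketch)
    rocksdb_free(err);
    return -1;
  }

  taosThreadMutexInit(&pRCache->rMutex, NULL);  // serialize readers against write-batch commits
  return 0;
}
```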
......@@ -19,6 +19,7 @@
#include "executor.h"
#include "filter.h"
#include "qworker.h"
#include "rocksdb/c.h"
#include "sync.h"
#include "tRealloc.h"
#include "tchecksum.h"
......@@ -177,6 +178,7 @@ int tsdbClose(STsdb** pTsdb);
int32_t tsdbBegin(STsdb* pTsdb);
int32_t tsdbPrepareCommit(STsdb* pTsdb);
int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbCacheCommit(STsdb* pTsdb);
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
int32_t tsdbFinishCommit(STsdb* pTsdb);
int32_t tsdbRollbackCommit(STsdb* pTsdb);
......@@ -193,9 +195,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type);
int32_t type);
int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
......
......@@ -21,47 +21,30 @@
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
void** pRes, const char* idStr) {
const int32_t* dstSlotIds, void** pRes, const char* idStr) {
int32_t numOfRows = pBlock->info.rows;
bool allNullRow = true;
if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) {
bool allNullRow = true;
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[i]);
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
if (slotIds[i] == -1) { // the primary timestamp
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
p->ts = pColVal->ts;
p->bytes = TSDB_KEYSIZE;
*(int64_t*)p->buf = pColVal->ts;
allNullRow = false;
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
p->ts = 0;
p->isNull = true;
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
}
......@@ -74,36 +57,31 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
pBlock->info.rows += allNullRow ? 0 : 1;
} else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
if (slotIds[i] == -1) {
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
colDataSetVal(pColInfoData, numOfRows, (const char*)&pColVal->ts, false);
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
SColVal* pVal = &pColVal->colVal;
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
SColVal* pVal = &pColVal->colVal;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
continue;
}
allNullRow = false;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
}
}
pBlock->info.rows += 1;
pBlock->info.rows += allNullRow ? 0 : 1;
} else {
tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr);
return TSDB_CODE_INVALID_PARA;
......@@ -143,7 +121,7 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void** pReader, const char* idstr) {
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) {
*pReader = NULL;
SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
......@@ -155,6 +133,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
p->pTsdb = p->pVnode->pTsdb;
p->verRange = (SVersionRange){.minVer = 0, .maxVer = UINT64_MAX};
p->numOfCols = numOfCols;
p->pCidList = pCidList;
p->pSlotIds = pSlotIds;
p->suid = suid;
if (numOfTables == 0) {
......@@ -226,32 +206,9 @@ void* tsdbCacherowsReaderClose(void* pReader) {
return NULL;
}
static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, SArray** pRow,
LRUHandle** h) {
int32_t code = TSDB_CODE_SUCCESS;
*pRow = NULL;
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
code = tsdbCacheGetLastrowH(lruCache, uid, pr, h);
} else {
code = tsdbCacheGetLastH(lruCache, uid, pr, h);
}
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// no data in the table of Uid
if (*h != NULL) {
*pRow = (SArray*)taosLRUCacheValue(lruCache, *h);
}
return code;
}
static void freeItem(void* pItem) {
SLastCol* pCol = (SLastCol*)pItem;
if (IS_VAR_DATA_TYPE(pCol->colVal.type)) {
if (IS_VAR_DATA_TYPE(pCol->colVal.type) && pCol->colVal.value.pData) {
taosMemoryFree(pCol->colVal.value.pData);
}
}
......@@ -277,19 +234,17 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) {
}
}
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds,
SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SCacheRowsReader* pr = pReader;
int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
SArray* pRow = NULL;
bool hasRes = false;
SArray* pLastCols = NULL;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
bool hasRes = false;
SArray* pLastCols = NULL;
void** pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
if (pRes == NULL) {
......@@ -298,20 +253,22 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
for (int32_t j = 0; j < pr->numOfCols; ++j) {
pRes[j] = taosMemoryCalloc(
1, sizeof(SFirstLastRes) + pr->pSchema->columns[-1 == slotIds[j] ? 0 : slotIds[j]].bytes + VARSTR_HEADER_SIZE);
pRes[j] =
taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[/*-1 == slotIds[j] ? 0 : */ slotIds[j]].bytes +
VARSTR_HEADER_SIZE);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
p->ts = INT64_MIN;
}
pLastCols = taosArrayInit(pr->pSchema->numOfCols, sizeof(SLastCol));
pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
if (pLastCols == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) {
struct STColumn* pCol = &pr->pSchema->columns[i];
for (int32_t i = 0; i < pr->numOfCols; ++i) {
int32_t slotId = slotIds[i];
struct STColumn* pCol = &pr->pSchema->columns[slotId];
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL};
if (IS_VAR_DATA_TYPE(pCol->type)) {
......@@ -328,6 +285,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;
int32_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
int64_t st = taosGetTimestampUs();
......@@ -335,16 +294,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t i = 0; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
......@@ -352,47 +309,34 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
bool hasNotNullRow = true;
int64_t singleTableLastTs = INT64_MAX;
for (int32_t k = 0; k < pr->numOfCols; ++k) {
int32_t slotId = slotIds[k];
if (slotId == -1) { // the primary timestamp
SLastCol* p = taosArrayGet(pLastCols, 0);
SLastCol* pCol = (SLastCol*)taosArrayGet(pRow, 0);
if (pCol->ts > p->ts) {
hasRes = true;
p->ts = pCol->ts;
p->colVal = pCol->colVal;
singleTableLastTs = pCol->ts;
}
} else {
SLastCol* p = taosArrayGet(pLastCols, slotId);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
}
SLastCol* p = taosArrayGet(pLastCols, k);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, k);
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
}
}
}
......@@ -414,33 +358,31 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
tsdbCacheRelease(lruCache, h);
taosArrayClearEx(pRow, freeItem);
}
if (hasRes) {
saveOneRow(pLastCols, pResBlock, pr, slotIds, pRes, pr->idstr);
saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
}
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
saveOneRow(pRow, pResBlock, pr, slotIds, pRes, pr->idstr);
// TODO reset the pRes
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
taosArrayClearEx(pRow, freeItem);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
tsdbCacheRelease(lruCache, h);
pr->tableIndex += 1;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
......@@ -466,6 +408,7 @@ _end:
}
taosMemoryFree(pRes);
taosArrayDestroyEx(pRow, freeItem);
taosArrayDestroyEx(pLastCols, freeItem);
return code;
}
......@@ -140,7 +140,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
SMemTable *pMemTable = pTsdb->mem;
STbData *pTbData = NULL;
SVBufPool *pPool = pTsdb->pVnode->inUse;
TSDBKEY lastKey = {.version = version, .ts = eKey};
// check if table exists
SMetaInfo info;
......@@ -181,7 +180,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pMemTable->nDel++;
pMemTable->minVer = TMIN(pMemTable->minVer, version);
pMemTable->maxVer = TMIN(pMemTable->maxVer, version);
/*
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
tsdbCacheDeleteLastrow(pTsdb->lruCache, pTbData->uid, eKey);
}
......@@ -189,6 +188,10 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
*/
if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) {
tsdbCacheDel(pTsdb, suid, uid, sKey, eKey);
}
tsdbTrace("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
" at version %" PRId64,
......@@ -284,8 +287,8 @@ bool tsdbTbDataIterNext(STbDataIter *pIter) {
int64_t tsdbCountTbDataRows(STbData *pTbData) {
SMemSkipListNode *pNode = pTbData->sl.pHead;
int64_t rowsNum = 0;
int64_t rowsNum = 0;
while (NULL != pNode) {
pNode = SL_GET_NODE_FORWARD(pNode, 0);
if (pNode == pTbData->sl.pTail) {
......@@ -298,17 +301,17 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) {
return rowsNum;
}
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj* pTableMap, int64_t *rowsNum) {
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj *pTableMap, int64_t *rowsNum) {
taosRLockLatch(&pMemTable->latch);
for (int32_t i = 0; i < pMemTable->nBucket; ++i) {
STbData *pTbData = pMemTable->aBucket[i];
while (pTbData) {
void* p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
void *p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
if (p == NULL) {
pTbData = pTbData->next;
continue;
}
*rowsNum += tsdbCountTbDataRows(pTbData);
pTbData = pTbData->next;
}
......@@ -668,15 +671,8 @@ static int32_t tsdbInsertColDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
......@@ -736,15 +732,8 @@ static int32_t tsdbInsertRowDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
......
......@@ -144,8 +144,8 @@ _exit:
}
int vnodeShouldCommit(SVnode *pVnode, bool atExit) {
bool diskAvail = osDataSpaceAvailable();
bool needCommit = false;
bool diskAvail = osDataSpaceAvailable();
bool needCommit = false;
taosThreadMutexLock(&pVnode->mutex);
if (pVnode->inUse && diskAvail) {
......@@ -439,6 +439,9 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) {
code = tsdbCommit(pVnode->pTsdb, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbCacheCommit(pVnode->pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
if (VND_IS_RSMA(pVnode)) {
code = smaCommit(pVnode->pSma, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
......
......@@ -1128,6 +1128,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pMergeNode->ignoreGroupId ? "true" : "false");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
if (pMergeNode->groupSort) {
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc");
......
......@@ -31,18 +31,21 @@ typedef struct SCacheRowsScanInfo {
void* pLastrowReader;
SColMatchInfo matchInfo;
int32_t* pSlotIds;
int32_t* pDstSlotIds;
SExprSupp pseudoExprSup;
int32_t retrieveType;
int32_t currentGroupIndex;
SSDataBlock* pBufferredRes;
SArray* pUidList;
SArray* pCidList;
int32_t indexOfBufferedRes;
STableListInfo* pTableList;
} SCacheRowsScanInfo;
static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
static void destroyCacheScanOperator(void* param);
static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds,
int32_t** pDstSlotIds);
static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pColMatchInfo);
#define SCAN_ROW_TYPE(_t) ((_t) ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW)
......@@ -71,9 +74,16 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
goto _error;
}
SArray* pCidList = taosArrayInit(numOfCols, sizeof(int16_t));
for (int i = 0; i < TARRAY_SIZE(pInfo->matchInfo.pList); ++i) {
SColMatchItem* pColInfo = taosArrayGet(pInfo->matchInfo.pList, i);
taosArrayPush(pCidList, &pColInfo->colId);
}
pInfo->pCidList = pCidList;
removeRedundantTsCol(pScanNode, &pInfo->matchInfo);
code = extractCacheScanSlotId(pInfo->matchInfo.pList, pTaskInfo, &pInfo->pSlotIds);
code = extractCacheScanSlotId(pInfo->matchInfo.pList, pTaskInfo, &pInfo->pSlotIds, &pInfo->pDstSlotIds);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
......@@ -91,8 +101,8 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
uint64_t suid = tableListGetSuid(pTableListInfo);
code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables,
taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader,
pTaskInfo->id.str);
taosArrayGetSize(pInfo->matchInfo.pList), pCidList, pInfo->pSlotIds, suid,
&pInfo->pLastrowReader, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
......@@ -160,8 +170,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
int32_t code =
tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds,
pInfo->pDstSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
......@@ -227,8 +237,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
}
code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num,
taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader,
pTaskInfo->id.str);
taosArrayGetSize(pInfo->matchInfo.pList), pInfo->pCidList, pInfo->pSlotIds, suid,
&pInfo->pLastrowReader, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
pInfo->currentGroupIndex += 1;
taosArrayClear(pInfo->pUidList);
......@@ -237,7 +247,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
taosArrayClear(pInfo->pUidList);
code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pDstSlotIds,
pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
......@@ -280,6 +291,8 @@ void destroyCacheScanOperator(void* param) {
blockDataDestroy(pInfo->pRes);
blockDataDestroy(pInfo->pBufferredRes);
taosMemoryFree(pInfo->pSlotIds);
taosMemoryFree(pInfo->pDstSlotIds);
taosArrayDestroy(pInfo->pCidList);
taosArrayDestroy(pInfo->pUidList);
taosArrayDestroy(pInfo->matchInfo.pList);
tableListDestroy(pInfo->pTableList);
......@@ -292,7 +305,8 @@ void destroyCacheScanOperator(void* param) {
taosMemoryFreeClear(param);
}
int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds) {
int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds,
int32_t** pDstSlotIds) {
size_t numOfCols = taosArrayGetSize(pColMatchInfo);
*pSlotIds = taosMemoryMalloc(numOfCols * sizeof(int32_t));
......@@ -300,18 +314,25 @@ int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTask
return TSDB_CODE_OUT_OF_MEMORY;
}
*pDstSlotIds = taosMemoryMalloc(numOfCols * sizeof(int32_t));
if (*pDstSlotIds == NULL) {
taosMemoryFree(*pSlotIds);
return TSDB_CODE_OUT_OF_MEMORY;
}
SSchemaWrapper* pWrapper = pTaskInfo->schemaInfo.sw;
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchItem* pColMatch = taosArrayGet(pColMatchInfo, i);
for (int32_t j = 0; j < pWrapper->nCols; ++j) {
if (pColMatch->colId == pWrapper->pSchema[j].colId && pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
/* if (pColMatch->colId == pWrapper->pSchema[j].colId && pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
(*pSlotIds)[pColMatch->dstSlotId] = -1;
break;
}
}*/
if (pColMatch->colId == pWrapper->pSchema[j].colId) {
(*pSlotIds)[pColMatch->dstSlotId] = j;
(*pSlotIds)[i] = j;
(*pDstSlotIds)[i] = pColMatch->dstSlotId;
break;
}
}
......
......@@ -545,6 +545,7 @@ typedef struct SMultiwayMergeOperatorInfo {
SSDataBlock* pIntermediateBlock; // to hold the intermediate result
int64_t startTs; // sort start time
bool groupSort;
bool ignoreGroupId;
uint64_t groupId;
STupleHandle* prefetchedTuple;
} SMultiwayMergeOperatorInfo;
......@@ -694,7 +695,11 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
}
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.id.groupId = pInfo->groupId;
if (pInfo->ignoreGroupId) {
pDataBlock->info.id.groupId = 0;
} else {
pDataBlock->info.id.groupId = pInfo->groupId;
}
pDataBlock->info.dataLoad = 1;
}
......@@ -785,6 +790,7 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
pInfo->groupSort = pMergePhyNode->groupSort;
pInfo->ignoreGroupId = pMergePhyNode->ignoreGroupId;
pInfo->pSortInfo = createSortInfo(pMergePhyNode->pMergeKeys);
pInfo->pInputBlock = pInputBlock;
size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock);
......
......@@ -455,6 +455,7 @@ static int32_t logicMergeCopy(const SMergeLogicNode* pSrc, SMergeLogicNode* pDst
COPY_SCALAR_FIELD(numOfChannels);
COPY_SCALAR_FIELD(srcGroupId);
COPY_SCALAR_FIELD(groupSort);
COPY_SCALAR_FIELD(ignoreGroupId);
return TSDB_CODE_SUCCESS;
}
......
......@@ -2027,6 +2027,7 @@ static const char* jkMergePhysiPlanTargets = "Targets";
static const char* jkMergePhysiPlanNumOfChannels = "NumOfChannels";
static const char* jkMergePhysiPlanSrcGroupId = "SrcGroupId";
static const char* jkMergePhysiPlanGroupSort = "GroupSort";
static const char* jkMergePhysiPlanIgnoreGroupID = "IgnoreGroupID";
static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) {
const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj;
......@@ -2047,6 +2048,9 @@ static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkMergePhysiPlanGroupSort, pNode->groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkMergePhysiPlanIgnoreGroupID, pNode->ignoreGroupId);
}
return code;
}
......@@ -2070,6 +2074,9 @@ static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkMergePhysiPlanGroupSort, &pNode->groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkMergePhysiPlanIgnoreGroupID, &pNode->ignoreGroupId);
}
return code;
}
......
......@@ -2512,7 +2512,8 @@ enum {
PHY_MERGE_CODE_TARGETS,
PHY_MERGE_CODE_NUM_OF_CHANNELS,
PHY_MERGE_CODE_SRC_GROUP_ID,
PHY_MERGE_CODE_GROUP_SORT
PHY_MERGE_CODE_GROUP_SORT,
PHY_MERGE_CODE_IGNORE_GROUP_ID,
};
static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
......@@ -2534,6 +2535,9 @@ static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_GROUP_SORT, pNode->groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId);
}
return code;
}
......@@ -2563,6 +2567,9 @@ static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_MERGE_CODE_GROUP_SORT:
code = tlvDecodeBool(pTlv, &pNode->groupSort);
break;
case PHY_MERGE_CODE_IGNORE_GROUP_ID:
code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId);
break;
default:
break;
}
......
......@@ -1559,6 +1559,7 @@ static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pM
pMerge->numOfChannels = pMergeLogicNode->numOfChannels;
pMerge->srcGroupId = pMergeLogicNode->srcGroupId;
pMerge->groupSort = pMergeLogicNode->groupSort;
pMerge->ignoreGroupId = pMergeLogicNode->ignoreGroupId;
int32_t code = addDataBlockSlots(pCxt, pMergeLogicNode->pInputs, pMerge->node.pOutputDataBlockDesc);
......
......@@ -538,7 +538,8 @@ static int32_t stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* p
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_PROJECT: {
SProjectLogicNode *pLogicNode = (SProjectLogicNode*)pNode;
if (pMerge->node.pLimit || pMerge->node.pSlimit) {
if (pLogicNode->ignoreGroupId && (pMerge->node.pLimit || pMerge->node.pSlimit)) {
pMerge->ignoreGroupId = true;
pLogicNode->ignoreGroupId = false;
}
break;
......
......@@ -134,7 +134,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
#,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_stable.py
......
......@@ -68,7 +68,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \
-v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=true;make -j || exit 1"
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1"
# -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \
if [[ -d ${WORKDIR}/debugNoSan ]] ;then
......@@ -97,7 +97,7 @@ docker run \
-v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \
-v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \
-v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=true;make -j || exit 1 "
--rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1 "
mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan
......
......@@ -36,4 +36,34 @@ if $rows != 0 then
return -1
endi
sql insert into tb0 values (now, 0);
sql insert into tb1 values (now, 1);
sql insert into tb2 values (now, 2);
sql insert into tb3 values (now, 3);
sql insert into tb4 values (now, 4);
sql insert into tb5 values (now, 5);
sql insert into tb6 values (now, 6);
sql insert into tb7 values (now, 7);
sql select * from (select 1 from $mt1 where ts is not null partition by tbname limit 1);
if $rows != 8 then
return -1
endi
sql select count(*) from (select ts from $mt1 where ts is not null partition by tbname slimit 2);
if $rows != 1 then
return -1
endi
if $data00 != 2 then
return -1
endi
sql select count(*) from (select ts from $mt1 where ts is not null partition by tbname limit 2);
if $rows != 1 then
return -1
endi
if $data00 != 8 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT