diff --git a/cmake/cmake.define b/cmake/cmake.define index 5b65738c700bd7fa862da6284881ada17f245302..dcb4ce2702b9bf96f01e55affcc50ec2c4dd696a 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -120,8 +120,14 @@ ELSE () SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0") MESSAGE(STATUS "Compile with Address Sanitizer!") ELSE () - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + MESSAGE(STATUS "Compiler is Clang/AppleClang, TD_DARWIN: " ${TD_DARWIN}) + IF (${TD_DARWIN}) + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k") + ELSE () + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k") + ENDIF () ENDIF () # disable all assert diff --git a/cmake/cmake.options b/cmake/cmake.options index 60ff00affc01408b084a8993441e4fe7052f4977..4ec9d18e08580a735598b647ef65188b46fdbc61 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -109,7 +109,7 @@ option( option( BUILD_WITH_ROCKSDB "If build with rocksdb" - OFF + ON ) option( diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 8f943133820bfa45a963ad9c36c48ddf60756a98..942e77dacbd8b1a4d7db2bc61a2204bdcf436750 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -223,17 +223,31 @@ endif(${BUILD_WITH_LEVELDB}) # rocksdb # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev if(${BUILD_WITH_ROCKSDB}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") + #SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") option(WITH_TESTS "" OFF) option(WITH_BENCHMARK_TOOLS "" OFF) option(WITH_TOOLS "" OFF) option(WITH_LIBURING "" OFF) + option(WITH_IOSTATS_CONTEXT "" OFF) + option(WITH_PERF_CONTEXT "" OFF) + option(FAIL_ON_WARNINGS "" OFF) + #option(WITH_JEMALLOC "" ON) option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) + IF (${TD_WINDOWS}) + option(WITH_MD_LIBRARY "build with MD" OFF) + set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) + endif(${TD_WINDOWS}) add_subdirectory(rocksdb EXCLUDE_FROM_ALL) target_include_directories( rocksdb PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include> ) + IF (${TD_DARWIN}) + target_compile_options( + rocksdb + PRIVATE -Wno-unused-private-field + ) + endif(${TD_DARWIN}) endif(${BUILD_WITH_ROCKSDB}) # lucene diff --git a/contrib/test/rocksdb/CMakeLists.txt b/contrib/test/rocksdb/CMakeLists.txt index b500e0a66158ba33d52d351f4e34462b27771892..5ae1b0395b5c2a4b323c07de19d1298579cfcc3f 100644 --- 
a/contrib/test/rocksdb/CMakeLists.txt +++ b/contrib/test/rocksdb/CMakeLists.txt @@ -1,6 +1,8 @@ +message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS}) + add_executable(rocksdbTest "") target_sources(rocksdbTest PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/main.c" ) -target_link_libraries(rocksdbTest rocksdb) \ No newline at end of file +target_link_libraries(rocksdbTest rocksdb) diff --git a/contrib/test/rocksdb/main.c b/contrib/test/rocksdb/main.c index d1cbd373da1f8af09b6f46fbfbeb9da27dd43678..09435790cc3f1b0a05b0fe3db73e708243cfaa8f 100644 --- a/contrib/test/rocksdb/main.c +++ b/contrib/test/rocksdb/main.c @@ -25,10 +25,12 @@ int main(int argc, char const *argv[]) { // Read rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create(); - rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db)); +//rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db)); + char buf[256] = {0}; size_t vallen = 0; char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err); - printf("val:%s\n", val); + snprintf(buf, vallen+5, "val:%s", val); + printf("%zu %zu %s\n", strlen(val), vallen, buf); // Update // rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err); @@ -43,4 +45,4 @@ rocksdb_close(db); return 0; -} \ No newline at end of file +} diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index 79d170eeb9abd42f13a5625fc807c11d03a7c598..9324ca9f7e4e3a29fc0e20b49b0c35ee531ec38e 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation. ## Competitive Advantages -By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages. +By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages. - **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression. @@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine ## Comparison with other databases -- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/) -- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/) -- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/) -- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/) -- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/) +- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/) +- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/) +- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/) +- [TDengine vs. 
Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/) ## More readings - [Introduction to Time-Series Database](https://tdengine.com/tsdb/) - [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/) - + diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 42e6861674434ad9800f69dcf00fe80a140e9bf7..2049e1615f33134f43aeb668308f19a4f0059bdd 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain This document describes how to install TDengine in a Docker container and perform queries and inserts. -- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com). +- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com). - To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package). - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine). diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index 3282f600ac81fd6a4cc45fb8c09e3c036570cc18..8f9cb9aedcfc7d0e760d5111c844c1d3f002d135 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3"; This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts. -- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com). +- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com). - To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine). @@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service: - Check TDengine Server status: `sudo launchctl list | grep taosd` +- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd` + :::info - Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges. - The administrator privilege is required for service management to enhance security. diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 913c24f1896e3c2af6614fc07e9319c6fb06783f..8cf3c463afe663c18a1765f41fa449fc19c05399 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a :::tip -If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq). +If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq). 
::: diff --git a/docs/en/07-develop/03-insert-data/_rust_schemaless.mdx b/docs/en/07-develop/03-insert-data/_rust_schemaless.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a26613bd15e010b8982c9d93ee1894df7e8d1407 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_rust_schemaless.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}} +``` diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 2847bf20f88d5a468cfd99296b9f1e3390616fc4..b878d47a1c4dc7b2e06c995ce25dc119206a49d0 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -36,6 +36,104 @@ REST connection supports all platforms that can run Java. Please refer to [version support list](/reference/connector#version-support) +## Recent update logs + +| taos-jdbcdriver version | major changes | +| :---------------------: | :-----------: | +| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | +| 3.2.0 | This version has been deprecated | +| 3.1.0 | JDBC REST connection supports subscription over WebSocket | +| 3.0.1 - 3.0.4 | Fix occasional incorrect parsing of resultSet data. 3.0.1 is compiled on JDK 11; in a JDK 8 environment, you are advised to use another version | +| 3.0.0 | Support for TDengine 3.0 | +| 2.0.42 | Fix the wasNull interface return value in WebSocket connections | +| 2.0.41 | Fix the decode method for the username and password in REST connections | +| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | +| 2.0.38 | JDBC REST connections add bulk pull function | +| 2.0.37 | Support JSON tags | +| 2.0.36 | Support schemaless writing | + +**Note**: Adding `batchfetch` to the REST connection and setting it to true enables the WebSocket connection. + +### Handling exceptions + +After an error is reported, the error message and error code can be obtained through SQLException. + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +There are four types of error codes that the JDBC connector can report: + +- Error codes of the JDBC driver itself (between 0x2301 and 0x2350) +- Error codes of the native connection method (between 0x2351 and 0x2360) +- Error codes of the consumer method (between 0x2371 and 0x2380) +- Error codes of other TDengine function modules. + +For specific error codes, please refer to the table below. + +| Error Code | Description | Suggested Actions | +| ---------- | ----------- | ----------------- | +| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or recreate the connection to execute the relevant statements. | +| 0x2302 | this operation is NOT supported currently! | This operation is not supported by the current connection. Use another connection mode. |
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. | +| 0x2304 | statement is closed | The statement is closed. Check whether the statement was used again after being closed, or whether the connection is normal. | +| 0x2305 | resultSet is closed | The result set has been released. Check whether it was used again after being released. | +| 0x2306 | Batch is empty! | Add parameters to the prepared statement before calling executeBatch. | +| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use executeUpdate(), not executeQuery(). | +| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use executeQuery(), not executeUpdate(). | +| 0x2309 | invalid sql for executeQuery: (?) | - | +| 0x230a | Database not specified or available | - | +| 0x230b | invalid sql for executeUpdate: (?) | - | +| 0x230c | invalid sql for execute: (?) | - | +| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. | +| 0x230e | connection already closed | The connection has been closed. Check whether the connection was used again after being closed, or whether the connection is normal. | +| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. | +| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Check whether the URL is correct. | +| 0x2311 | can't register JDBC-RESTful driver | - | +| 0x2312 | url is not set | Check whether the REST connection URL is correct. | +| 0x2313 | invalid sql | - | +| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. | +| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting between TDengine and JDBC data types. | +| 0x2316 | unknown timestamp precision | - | +| 0x2317 | | A wrong request type was used in the REST connection. | +| 0x2318 | | A data transmission exception occurred during the REST connection. Check the network status and try again. | +| 0x2319 | user is required | The username information is missing when creating the connection | +| 0x231a | password is required | The password information is missing when creating the connection | +| 0x231b | invalid json format | - | +| 0x231c | httpEntity is null, sql: | An execution exception occurred during the REST connection | +| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. | +| 0x2351 | failed to create subscription | - | +| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified under the native connection. | +| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing a prepared statement on the native connection. Check the taos log to locate the fault. | +| 0x2354 | JNI connection is NULL | The native connection was already closed when the command was executed. Check the connection to TDengine. | +| 0x2355 | JNI result set is NULL | The result set is abnormal. Check the connection status and try again. | +| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. | +| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2358 | fetch to the end of resultSet | - | +| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. | +| 0x2371 | consumer properties must not be null! | The parameter is empty when creating a subscription. Fill in the correct parameters. | +| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Enter the correct parameters. | +| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Enter the correct parameters. | +| 0x2374 | consumer config error | - | +| 0x2375 | topic reference has been destroyed | The topic reference was released during creation of the data subscription. Check the connection to TDengine. | +| 0x2376 | failed to set consumer topic, topic name is empty | The subscription topic name is empty during data subscription creation. Check that the specified topic name is correct. | +| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Check the connection to TDengine. | +| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log according to the error message to locate the fault. | +| - | can't create connection with server within: | Increase the connection timeout via the httpConnectTimeout parameter, or check the connection to taosAdapter. | +| - | failed to complete the task within the specified time : | Increase the execution timeout via the messageWaitTimeout parameter, or check the connection to taosAdapter. | + +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) + + ## TDengine DataType vs. Java DataType TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversion with Java is as follows: @@ -82,7 +180,7 @@ <dependency> <groupId>com.taosdata.jdbc</groupId> <artifactId>taos-jdbcdriver</artifactId> - <version>3.1.0</version> + <version>3.2.1</version> </dependency> ``` @@ -97,7 +195,7 @@ cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` -After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository. +After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository. @@ -333,35 +431,6 @@ while(resultSet.next()){ > The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. -### Handling exceptions - -After an error is reported, the error message and error code can be obtained through SQLException. 
- -```java -try (Statement statement = connection.createStatement()) { - // executeQuery - ResultSet resultSet = statement.executeQuery(sql); - // print result - printResult(resultSet); -} catch (SQLException e) { - System.out.println("ERROR Message: " + e.getMessage()); - System.out.println("ERROR Code: " + e.getErrorCode()); - e.printStackTrace(); -} -``` - -There are four types of error codes that the JDBC connector can report: - -- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350), - Error code of the native connection method (error code between 0x2351 and 0x2360) - Error code of the consumer method (error code between 0x2371 and 0x2380) - Error code of other TDengine function modules. - -For specific error codes, please refer to. - -- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) - - ### Writing data via parameter binding TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. @@ -369,9 +438,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER **Note:** - JDBC REST connections do not currently support bind interface -- The following sample code is based on taos-jdbcdriver-3.1.0 +- The following sample code is based on taos-jdbcdriver-3.2.1 - The setString method should be called for binary type data, and the setNString method should be called for nchar type data -both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition +- When specifying the database together with the table name in prepareStatement, do not use `db.?`; use `?` directly and set the database through setTableName, for example: `prepareStatement.setTableName("db.t1")` (see the sketch after this list). 
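For illustration, here is a minimal, hedged sketch of that last note only; the database `db`, super table `stb`, and subtable `t1` are placeholder names, and the statement is assumed to be a native-connection bind statement as in the demo below:

```java
// Sketch: the SQL keeps a bare `?` placeholder for the table name;
// the `db.` prefix is supplied via setTableName(), never written as `db.?` in the SQL.
String sql = "insert into ? using stb tags(?) values(?,?)";
TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);
pstmt.setTableName("db.t1"); // database qualified here, not in the SQL text
```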
+ + + ```java public class ParameterBindingDemo { @@ -599,21 +671,7 @@ public class ParameterBindingDemo { } ``` -The methods to set TAGS values: - -```java -public void setTagNull(int index, int type) -public void setTagBoolean(int index, boolean value) -public void setTagInt(int index, int value) -public void setTagByte(int index, byte value) -public void setTagShort(int index, short value) -public void setTagLong(int index, long value) -public void setTagTimestamp(int index, long value) -public void setTagFloat(int index, float value) -public void setTagDouble(int index, double value) -public void setTagString(int index, String value) -public void setTagNString(int index, String value) -``` +**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition. The methods to set VALUES columns: @@ -630,17 +688,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException ``` + + + +```java +public class ParameterBindingDemo { + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 30; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); + + init(conn); + + bindInteger(conn); + + bindFloat(conn); + + bindBoolean(conn); + + bindBytes(conn); + + bindString(conn); + + conn.close(); + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_ws_parabind"); + stmt.execute("create database if not exists test_ws_parabind"); + stmt.execute("use test_ws_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) 
values(?,?,?,?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(4, random.nextLong()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE)); + pstmt.setLong(5, random.nextLong()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(1, random.nextFloat()); + pstmt.setTagDouble(2, random.nextDouble()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setFloat(2, random.nextFloat()); + pstmt.setDouble(3, random.nextDouble()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t3_" + i); + // set tags + pstmt.setTagBoolean(1, random.nextBoolean()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setBoolean(2, random.nextBoolean()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBytes(Connection conn) throws SQLException { + String sql = "insert into ? using stable4 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t4_" + i); + // set tags + pstmt.setTagString(1, new String("abc")); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setString(2, "abc"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindString(Connection conn) throws SQLException { + String sql = "insert into ? using stable5 tags(?) 
values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t5_" + i); + // set tags + pstmt.setTagNString(1, "California.SanFrancisco"); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(0, new Timestamp(current + j)); + pstmt.setNString(1, "California.SanFrancisco"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } +} +``` + + + + +The methods to set TAGS values: + +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + ### Schemaless Writing TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). -Note: - -- JDBC REST connections do not currently support schemaless writes -- The following sample code is based on taos-jdbcdriver-3.1.0 + + ```java -public class SchemalessInsertTest { +public class SchemalessJniTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; @@ -668,6 +912,41 @@ public class SchemalessInsertTest { } ``` + + + +```java +public class SchemalessWsTest { + private static final String host = "127.0.0.1"; + private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; + private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + + public static void main(String[] args) throws SQLException { + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true"; + Connection connection = DriverManager.getConnection(url); + init(connection); + + SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless"); + writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS); + writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); + writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); + System.exit(0); + } + + private static void init(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.executeUpdate("drop database if exists test_ws_schemaless"); + stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500"); + stmt.executeUpdate("use test_ws_schemaless"); + } + } +} +``` + + + + ### Data Subscription The TDengine Java Connector supports subscription 
functionality with the following application API. @@ -711,8 +990,9 @@ TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config); ```java while(true) { ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100)); - for (ResultBean record : records) { - process(record); + for (ConsumerRecord<ResultBean> record : records) { + ResultBean bean = record.value(); + process(bean); } } ``` @@ -765,8 +1045,9 @@ public abstract class ConsumerLoop { while (!shutdown.get()) { ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100)); - for (ResultBean record : records) { - process(record); + for (ConsumerRecord<ResultBean> record : records) { + ResultBean bean = record.value(); + process(bean); } } consumer.unsubscribe(); @@ -841,8 +1122,9 @@ public abstract class ConsumerLoop { while (!shutdown.get()) { ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100)); - for (ResultBean record : records) { - process(record); + for (ConsumerRecord<ResultBean> record : records) { + ResultBean bean = record.value(); + process(bean); } } consumer.unsubscribe(); @@ -968,20 +1250,6 @@ The source code of the sample application is under `TDengine/examples/JDBC`: [JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC) -## Recent update logs - -| taos-jdbcdriver version | major changes | -| :---------------------: | :-----------: | -| 3.1.0 | JDBC REST connection supports subscription over WebSocket | -| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | -| 3.0.0 | Support for TDengine 3.0 | -| 2.0.42 | fix wasNull interface return value in WebSocket connection | -| 2.0.41 | fix decode method of username and password in REST connection | -| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | -| 2.0.38 | JDBC REST connections add bulk pull function | -| 2.0.37 | Support json tags | -| 2.0.36 | Support schemaless writing | - ## Frequently Asked Questions 1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`? 
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index ad522e9d2b26ff02ee06bb7438acc12d315529c4..597a9b0f52efcf4dcd54d58a36cd8ced761ecd1c 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem'; import Preparition from "./_preparation.mdx" import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx" +import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx" import RustQuery from "../../07-develop/04-query-data/_rust.mdx" [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) @@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se +#### Schemaless Write + +<RustSml /> + ### Query data diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md index 652d5ded0b3fed28961e92d4894fa090be393873..8d97dafa7c8362a694ca8d025e1035b74ec20e8b 100644 --- a/docs/en/14-reference/07-tdinsight/index.md +++ b/docs/en/14-reference/07-tdinsight/index.md @@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval To deploy TDinsight, we need - a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters). -- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter). -- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper). +- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter). +- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper). Please record - The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041` diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md index b84b30bb71fe2231228007f95154a181000ac566..1e3325b2b2a527840ab867927c9fcae55c0ed4c5 100644 --- a/docs/en/25-application/01-telegraf.md +++ b/docs/en/25-application/01-telegraf.md @@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa ### TDengine -Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it. +Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/). ## Data Connection Setup diff --git a/docs/en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md index 6ac7253fc46c50d3efe698d508de8669c65bf5d9..ee1e9449281560943b647b3d67ae9adb3eec3476 100644 --- a/docs/en/25-application/02-collectd.md +++ b/docs/en/25-application/02-collectd.md @@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa ### Install TDengine -Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it. +Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/). 
## Data Connection Setup diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index 9e7f4f8e0d3d94f0016c0dcc6701c670fd7d32c8..aa28303f5d10e69a8446a2511d288c67ecc8ac02 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo 2. Run `sudo rm -rf /var/log/taos/` to delete your log files. 3. Run `sudo rm -rf /var/lib/taos/` to delete your data files. 4. Install TDengine 3.0. -5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support). +5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/). ### 2. How can I resolve the "Unable to establish connection" error? diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index 01d0ce936c20df1f8567869c1307606ab949cd32..4bdb2062b279ed16367d5b7f6ea5f9404dc361bd 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -22,7 +22,7 @@ <dependency> <groupId>com.taosdata.jdbc</groupId> <artifactId>taos-jdbcdriver</artifactId> - <version>3.1.0</version> + <version>3.2.1</version> </dependency> diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index 8da6f77bae432dee987d4567229c2fbebd657be8..b5cdedc34f00dc2914f62176e4bd8b1d80a01bf6 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -1,5 +1,6 @@ package com.taos.example; +import com.taosdata.jdbc.tmq.ConsumerRecord; import com.taosdata.jdbc.tmq.ConsumerRecords; import com.taosdata.jdbc.tmq.TMQConstants; import com.taosdata.jdbc.tmq.TaosConsumer; @@ -64,7 +65,8 @@ public class SubscribeDemo { consumer.subscribe(Collections.singletonList(TOPIC)); while (!shutdown.get()) { ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100)); - for (Meters meter : meters) { + for (ConsumerRecord<Meters> record : meters) { + Meters meter = record.value(); System.out.println(meter); } } diff --git a/docs/examples/rust/nativeexample/examples/schemaless_insert_json.rs b/docs/examples/rust/nativeexample/examples/schemaless_insert_json.rs new file mode 100644 index 0000000000000000000000000000000000000000..d5aa45016ec3bb27a251206f7e544b1a6ffa003b --- /dev/null +++ b/docs/examples/rust/nativeexample/examples/schemaless_insert_json.rs @@ -0,0 +1,74 @@ +use taos_query::common::SchemalessPrecision; +use taos_query::common::SchemalessProtocol; +use taos_query::common::SmlDataBuilder; + +use crate::AsyncQueryable; +use crate::AsyncTBuilder; +use crate::TaosBuilder; + +async fn put_json() -> anyhow::Result<()> { + // std::env::set_var("RUST_LOG", "taos=trace"); + std::env::set_var("RUST_LOG", "taos=debug"); + pretty_env_logger::init(); + let dsn = + std::env::var("TDENGINE_CLOUD_DSN").unwrap_or("http://localhost:6041".to_string()); + log::debug!("dsn: {:?}", &dsn); + + let client = TaosBuilder::from_dsn(dsn)?.build().await?; + + let db = "demo_schemaless_ws"; + + client.exec(format!("drop database if exists {db}")).await?; + + client + .exec(format!("create database if not exists {db}")) + .await?; + + // should specify database before insert + client.exec(format!("use {db}")).await?; + + // SchemalessProtocol::Json + let data = [ + r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": 
"California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"# + ] + .map(String::from) + .to_vec(); + + // demo with all fields + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Json) + .precision(SchemalessPrecision::Millisecond) + .data(data.clone()) + .ttl(1000) + .req_id(300u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default precision + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Json) + .data(data.clone()) + .ttl(1000) + .req_id(301u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default ttl + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Json) + .data(data.clone()) + .req_id(302u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default req_id + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Json) + .data(data.clone()) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + client.exec(format!("drop database if exists {db}")).await?; + + Ok(()) +} diff --git a/docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs b/docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs new file mode 100644 index 0000000000000000000000000000000000000000..3cf4d969cee9b01b49f67dab3ce32f66e0dd2100 --- /dev/null +++ b/docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs @@ -0,0 +1,78 @@ +use taos_query::common::SchemalessPrecision; +use taos_query::common::SchemalessProtocol; +use taos_query::common::SmlDataBuilder; + +use crate::AsyncQueryable; +use crate::AsyncTBuilder; +use crate::TaosBuilder; + +async fn put_line() -> anyhow::Result<()> { + // std::env::set_var("RUST_LOG", "taos=trace"); + std::env::set_var("RUST_LOG", "taos=debug"); + pretty_env_logger::init(); + + let dsn = + std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string()); + log::debug!("dsn: {:?}", &dsn); + + let client = TaosBuilder::from_dsn(dsn)?.build().await?; + + let db = "demo_schemaless_ws"; + + client.exec(format!("drop database if exists {db}")).await?; + + client + .exec(format!("create database if not exists {db}")) + .await?; + + // should specify database before insert + client.exec(format!("use {db}")).await?; + + let data = [ + "measurement,host=host1 field1=2i,field2=2.0 1577837300000", + "measurement,host=host1 field1=2i,field2=2.0 1577837400000", + "measurement,host=host1 field1=2i,field2=2.0 1577837500000", + "measurement,host=host1 field1=2i,field2=2.0 1577837600000", + ] + .map(String::from) + .to_vec(); + + // demo with all fields + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .precision(SchemalessPrecision::Millisecond) + .data(data.clone()) + .ttl(1000) + .req_id(100u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default ttl + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .precision(SchemalessPrecision::Millisecond) + .data(data.clone()) + .req_id(101u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default ttl and req_id + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .precision(SchemalessPrecision::Millisecond) + 
.data(data.clone()) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default precision + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(103u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + client.exec(format!("drop database if exists {db}")).await?; + + Ok(()) +} diff --git a/docs/examples/rust/nativeexample/examples/schemaless_insert_telnet.rs b/docs/examples/rust/nativeexample/examples/schemaless_insert_telnet.rs new file mode 100644 index 0000000000000000000000000000000000000000..d2bbc692615b832ec51c1ea666f30569e61a505a --- /dev/null +++ b/docs/examples/rust/nativeexample/examples/schemaless_insert_telnet.rs @@ -0,0 +1,80 @@ +use taos_query::common::SchemalessPrecision; +use taos_query::common::SchemalessProtocol; +use taos_query::common::SmlDataBuilder; + +use crate::AsyncQueryable; +use crate::AsyncTBuilder; +use crate::TaosBuilder; + +async fn put_telnet() -> anyhow::Result<()> { + // std::env::set_var("RUST_LOG", "taos=trace"); + std::env::set_var("RUST_LOG", "taos=debug"); + pretty_env_logger::init(); + let dsn = + std::env::var("TDENGINE_CLOUD_DSN").unwrap_or("http://localhost:6041".to_string()); + log::debug!("dsn: {:?}", &dsn); + + let client = TaosBuilder::from_dsn(dsn)?.build().await?; + + let db = "demo_schemaless_ws"; + + client.exec(format!("drop database if exists {db}")).await?; + + client + .exec(format!("create database if not exists {db}")) + .await?; + + // should specify database before insert + client.exec(format!("use {db}")).await?; + + let data = [ + "meters.current 1648432611249 10.3 location=California.SanFrancisco group=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco group=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles group=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles group=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco group=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco group=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles group=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles group=3", + ] + .map(String::from) + .to_vec(); + + // demo with all fields + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Telnet) + .precision(SchemalessPrecision::Millisecond) + .data(data.clone()) + .ttl(1000) + .req_id(200u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default precision + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Telnet) + .data(data.clone()) + .ttl(1000) + .req_id(201u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default ttl + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Telnet) + .data(data.clone()) + .req_id(202u64) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + // demo with default req_id + let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Telnet) + .data(data.clone()) + .build()?; + assert_eq!(client.put(&sml_data).await?, ()); + + client.exec(format!("drop database if exists {db}")).await?; + + Ok(()) +} diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index b327d33350e2396d47c7e7edd90a65ad1ccb35a9..1cd0076ba592e5055b7018f0149b5f6f53f18626 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -207,6 +207,8 
@@ Active: inactive (dead) - 查看服务状态:`sudo launchctl list | grep taosd` +- 查看服务详细信息:`launchctl print system/com.tdengine.taosd` + :::info - `launchctl` 命令管理`com.tdengine.taosd`需要管理员权限,务必在前面加 `sudo` 来增强安全性。 diff --git a/docs/zh/07-develop/03-insert-data/_rust_schemaless.mdx b/docs/zh/07-develop/03-insert-data/_rust_schemaless.mdx new file mode 100644 index 0000000000000000000000000000000000000000..a26613bd15e010b8982c9d93ee1894df7e8d1407 --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/_rust_schemaless.mdx @@ -0,0 +1,3 @@ +```rust +{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}} +``` diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index bf89b1df8363eb054af7309d4548f726a2295f9a..0bc113e58bb630e280cc3e275ac535d108c4b4ad 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -36,6 +36,104 @@ REST 连接支持所有能运行 Java 的平台。 请参考[版本支持列表](../#版本支持) +## 最近更新记录 + +| taos-jdbcdriver 版本 | 主要变化 | +| :------------------: | :------: | +| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | +| 3.2.0 | 存在连接问题,不推荐使用 | +| 3.1.0 | WebSocket 连接支持订阅功能 | +| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 | +| 3.0.0 | 支持 TDengine 3.0 | +| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 | +| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 | +| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | +| 2.0.38 | JDBC REST 连接增加批量拉取功能 | +| 2.0.37 | 增加对 json tag 支持 | +| 2.0.36 | 增加对 schemaless 写入支持 | + +**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。 + +## 处理异常 + +在报错后,通过 SQLException 可以获取到错误的信息和错误码: + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +JDBC 连接器可能报错的错误码包括 4 种: + +- JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间) +- 原生连接方法的报错(错误码在 0x2351 到 0x2360 之间) +- 数据订阅的报错(错误码在 0x2371 到 0x2380 之间) +- TDengine 其他功能模块的报错。 + +具体的错误码请参考: + +| Error Code | Description | Suggested Actions | +| ---------- | ----------- | ----------------- | +| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 | +| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 | +| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 | +| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 | +| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 | +| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 | +| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 | +| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 | +| 0x2309 | invalid sql for executeQuery: (?) | - | +| 0x230a | Database not specified or available | - | +| 0x230b | invalid sql for executeUpdate: (?) | - | +| 0x230c | invalid sql for execute: (?) 
| - | +| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 | +| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 | +| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 | +| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 | +| 0x2311 | can't register JDBC-RESTful driver | - | +| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 | +| 0x2313 | invalid sql | - | +| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 | +| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 | +| 0x2316 | unknown timestamp precision | - | +| 0x2317 | | REST 连接中使用了错误的请求类型。 | +| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 | +| 0x2319 | user is required | 创建连接时缺少用户名信息 | +| 0x231a | password is required | 创建连接时缺少密码信息 | +| 0x231b | invalid json format | - | +| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 | +| 0x2350 | unknown error | 未知异常,请在 GitHub 上反馈给开发人员。 | +| 0x2351 | failed to create subscription | - | +| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 | +| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 | +| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 | +| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 | +| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 | +| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 | +| 0x2358 | fetch to the end of resultSet | - | +| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 | +| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 | +| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 | +| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 | +| 0x2374 | consumer config error | - | +| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 | +| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 | +| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 | +| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 | +| - | can't create connection with server within: | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 | +| - | failed to complete the task within the specified time : | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 | + +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) + + ## TDengine DataType 和 Java DataType TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: @@ -82,7 +180,7 @@ <dependency> <groupId>com.taosdata.jdbc</groupId> <artifactId>taos-jdbcdriver</artifactId> - <version>3.1.0</version> + <version>3.2.1</version> </dependency> ``` @@ -97,7 +195,7 @@ cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` -编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.\*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 +编译后,在 target 目录下会产生 taos-jdbcdriver-3.2.\*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 @@ -336,35 +434,6 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 -### 处理异常 - -在报错后,通过 SQLException 可以获取到错误的信息和错误码: - -```java -try (Statement statement = connection.createStatement()) { - // executeQuery - ResultSet resultSet = statement.executeQuery(sql); - // print result - printResult(resultSet); -} catch (SQLException e) { - System.out.println("ERROR Message: " + e.getMessage()); - 
System.out.println("ERROR Code: " + e.getErrorCode()); - e.printStackTrace(); -} -``` - -JDBC 连接器可能报错的错误码包括 4 种: - -- JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间) -- 原生连接方法的报错(错误码在 0x2351 到 0x2360 之间) -- 数据订阅的报错(错误码在 0x2371 到 0x2380 之间) -- TDengine 其他功能模块的报错。 - -具体的错误码请参考: - -- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) - - ### 通过参数绑定写入数据 TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。 @@ -372,9 +441,12 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据 **注意**: - JDBC REST 连接目前不支持参数绑定 -- 以下示例代码基于 taos-jdbcdriver-3.1.0 +- 以下示例代码基于 taos-jdbcdriver-3.2.1 - binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法 -- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽 +- 预处理语句中指定数据库与子表名称不要使用 `db.?`,应直接使用 `?`,然后在 setTableName 中指定数据库,如:`prepareStatement.setTableName("db.t1")`。 + + + ```java public class ParameterBindingDemo { @@ -602,21 +674,7 @@ public class ParameterBindingDemo { } ``` -用于设定 TAGS 取值的方法总共有: - -```java -public void setTagNull(int index, int type) -public void setTagBoolean(int index, boolean value) -public void setTagInt(int index, int value) -public void setTagByte(int index, byte value) -public void setTagShort(int index, short value) -public void setTagLong(int index, long value) -public void setTagTimestamp(int index, long value) -public void setTagFloat(int index, float value) -public void setTagDouble(int index, double value) -public void setTagString(int index, String value) -public void setTagNString(int index, String value) -``` +**注**:setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽 用于设定 VALUES 数据列的取值的方法总共有: @@ -633,17 +691,203 @@ public void setString(int columnIndex, ArrayList list, int size) throws public void setNString(int columnIndex, ArrayList list, int size) throws SQLException ``` + + + +```java +public class ParameterBindingDemo { + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 30; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); + + init(conn); + + bindInteger(conn); + + bindFloat(conn); + + bindBoolean(conn); + + bindBytes(conn); + + bindString(conn); + + conn.close(); + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_ws_parabind"); + stmt.execute("create database if not exists test_ws_parabind"); + stmt.execute("use test_ws_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + + private static void bindInteger(Connection conn) throws 
+
+
 ```java
 public class ParameterBindingDemo {
@@ -602,21 +674,7 @@ public class ParameterBindingDemo {
 }
 ```

-The methods for setting TAGS values are:
-
-```java
-public void setTagNull(int index, int type)
-public void setTagBoolean(int index, boolean value)
-public void setTagInt(int index, int value)
-public void setTagByte(int index, byte value)
-public void setTagShort(int index, short value)
-public void setTagLong(int index, long value)
-public void setTagTimestamp(int index, long value)
-public void setTagFloat(int index, float value)
-public void setTagDouble(int index, double value)
-public void setTagString(int index, String value)
-public void setTagNString(int index, String value)
-```
+**Note**: both setString and setNString require the user to declare, in the size parameter, the width of the corresponding column in the table definition (a small sketch follows the method list below)

 The methods for setting the values of VALUES data columns are:

@@ -633,17 +691,203 @@ public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
 public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
 public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
 ```
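+A small sketch of the size requirement (assuming a native `TSDBPreparedStatement` named `pstmt`, a `binary(30)` column like stable4's f1 in the demo below, and the same positional indexing as the demos):
+
+```java
+ArrayList<String> f1List = new ArrayList<>();
+f1List.add("abc");
+// size must state the declared column width: binary(30) here
+pstmt.setString(1, f1List, 30);
+```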
values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t5_" + i); + // set tags + pstmt.setTagNString(1, "California.SanFrancisco"); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(0, new Timestamp(current + j)); + pstmt.setNString(1, "California.SanFrancisco"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } +} +``` + + + + +用于设定 TAGS 取值的方法总共有: + +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + ### 无模式写入 TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。 -**注意**: - -- JDBC REST 连接目前不支持无模式写入 -- 以下示例代码基于 taos-jdbcdriver-3.1.0 + + ```java -public class SchemalessInsertTest { +public class SchemalessJniTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; @@ -671,6 +915,41 @@ public class SchemalessInsertTest { } ``` + + + +```java +public class SchemalessWsTest { + private static final String host = "127.0.0.1"; + private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; + private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + + public static void main(String[] args) throws SQLException { + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true"; + Connection connection = DriverManager.getConnection(url); + init(connection); + + SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless"); + writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS); + writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); + writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); + System.exit(0); + } + + private static void init(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.executeUpdate("drop database if exists test_ws_schemaless"); + stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500"); + stmt.executeUpdate("use test_ws_schemaless"); + } + } +} +``` + + + + ### 数据订阅 TDengine Java 连接器支持订阅功能,应用 API 如下: @@ -714,8 +993,9 @@ TaosConsumer consumer = new TaosConsumer<>(config); ```java while(true) { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - for (ResultBean record : records) { - 
@@ -766,8 +1046,9 @@ public abstract class ConsumerLoop {
       while (!shutdown.get()) {
         ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
-        for (ResultBean record : records) {
-          process(record);
+        for (ConsumerRecord<ResultBean> record : records) {
+          ResultBean bean = record.value();
+          process(bean);
         }
       }
       consumer.unsubscribe();
@@ -844,8 +1125,9 @@ public abstract class ConsumerLoop {
       while (!shutdown.get()) {
         ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
-        for (ResultBean record : records) {
-          process(record);
+        for (ConsumerRecord<ResultBean> record : records) {
+          ResultBean bean = record.value();
+          process(bean);
         }
       }
       consumer.unsubscribe();
@@ -971,20 +1253,6 @@ public static void main(String[] args) throws Exception {

 See: [JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

-## Recent updates
-
-| taos-jdbcdriver version | major changes |
-| :---: | :---: |
-| 3.1.0 | subscription support over the WebSocket connection |
-| 3.0.1 - 3.0.4 | fixed result-set data-parsing errors in some cases. 3.0.1 is compiled under JDK 11; under JDK 8, other versions are recommended |
-| 3.0.0 | support for TDengine 3.0 |
-| 2.0.42 | fixed the wasNull return value in the WebSocket connection |
-| 2.0.41 | fixed the encoding of user name and password in the REST connection |
-| 2.0.39 - 2.0.40 | added REST connection/request timeout settings |
-| 2.0.38 | added bulk fetching to the JDBC REST connection |
-| 2.0.37 | added support for json tags |
-| 2.0.36 | added support for schemaless writing |
-
 ## FAQ

 1. Why doesn't executing "batch writes/updates" with Statement's `addBatch()` and `executeBatch()` bring a performance improvement?

diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx
index eeb93a6558b755a6c179ecea86aae8b594fd16fe..d4ca25be8109a2aa28d0805edcd0add5d052138b 100644
--- a/docs/zh/08-connector/26-rust.mdx
+++ b/docs/zh/08-connector/26-rust.mdx
@@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
 import Preparation from "./_preparation.mdx"
 import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
 import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
+import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
 import RustQuery from "../07-develop/04-query-data/_rust.mdx"

 [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
@@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {

+#### Schemaless writing
+
+<RustSml />
+
 ### Querying data

diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index ad4b59714cbcd83f80a4ad260004310e0740b94f..197a5ecaf9b1e13af8d19dfde550f04c83183711 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -185,6 +185,7 @@ typedef struct SMergeLogicNode {
   int32_t numOfChannels;
   int32_t srcGroupId;
   bool    groupSort;
+  bool    ignoreGroupId;
 } SMergeLogicNode;

 typedef enum EWindowType {
@@ -444,6 +445,7 @@ typedef struct SMergePhysiNode {
   int32_t numOfChannels;
   int32_t srcGroupId;
   bool    groupSort;
+  bool    ignoreGroupId;
 } SMergePhysiNode;

 typedef struct SWinodwPhysiNode {
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 55465f227e1df479ba8a010fb665469fc42e2a2a..243a84ca93e21717865cdc9eb98ef75221651295 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -1342,6 +1342,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
     goto _return;
   }

+  pRequest->syncQuery = true;
+
   STscObj *pTscObj = pRequest->pTscObj;
   code =
transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta); if (code) { @@ -1368,7 +1370,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { tsem_wait(&pParam->sem); _return: - taosArrayDestroy(catalogReq.pTableMeta); + taosArrayDestroyEx(catalogReq.pTableMeta, destoryTablesReq); destroyRequest(pRequest); return code; } diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index cf1c6cc434739190734af155b95a5d78cab366e4..d12e0b5c197584e4800fe577bc40ebd44384865d 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1219,6 +1219,7 @@ static int32_t smlPushCols(SArray *colsArray, SArray *cols) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); if (terrno == TSDB_CODE_DUP_KEY) { + taosHashCleanup(kvHash); return terrno; } } @@ -1292,12 +1293,12 @@ static int32_t smlParseLineBottom(SSmlHandle *info) { uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum); SSmlSTableMeta *meta = smlBuildSTableMeta(info->dataFormat); + taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES); smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags); if (terrno == TSDB_CODE_DUP_KEY) { return terrno; } smlInsertMeta(meta->colHash, meta->cols, elements->colArray); - taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES); } } uDebug("SML:0x%" PRIx64 " smlParseLineBottom end, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum); diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 796c5cf8ff5990b438a48dc6fa54037fe44807b4..62f837f3b6f4bad95fd5a035e6094bff076ec686 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -755,7 +755,7 @@ SColVal *tRowIterNext(SRowIter *pIter) { } if (pIter->pRow->flag == HAS_NULL) { - pIter->cv = COL_VAL_NULL(pTColumn->type, pTColumn->colId); + pIter->cv = COL_VAL_NULL(pTColumn->colId, pTColumn->type); goto _exit; } @@ -2439,7 +2439,7 @@ _exit: int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes, int32_t nRows, char *lengthOrbitmap, char *data) { int32_t code = 0; - if(data == NULL){ + if (data == NULL) { for (int32_t i = 0; i < nRows; ++i) { code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NONE](pColData, NULL, 0); } @@ -2453,8 +2453,9 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0); if (code) goto _exit; } else { - if(ASSERT(varDataTLen(data + offset) <= bytes)){ - uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), bytes); + if (ASSERT(varDataTLen(data + offset) <= bytes)) { + uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), + bytes); code = TSDB_CODE_INVALID_PARA; goto _exit; } diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index c713d1e247a8cd358fbe0c8a4a529f884e8810b6..8c0e5c35cd46e83a3aa3ba124fbf4adafbe607f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -84,6 +84,7 @@ target_include_directories( PUBLIC "inc" PUBLIC "src/inc" PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" + PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" ) target_link_libraries( vnode @@ -100,6 +101,7 
@@ target_link_libraries( # PUBLIC bdb # PUBLIC scalar + PUBLIC rocksdb PUBLIC transport PUBLIC stream PUBLIC index diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 0a54da9b5318dcf94173e29d4167a0a0b3dc0f46..34536d453c9ed527b84edd36577c61f26c67d21f 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -182,7 +182,7 @@ int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t n int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables, SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly); -void tsdbReaderSetId(STsdbReader* pReader, const char* idstr); +void tsdbReaderSetId(STsdbReader *pReader, const char *idstr); void tsdbReaderClose(STsdbReader *pReader); int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext); int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave); @@ -196,8 +196,9 @@ void *tsdbGetIvtIdx(SMeta *pMeta); uint64_t getReaderMaxVersion(STsdbReader *pReader); int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols, - uint64_t suid, void **pReader, const char *idstr); -int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids); + SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr); +int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds, + SArray *pTableUids); void *tsdbCacherowsReaderClose(void *pReader); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 2a85b191a42839a580da6ff8b65ee4e9c65430b5..95bce321964b432182aaeef556bec8e52509e31e 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -343,6 +343,16 @@ struct STsdbFS { SArray *aDFileSet; // SArray }; +typedef struct { + rocksdb_t *db; + rocksdb_options_t *options; + rocksdb_flushoptions_t *flushoptions; + rocksdb_writeoptions_t *writeoptions; + rocksdb_readoptions_t *readoptions; + rocksdb_writebatch_t *writebatch; + TdThreadMutex rMutex; +} SRocksCache; + struct STsdb { char *path; SVnode *pVnode; @@ -355,6 +365,7 @@ struct STsdb { TdThreadMutex lruMutex; SLRUCache *biCache; TdThreadMutex biMutex; + SRocksCache rCache; }; struct TSDBKEY { @@ -777,6 +788,8 @@ typedef struct SCacheRowsReader { uint64_t suid; char **transferBuf; // todo remove it soon int32_t numOfCols; + SArray *pCidList; + int32_t *pSlotIds; int32_t type; int32_t tableIndex; // currently returned result tables STableKeyInfo *pTableList; // table id list @@ -796,6 +809,10 @@ typedef struct { int32_t tsdbOpenCache(STsdb *pTsdb); void tsdbCloseCache(STsdb *pTsdb); +int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row); +int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype); +int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey); + int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb); int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup); int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h 
b/source/dnode/vnode/src/inc/vnodeInt.h index 70b997aba2cadb4e9088c11cb5115a4079382ea4..94e5f253bfb73323f14c649f5356b7ed6df70786 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -19,6 +19,7 @@ #include "executor.h" #include "filter.h" #include "qworker.h" +#include "rocksdb/c.h" #include "sync.h" #include "tRealloc.h" #include "tchecksum.h" @@ -177,6 +178,7 @@ int tsdbClose(STsdb** pTsdb); int32_t tsdbBegin(STsdb* pTsdb); int32_t tsdbPrepareCommit(STsdb* pTsdb); int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo); +int32_t tsdbCacheCommit(STsdb* pTsdb); int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo); int32_t tsdbFinishCommit(STsdb* pTsdb); int32_t tsdbRollbackCommit(STsdb* pTsdb); @@ -193,9 +195,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode); void tqClose(STQ*); int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver); int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp, - int32_t type); + int32_t type); int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer); -int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed. +int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed. int tqCommit(STQ*); int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 3c7edd931baf1fd2f8796368e4ac76bbb398e636..7d22e59b4a845444852391594803cfdc12537af0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
  */
-
 #include "tsdb.h"

 static int32_t tsdbOpenBICache(STsdb *pTsdb) {
@@ -47,6 +46,512 @@
   }
 }

+#define ROCKS_KEY_LEN 64
+
+static void tsdbGetRocksPath(STsdb *pTsdb, char *path) {
+  SVnode *pVnode = pTsdb->pVnode;
+  if (pVnode->pTfs) {
+    if (path) {
+      snprintf(path, TSDB_FILENAME_LEN, "%s%s%s%scache.rdb", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), TD_DIRSEP,
+               pTsdb->path, TD_DIRSEP);
+    }
+  } else {
+    if (path) {
+      snprintf(path, TSDB_FILENAME_LEN, "%s%scache.rdb", pTsdb->path, TD_DIRSEP);
+    }
+  }
+}
+
+static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
+  int32_t code = 0;
+
+  rocksdb_options_t *options = rocksdb_options_create();
+  if (NULL == options) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto _err;
+  }
+
+  rocksdb_options_set_create_if_missing(options, 1);
+  // rocksdb_options_set_inplace_update_support(options, 1);
+  // rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
+
+  rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
+  if (NULL == writeoptions) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto _err2;
+  }
+  // rocksdb_writeoptions_disable_WAL(writeoptions, 1);
+
+  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
+  if (NULL == readoptions) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto _err3;  // destroy writeoptions as well; it was created above
+  }
+
+  char *err = NULL;
+  char  cachePath[TSDB_FILENAME_LEN] = {0};
+  tsdbGetRocksPath(pTsdb, cachePath);
+
+  rocksdb_t *db = rocksdb_open(options, cachePath, &err);
+  if (NULL == db) {
+    code = -1;
+    goto _err4;  // destroy readoptions as well; it was created before the open
+  }
+
+  rocksdb_flushoptions_t *flushoptions = rocksdb_flushoptions_create();
+  if (NULL == flushoptions) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    rocksdb_close(db);  // the db was already opened; close it before unwinding
+    goto _err4;
+  }
+
+  rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create();
+
+  pTsdb->rCache.writebatch = writebatch;
+  pTsdb->rCache.options = options;
+  pTsdb->rCache.writeoptions = writeoptions;
+  pTsdb->rCache.readoptions = readoptions;
+  pTsdb->rCache.flushoptions = flushoptions;
+  pTsdb->rCache.db = db;
+
+  taosThreadMutexInit(&pTsdb->rCache.rMutex, NULL);
+
+  return code;
+
+_err4:
+  rocksdb_readoptions_destroy(readoptions);
+_err3:
+  rocksdb_writeoptions_destroy(writeoptions);
+_err2:
+  rocksdb_options_destroy(options);
+_err:
+  return code;
+}
+
+static void tsdbCloseRocksCache(STsdb *pTsdb) {
+  rocksdb_close(pTsdb->rCache.db);
+  rocksdb_flushoptions_destroy(pTsdb->rCache.flushoptions);
+  rocksdb_writebatch_destroy(pTsdb->rCache.writebatch);
+  rocksdb_readoptions_destroy(pTsdb->rCache.readoptions);
+  rocksdb_writeoptions_destroy(pTsdb->rCache.writeoptions);
+  rocksdb_options_destroy(pTsdb->rCache.options);
+  taosThreadMutexDestroy(&pTsdb->rCache.rMutex);
+}
+
+int32_t tsdbCacheCommit(STsdb *pTsdb) {
+  int32_t code = 0;
+  char   *err = NULL;
+
+  rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
+  if (NULL != err) {
+    tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
+    rocksdb_free(err);
+    code = -1;
+  }
+
+  return code;
+}
+
+SLastCol *tsdbCacheDeserialize(char const *value) {
+  if (!value) {
+    return NULL;
+  }
+
+  SLastCol *pLastCol = (SLastCol *)value;
+  SColVal  *pColVal = &pLastCol->colVal;
+  if (IS_VAR_DATA_TYPE(pColVal->type)) {
+    if (pColVal->value.nData > 0) {
+      pColVal->value.pData = (char *)value + sizeof(*pLastCol);
+    } else {
+      pColVal->value.pData = NULL;
+    }
+  }
+
+  return pLastCol;
+}
+
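+// --- editorial note (a sketch of the layout the functions below assume) ---
+// RocksDB cache entries are keyed as "<uid>:<cid>:last" or "<uid>:<cid>:last_row"
+// (built with snprintf into a ROCKS_KEY_LEN buffer). The value is a serialized
+// SLastCol: the struct itself, followed inline by nData bytes of var-type payload.
+// tsdbCacheSerialize() appends the payload; tsdbCacheDeserialize() re-points
+// colVal.value.pData at the inline bytes after a read.
+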
+void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
+  SColVal *pColVal = &pLastCol->colVal;
+  size_t   length = sizeof(*pLastCol);
+  if (IS_VAR_DATA_TYPE(pColVal->type)) {
+    length += pColVal->value.nData;
+  }
+  *value = taosMemoryMalloc(length);
+
+  *(SLastCol *)(*value) = *pLastCol;
+  if (IS_VAR_DATA_TYPE(pColVal->type)) {
+    uint8_t *pVal = pColVal->value.pData;
+    SColVal *pDColVal = &((SLastCol *)(*value))->colVal;
+    pDColVal->value.pData = *value + sizeof(*pLastCol);
+    if (pColVal->value.nData > 0) {
+      memcpy(pDColVal->value.pData, pVal, pColVal->value.nData);
+    } else {
+      pDColVal->value.pData = NULL;
+    }
+  }
+  *size = length;
+}
+
+static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, char const *lstring) {
+  SLastCol *pLastCol = NULL;
+
+  char  *err = NULL;
+  size_t vlen = 0;
+  char   key[ROCKS_KEY_LEN];
+  size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring);
+  char  *value = NULL;
+  value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, key, klen, &vlen, &err);
+  if (NULL != err) {
+    tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
+    rocksdb_free(err);
+  }
+
+  pLastCol = tsdbCacheDeserialize(value);
+
+  return pLastCol;
+}
+
+int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow) {
+  int32_t code = 0;
+
+  // 1, fetch schema
+  STSchema *pTSchema = NULL;
+  int32_t   sver = TSDBROW_SVERSION(pRow);
+  code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema);
+  if (code != TSDB_CODE_SUCCESS) {
+    terrno = code;
+    return -1;
+  }
+
+  // 2, iterate col values into array
+  SArray *aColVal = taosArrayInit(32, sizeof(SColVal));
+
+  STSDBRowIter iter = {0};
+  tsdbRowIterOpen(&iter, pRow, pTSchema);
+
+  for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal; pColVal = tsdbRowIterNext(&iter)) {
+    /*
+    if (IS_VAR_DATA_TYPE(pColVal->type)) {
+      uint8_t *pVal = pColVal->value.pData;
+
+      pColVal->value.pData = NULL;
+      code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
+      if (code) {
+        code = TSDB_CODE_OUT_OF_MEMORY;
+        goto _exit;
+      }
+
+      if (pColVal->value.nData) {
+        memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
+      }
+    }
+    */
+    taosArrayPush(aColVal, pColVal);
+  }
+
+  tsdbRowClose(&iter);
+
+  // 3, build keys & multi get from rocks
+  int     num_keys = TARRAY_SIZE(aColVal);
+  char  **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
+  size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
+  for (int i = 0; i < num_keys; ++i) {
+    SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
+    int16_t  cid = pColVal->cid;
+
+    char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
+    int   last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid);
+    if (last_key_len >= ROCKS_KEY_LEN) {
+      tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
+    }
+    int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid);
+    if (lr_key_len >= ROCKS_KEY_LEN) {
+      tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
+    }
+    keys_list[i] = keys;
+    keys_list[num_keys + i] = keys + ROCKS_KEY_LEN;
+    keys_list_sizes[i] = last_key_len;
+    keys_list_sizes[num_keys + i] = lr_key_len;
+  }
+  char  **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
+  size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
+  char  **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
+  taosThreadMutexLock(&pTsdb->rCache.rMutex);
+  rocksdb_multi_get(pTsdb->rCache.db,
pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list, + keys_list_sizes, values_list, values_list_sizes, errs); + for (int i = 0; i < num_keys; ++i) { + taosMemoryFree(keys_list[i]); + } + for (int i = 0; i < num_keys * 2; ++i) { + rocksdb_free(errs[i]); + } + taosMemoryFree(keys_list); + taosMemoryFree(keys_list_sizes); + taosMemoryFree(errs); + + TSKEY keyTs = TSDBROW_TS(pRow); + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; + for (int i = 0; i < num_keys; ++i) { + SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i); + if (COL_VAL_IS_VALUE(pColVal)) { + SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]); + + if (NULL == pLastCol || pLastCol->ts <= keyTs) { + char *value = NULL; + size_t vlen = 0; + tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); + char key[ROCKS_KEY_LEN]; + size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pColVal->cid); + rocksdb_writebatch_put(wb, key, klen, value, vlen); + taosMemoryFree(value); + } + } + + if (!COL_VAL_IS_NONE(pColVal)) { + SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]); + + if (NULL == pLastCol || pLastCol->ts <= keyTs) { + char *value = NULL; + size_t vlen = 0; + tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); + char key[ROCKS_KEY_LEN]; + size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pColVal->cid); + rocksdb_writebatch_put(wb, key, klen, value, vlen); + taosMemoryFree(value); + } + } + + rocksdb_free(values_list[i]); + rocksdb_free(values_list[i + num_keys]); + } + taosMemoryFree(values_list); + taosMemoryFree(values_list_sizes); + + char *err = NULL; + rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err); + if (NULL != err) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err); + rocksdb_free(err); + } + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + rocksdb_writebatch_clear(wb); + +_exit: + taosArrayDestroy(aColVal); + taosMemoryFree(pTSchema); + return code; +} + +static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols, + int nCols, int16_t *slotIds); + +static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols, + int nCols, int16_t *slotIds); + +int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype) { + static char const *alstring[2] = {"last_row", "last"}; + char const *lstring = alstring[ltype]; + int32_t code = 0; + + SArray *pCidList = pr->pCidList; + int num_keys = TARRAY_SIZE(pCidList); + char **keys_list = taosMemoryCalloc(num_keys, sizeof(char *)); + size_t *keys_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t)); + for (int i = 0; i < num_keys; ++i) { + int16_t cid = *(int16_t *)taosArrayGet(pCidList, i); + + char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN); + int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring); + if (last_key_len >= ROCKS_KEY_LEN) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); + } + + keys_list[i] = keys; + keys_list_sizes[i] = last_key_len; + } + char **values_list = taosMemoryCalloc(num_keys, sizeof(char *)); + size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t)); + char **errs = taosMemoryCalloc(num_keys, sizeof(char *)); + 
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list, + keys_list_sizes, values_list, values_list_sizes, errs); + for (int i = 0; i < num_keys; ++i) { + taosMemoryFree(keys_list[i]); + rocksdb_free(errs[i]); + } + taosMemoryFree(keys_list); + taosMemoryFree(keys_list_sizes); + taosMemoryFree(errs); + + for (int i = 0; i < num_keys; ++i) { + bool freeCol = true; + SArray *pTmpColArray = NULL; + SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]); + int16_t cid = *(int16_t *)taosArrayGet(pCidList, i); + SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[pr->pSlotIds[i]].type)}; + if (pLastCol) { + SColVal *pColVal = &pLastCol->colVal; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + uint8_t *pVal = pColVal->value.pData; + pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData); + if (pColVal->value.nData) { + memcpy(pColVal->value.pData, pVal, pColVal->value.nData); + } + } + } else { + taosThreadMutexLock(&pTsdb->rCache.rMutex); + + pLastCol = tsdbCacheLookup(pTsdb, uid, cid, lstring); + if (!pLastCol) { + // recalc: load from tsdb + int16_t aCols[1] = {cid}; + int16_t slotIds[1] = {pr->pSlotIds[i]}; + pTmpColArray = NULL; + + if (ltype) { + mergeLastCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds); + } else { + mergeLastRowCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds); + } + + if (pTmpColArray && TARRAY_SIZE(pTmpColArray) >= 1) { + pLastCol = taosArrayGet(pTmpColArray, 0); + freeCol = false; + } + + // still null, then make up a none col value + if (!pLastCol) { + pLastCol = &noneCol; + freeCol = false; + } + + // store result back to rocks cache + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; + char *value = NULL; + size_t vlen = 0; + tsdbCacheSerialize(pLastCol, &value, &vlen); + char key[ROCKS_KEY_LEN]; + size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, pLastCol->colVal.cid, lstring); + rocksdb_writebatch_put(wb, key, klen, value, vlen); + + taosMemoryFree(value); + + char *err = NULL; + rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err); + if (NULL != err) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err); + rocksdb_free(err); + } + } else { + SColVal *pColVal = &pLastCol->colVal; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + uint8_t *pVal = pColVal->value.pData; + pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData); + if (pColVal->value.nData) { + memcpy(pColVal->value.pData, pVal, pColVal->value.nData); + } + } + } + + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + } + + taosArrayPush(pLastArray, pLastCol); + + taosArrayDestroy(pTmpColArray); + if (freeCol) { + taosMemoryFree(pLastCol); + } + } + taosMemoryFree(values_list); + taosMemoryFree(values_list_sizes); + + return code; +} + +int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) { + int32_t code = 0; + // 1, fetch schema + STSchema *pTSchema = NULL; + int32_t sver = -1; + code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return -1; + } + + // 3, build keys & multi get from rocks + int num_keys = pTSchema->numOfCols; + char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *)); + size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t)); + for (int i = 0; i < num_keys; ++i) { + int16_t cid = pTSchema->columns[i].colId; + + char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN); + 
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid); + if (last_key_len >= ROCKS_KEY_LEN) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); + } + int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid); + if (lr_key_len >= ROCKS_KEY_LEN) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); + } + keys_list[i] = keys; + keys_list[num_keys + i] = keys + ROCKS_KEY_LEN; + keys_list_sizes[i] = last_key_len; + keys_list_sizes[num_keys + i] = lr_key_len; + } + char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *)); + size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t)); + char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *)); + taosThreadMutexLock(&pTsdb->rCache.rMutex); + rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list, + keys_list_sizes, values_list, values_list_sizes, errs); + for (int i = 0; i < num_keys; ++i) { + taosMemoryFree(keys_list[i]); + } + for (int i = 0; i < num_keys * 2; ++i) { + rocksdb_free(errs[i]); + } + taosMemoryFree(keys_list); + taosMemoryFree(keys_list_sizes); + taosMemoryFree(errs); + + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; + for (int i = 0; i < num_keys; ++i) { + SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]); + if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { + char key[ROCKS_KEY_LEN]; + size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pLastCol->colVal.cid); + rocksdb_writebatch_delete(wb, key, klen); + } + + pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]); + if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { + char key[ROCKS_KEY_LEN]; + size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pLastCol->colVal.cid); + rocksdb_writebatch_delete(wb, key, klen); + } + + rocksdb_free(values_list[i]); + rocksdb_free(values_list[i + num_keys]); + } + taosMemoryFree(values_list); + taosMemoryFree(values_list_sizes); + + char *err = NULL; + rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err); + if (NULL != err) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err); + rocksdb_free(err); + } + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + rocksdb_writebatch_clear(wb); + +_exit: + taosMemoryFree(pTSchema); + + return code; +} + int32_t tsdbOpenCache(STsdb *pTsdb) { int32_t code = 0; SLRUCache *pCache = NULL; @@ -64,6 +569,12 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { goto _err; } + code = tsdbOpenRocksCache(pTsdb); + if (code != TSDB_CODE_SUCCESS) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + taosLRUCacheSetStrictCapacity(pCache, false); taosThreadMutexInit(&pTsdb->lruMutex, NULL); @@ -84,6 +595,7 @@ void tsdbCloseCache(STsdb *pTsdb) { } tsdbCloseBICache(pTsdb); + tsdbCloseRocksCache(pTsdb); } static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) { @@ -170,7 +682,7 @@ int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) { return code; } - +/* int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) { int32_t code = 0; char key[32] = {0}; @@ -225,7 +737,7 @@ int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) { return code; } - +*/ int32_t tsdbCacheInsertLastrow(SLRUCache 
*pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup) { int32_t code = 0; STSRow *cacheRow = NULL; @@ -879,7 +1391,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie *pIgnoreEarlierTs = false; tBlockDataReset(state->pBlockData); TABLEID tid = {.suid = state->suid, .uid = state->uid}; - code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, aCols, nCols); + int nTmpCols = nCols; + if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID && nCols == 1) { + nTmpCols = 0; + skipBlock = false; + } + code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, aCols, nTmpCols); if (code) goto _err; code = tsdbReadDataBlock(*state->pDataFReader, &block, state->pBlockData); @@ -1423,6 +1940,21 @@ static int32_t initLastColArray(STSchema *pTSchema, SArray **ppColArray) { return TSDB_CODE_SUCCESS; } +static int32_t initLastColArrayPartial(STSchema *pTSchema, SArray **ppColArray, int16_t *slotIds, int nCols) { + SArray *pColArray = taosArrayInit(nCols, sizeof(SLastCol)); + if (NULL == pColArray) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < nCols; ++i) { + int16_t slotId = slotIds[i]; + SLastCol col = {.ts = 0, .colVal = COL_VAL_NULL(pTSchema->columns[slotId].colId, pTSchema->columns[slotId].type)}; + taosArrayPush(pColArray, &col); + } + *ppColArray = pColArray; + return TSDB_CODE_SUCCESS; +} + static int32_t cloneTSchema(STSchema *pSrc, STSchema **ppDst) { int32_t len = sizeof(STSchema) + sizeof(STColumn) * pSrc->numOfCols; *ppDst = taosMemoryMalloc(len); @@ -1761,6 +2293,342 @@ _err: return code; } +static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols, + int nCols, int16_t *slotIds) { + STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); + int16_t nLastCol = nCols; + int16_t noneCol = 0; + bool setNoneCol = false; + bool hasRow = false; + bool ignoreEarlierTs = false; + SArray *pColArray = NULL; + SColVal *pColVal = &(SColVal){0}; + + int32_t code = initLastColArrayPartial(pTSchema, &pColArray, slotIds, nCols); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + SArray *aColArray = taosArrayInit(nCols, sizeof(int16_t)); + if (NULL == aColArray) { + taosArrayDestroy(pColArray); + + return TSDB_CODE_OUT_OF_MEMORY; + } + + for (int i = 0; i < nCols; ++i) { + taosArrayPush(aColArray, &aCols[i]); + } + + TSKEY lastRowTs = TSKEY_MAX; + + CacheNextRowIter iter = {0}; + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader, + &pr->pDataFReaderLast, pr->lastTs); + + do { + TSDBROW *pRow = NULL; + nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); + + if (!pRow) { + break; + } + + hasRow = true; + + int32_t sversion = TSDBROW_SVERSION(pRow); + if (sversion != -1) { + code = updateTSchema(sversion, pr, uid); + if (TSDB_CODE_SUCCESS != code) { + goto _err; + } + pTSchema = pr->pCurrSchema; + } + // int16_t nCol = pTSchema->numOfCols; + + TSKEY rowTs = TSDBROW_TS(pRow); + + if (lastRowTs == TSKEY_MAX) { + lastRowTs = rowTs; + + for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { + if (iCol >= nLastCol) { + break; + } + SLastCol *pCol = taosArrayGet(pColArray, iCol); + if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { + continue; + } + if (slotIds[iCol] == 0) { + STColumn *pTColumn = &pTSchema->columns[0]; + + *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs}); + taosArraySet(pColArray, 0, 
&(SLastCol){.ts = rowTs, .colVal = *pColVal}); + continue; + } + tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); + + *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; + if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); + if (pCol->colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + + if (!COL_VAL_IS_VALUE(pColVal)) { + if (!setNoneCol) { + noneCol = iCol; + setNoneCol = true; + } + } else { + int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ); + if (aColIndex >= 0) { + taosArrayRemove(aColArray, aColIndex); + } + } + } + if (!setNoneCol) { + // done, goto return pColArray + break; + } else { + continue; + } + } + + // merge into pColArray + setNoneCol = false; + for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { + if (iCol >= nLastCol) { + break; + } + // high version's column value + SLastCol *lastColVal = (SLastCol *)taosArrayGet(pColArray, iCol); + if (lastColVal->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { + continue; + } + SColVal *tColVal = &lastColVal->colVal; + + tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); + if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) { + SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal}; + if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol); + taosMemoryFree(pLastCol->colVal.value.pData); + + lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData); + if (lastCol.colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + + taosArraySet(pColArray, iCol, &lastCol); + int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ); + taosArrayRemove(aColArray, aColIndex); + } else if (!COL_VAL_IS_VALUE(tColVal) && !COL_VAL_IS_VALUE(pColVal) && !setNoneCol) { + noneCol = iCol; + setNoneCol = true; + } + } + } while (setNoneCol); + + if (!hasRow) { + if (ignoreEarlierTs) { + taosArrayDestroy(pColArray); + pColArray = NULL; + } else { + taosArrayClear(pColArray); + } + } + *ppLastArray = pColArray; + + nextRowIterClose(&iter); + taosArrayDestroy(aColArray); + + return code; + +_err: + nextRowIterClose(&iter); + // taosMemoryFreeClear(pTSchema); + *ppLastArray = NULL; + taosArrayDestroy(pColArray); + taosArrayDestroy(aColArray); + return code; +} + +static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols, + int nCols, int16_t *slotIds) { + STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1); + int16_t nLastCol = nCols; + int16_t noneCol = 0; + bool setNoneCol = false; + bool hasRow = false; + bool ignoreEarlierTs = false; + SArray *pColArray = NULL; + SColVal *pColVal = &(SColVal){0}; + + int32_t code = initLastColArrayPartial(pTSchema, &pColArray, slotIds, nCols); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + SArray *aColArray = taosArrayInit(nCols, sizeof(int16_t)); + if (NULL == aColArray) { + taosArrayDestroy(pColArray); + + return TSDB_CODE_OUT_OF_MEMORY; + } + + for (int i = 0; i < nCols; ++i) { + taosArrayPush(aColArray, &aCols[i]); + } + + TSKEY lastRowTs 
= TSKEY_MAX; + + CacheNextRowIter iter = {0}; + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader, + &pr->pDataFReaderLast, pr->lastTs); + + do { + TSDBROW *pRow = NULL; + nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); + + if (!pRow) { + break; + } + + hasRow = true; + + int32_t sversion = TSDBROW_SVERSION(pRow); + if (sversion != -1) { + code = updateTSchema(sversion, pr, uid); + if (TSDB_CODE_SUCCESS != code) { + goto _err; + } + pTSchema = pr->pCurrSchema; + } + // int16_t nCol = pTSchema->numOfCols; + + TSKEY rowTs = TSDBROW_TS(pRow); + + if (lastRowTs == TSKEY_MAX) { + lastRowTs = rowTs; + + for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { + if (iCol >= nLastCol) { + break; + } + SLastCol *pCol = taosArrayGet(pColArray, iCol); + if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { + continue; + } + if (slotIds[iCol] == 0) { + STColumn *pTColumn = &pTSchema->columns[0]; + + *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs}); + taosArraySet(pColArray, 0, &(SLastCol){.ts = rowTs, .colVal = *pColVal}); + continue; + } + tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); + + *pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal}; + if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData); + if (pCol->colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + + if (COL_VAL_IS_NONE(pColVal)) { + if (!setNoneCol) { + noneCol = iCol; + setNoneCol = true; + } + } else { + int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ); + if (aColIndex >= 0) { + taosArrayRemove(aColArray, aColIndex); + } + } + } + if (!setNoneCol) { + // done, goto return pColArray + break; + } else { + continue; + } + } + + // merge into pColArray + setNoneCol = false; + for (int16_t iCol = noneCol; iCol < nCols; ++iCol) { + if (iCol >= nLastCol) { + break; + } + // high version's column value + SLastCol *lastColVal = (SLastCol *)taosArrayGet(pColArray, iCol); + if (lastColVal->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) { + continue; + } + SColVal *tColVal = &lastColVal->colVal; + + tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal); + if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) { + SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal}; + if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) { + SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol); + taosMemoryFree(pLastCol->colVal.value.pData); + + lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData); + if (lastCol.colVal.value.pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + + taosArraySet(pColArray, iCol, &lastCol); + int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ); + taosArrayRemove(aColArray, aColIndex); + } else if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal) && !setNoneCol) { + noneCol = iCol; + setNoneCol = true; + } + } + } while (setNoneCol); + + if (!hasRow) { + if (ignoreEarlierTs) { + taosArrayDestroy(pColArray); + pColArray = NULL; + } else { + taosArrayClear(pColArray); + } + 
} + *ppLastArray = pColArray; + + nextRowIterClose(&iter); + taosArrayDestroy(aColArray); + + return code; + +_err: + nextRowIterClose(&iter); + + *ppLastArray = NULL; + taosArrayDestroy(pColArray); + taosArrayDestroy(aColArray); + return code; +} + int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) { int32_t code = 0; char key[32] = {0}; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index d6e819d84ad3d8a6131dcf15f09afdc8ff4b7f64..0d47940582a9fed54eee0104746c52965f44c0b8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -21,47 +21,30 @@ #define HASTYPE(_type, _t) (((_type) & (_t)) == (_t)) static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds, - void** pRes, const char* idStr) { + const int32_t* dstSlotIds, void** pRes, const char* idStr) { int32_t numOfRows = pBlock->info.rows; + bool allNullRow = true; if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) { - bool allNullRow = true; - for (int32_t i = 0; i < pReader->numOfCols; ++i) { - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]); SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[i]); + int32_t slotId = slotIds[i]; + SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i); - if (slotIds[i] == -1) { // the primary timestamp - SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0); - p->ts = pColVal->ts; - p->bytes = TSDB_KEYSIZE; - *(int64_t*)p->buf = pColVal->ts; - allNullRow = false; - } else { - int32_t slotId = slotIds[i]; - // add check for null value, caused by the modification of table schema (new column added). - if (slotId >= taosArrayGetSize(pRow)) { - p->ts = 0; - p->isNull = true; - colDataSetNULL(pColInfoData, numOfRows); - continue; - } - - SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId); + p->ts = pColVal->ts; + p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); + allNullRow = p->isNull & allNullRow; - p->ts = pColVal->ts; - p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); - allNullRow = p->isNull & allNullRow; + if (!p->isNull) { + if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { + varDataSetLen(p->buf, pColVal->colVal.value.nData); - if (!p->isNull) { - if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { - varDataSetLen(p->buf, pColVal->colVal.value.nData); - memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData); - p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size - } else { - memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes); - p->bytes = pReader->pSchema->columns[slotId].bytes; - } + memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData); + p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size + } else { + memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes); + p->bytes = pReader->pSchema->columns[slotId].bytes; } } @@ -74,36 +57,31 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p pBlock->info.rows += allNullRow ? 
0 : 1; } else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) { for (int32_t i = 0; i < pReader->numOfCols; ++i) { - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]); - if (slotIds[i] == -1) { - SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0); - colDataSetVal(pColInfoData, numOfRows, (const char*)&pColVal->ts, false); - } else { - int32_t slotId = slotIds[i]; - // add check for null value, caused by the modification of table schema (new column added). - if (slotId >= taosArrayGetSize(pRow)) { - colDataSetNULL(pColInfoData, numOfRows); - continue; - } - SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId); - SColVal* pVal = &pColVal->colVal; + int32_t slotId = slotIds[i]; + SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i); + SColVal* pVal = &pColVal->colVal; - if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { - if (!COL_VAL_IS_VALUE(&pColVal->colVal)) { - colDataSetNULL(pColInfoData, numOfRows); - } else { - varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData); - memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData); - colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false); - } + if (COL_VAL_IS_NONE(&pColVal->colVal)) { + continue; + } + allNullRow = false; + if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) { + if (!COL_VAL_IS_VALUE(&pColVal->colVal)) { + colDataSetNULL(pColInfoData, numOfRows); } else { - colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal)); + varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData); + + memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData); + colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false); } + } else { + colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal)); } } - pBlock->info.rows += 1; + pBlock->info.rows += allNullRow ? 
0 : 1; } else { tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr); return TSDB_CODE_INVALID_PARA; @@ -143,7 +121,7 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id } int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols, - uint64_t suid, void** pReader, const char* idstr) { + SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) { *pReader = NULL; SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader)); if (p == NULL) { @@ -155,6 +133,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, p->pTsdb = p->pVnode->pTsdb; p->verRange = (SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}; p->numOfCols = numOfCols; + p->pCidList = pCidList; + p->pSlotIds = pSlotIds; p->suid = suid; if (numOfTables == 0) { @@ -226,32 +206,9 @@ void* tsdbCacherowsReaderClose(void* pReader) { return NULL; } -static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, SArray** pRow, - LRUHandle** h) { - int32_t code = TSDB_CODE_SUCCESS; - *pRow = NULL; - - if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) { - code = tsdbCacheGetLastrowH(lruCache, uid, pr, h); - } else { - code = tsdbCacheGetLastH(lruCache, uid, pr, h); - } - - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - // no data in the table of Uid - if (*h != NULL) { - *pRow = (SArray*)taosLRUCacheValue(lruCache, *h); - } - - return code; -} - static void freeItem(void* pItem) { SLastCol* pCol = (SLastCol*)pItem; - if (IS_VAR_DATA_TYPE(pCol->colVal.type)) { + if (IS_VAR_DATA_TYPE(pCol->colVal.type) && pCol->colVal.value.pData) { taosMemoryFree(pCol->colVal.value.pData); } } @@ -277,19 +234,17 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) { } } -int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) { +int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds, + SArray* pTableUidList) { if (pReader == NULL || pResBlock == NULL) { return TSDB_CODE_INVALID_PARA; } SCacheRowsReader* pr = pReader; - - int32_t code = TSDB_CODE_SUCCESS; - SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache; - LRUHandle* h = NULL; - SArray* pRow = NULL; - bool hasRes = false; - SArray* pLastCols = NULL; + int32_t code = TSDB_CODE_SUCCESS; + SArray* pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol)); + bool hasRes = false; + SArray* pLastCols = NULL; void** pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES); if (pRes == NULL) { @@ -298,20 +253,22 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 } for (int32_t j = 0; j < pr->numOfCols; ++j) { - pRes[j] = taosMemoryCalloc( - 1, sizeof(SFirstLastRes) + pr->pSchema->columns[-1 == slotIds[j] ? 0 : slotIds[j]].bytes + VARSTR_HEADER_SIZE); + pRes[j] = + taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[/*-1 == slotIds[j] ? 
0 : */ slotIds[j]].bytes + + VARSTR_HEADER_SIZE); SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]); p->ts = INT64_MIN; } - pLastCols = taosArrayInit(pr->pSchema->numOfCols, sizeof(SLastCol)); + pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol)); if (pLastCols == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _end; } - for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) { - struct STColumn* pCol = &pr->pSchema->columns[i]; + for (int32_t i = 0; i < pr->numOfCols; ++i) { + int32_t slotId = slotIds[i]; + struct STColumn* pCol = &pr->pSchema->columns[slotId]; SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL}; if (IS_VAR_DATA_TYPE(pCol->type)) { @@ -328,6 +285,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 pr->pDataFReader = NULL; pr->pDataFReaderLast = NULL; + int32_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3; + // retrieve the only one last row of all tables in the uid list. if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) { int64_t st = taosGetTimestampUs(); @@ -335,16 +294,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 for (int32_t i = 0; i < pr->numOfTables; ++i) { STableKeyInfo* pKeyInfo = &pr->pTableList[i]; - code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); - if (code != TSDB_CODE_SUCCESS) { - goto _end; - } - - if (h == NULL) { + tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype); + if (TARRAY_SIZE(pRow) <= 0) { + taosArrayClearEx(pRow, freeItem); continue; } - if (taosArrayGetSize(pRow) <= 0) { - tsdbCacheRelease(lruCache, h); + SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0); + if (COL_VAL_IS_NONE(&pColVal->colVal)) { + taosArrayClearEx(pRow, freeItem); continue; } @@ -352,47 +309,34 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 bool hasNotNullRow = true; int64_t singleTableLastTs = INT64_MAX; for (int32_t k = 0; k < pr->numOfCols; ++k) { - int32_t slotId = slotIds[k]; - - if (slotId == -1) { // the primary timestamp - SLastCol* p = taosArrayGet(pLastCols, 0); - SLastCol* pCol = (SLastCol*)taosArrayGet(pRow, 0); - if (pCol->ts > p->ts) { - hasRes = true; - p->ts = pCol->ts; - p->colVal = pCol->colVal; - singleTableLastTs = pCol->ts; - } - } else { - SLastCol* p = taosArrayGet(pLastCols, slotId); - SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId); - - if (pColVal->ts > p->ts) { - if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) { - if (!COL_VAL_IS_VALUE(&p->colVal)) { - hasNotNullRow = false; - } - continue; - } + SLastCol* p = taosArrayGet(pLastCols, k); + SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, k); - hasRes = true; - p->ts = pColVal->ts; - if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) { - singleTableLastTs = pColVal->ts; + if (pColVal->ts > p->ts) { + if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) { + if (!COL_VAL_IS_VALUE(&p->colVal)) { + hasNotNullRow = false; } + continue; + } - if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) { - p->colVal = pColVal->colVal; - } else { - if (COL_VAL_IS_VALUE(&pColVal->colVal)) { - memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData); - } - - p->colVal.value.nData = pColVal->colVal.value.nData; - p->colVal.type = pColVal->colVal.type; - p->colVal.flag = pColVal->colVal.flag; - p->colVal.cid = pColVal->colVal.cid; + hasRes = true; + p->ts = pColVal->ts; + if (pColVal->ts < 
singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) { + singleTableLastTs = pColVal->ts; + } + + if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) { + p->colVal = pColVal->colVal; + } else { + if (COL_VAL_IS_VALUE(&pColVal->colVal)) { + memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData); } + + p->colVal.value.nData = pColVal->colVal.value.nData; + p->colVal.type = pColVal->colVal.type; + p->colVal.flag = pColVal->colVal.flag; + p->colVal.cid = pColVal->colVal.cid; } } } @@ -414,33 +358,31 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 taosArraySet(pTableUidList, 0, &pKeyInfo->uid); } - tsdbCacheRelease(lruCache, h); + taosArrayClearEx(pRow, freeItem); } if (hasRes) { - saveOneRow(pLastCols, pResBlock, pr, slotIds, pRes, pr->idstr); + saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr); } } else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) { for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) { STableKeyInfo* pKeyInfo = &pr->pTableList[i]; - code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); - if (code != TSDB_CODE_SUCCESS) { - goto _end; - } - if (h == NULL) { + tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype); + if (TARRAY_SIZE(pRow) <= 0) { + taosArrayClearEx(pRow, freeItem); continue; } - if (taosArrayGetSize(pRow) <= 0) { - tsdbCacheRelease(lruCache, h); + SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0); + if (COL_VAL_IS_NONE(&pColVal->colVal)) { + taosArrayClearEx(pRow, freeItem); continue; } - saveOneRow(pRow, pResBlock, pr, slotIds, pRes, pr->idstr); - // TODO reset the pRes + saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr); + taosArrayClearEx(pRow, freeItem); taosArrayPush(pTableUidList, &pKeyInfo->uid); - tsdbCacheRelease(lruCache, h); pr->tableIndex += 1; if (pResBlock->info.rows >= pResBlock->info.capacity) { @@ -466,6 +408,7 @@ _end: } taosMemoryFree(pRes); + taosArrayDestroyEx(pRow, freeItem); taosArrayDestroyEx(pLastCols, freeItem); return code; } diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index d0ff403bf70a96f114b0722bfb13f7ce035edfb4..b6141abcf933819e9c71f18c811b3e92d1d9080f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -140,7 +140,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid SMemTable *pMemTable = pTsdb->mem; STbData *pTbData = NULL; SVBufPool *pPool = pTsdb->pVnode->inUse; - TSDBKEY lastKey = {.version = version, .ts = eKey}; // check if table exists SMetaInfo info; @@ -181,7 +180,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid pMemTable->nDel++; pMemTable->minVer = TMIN(pMemTable->minVer, version); pMemTable->maxVer = TMIN(pMemTable->maxVer, version); - + /* if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) { tsdbCacheDeleteLastrow(pTsdb->lruCache, pTbData->uid, eKey); } @@ -189,6 +188,10 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) { tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey); } + */ + if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) { + tsdbCacheDel(pTsdb, suid, uid, sKey, eKey); + } tsdbTrace("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64 " at version %" PRId64, @@ -284,8 +287,8 @@ bool 
tsdbTbDataIterNext(STbDataIter *pIter) { int64_t tsdbCountTbDataRows(STbData *pTbData) { SMemSkipListNode *pNode = pTbData->sl.pHead; - int64_t rowsNum = 0; - + int64_t rowsNum = 0; + while (NULL != pNode) { pNode = SL_GET_NODE_FORWARD(pNode, 0); if (pNode == pTbData->sl.pTail) { @@ -298,17 +301,17 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) { return rowsNum; } -void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj* pTableMap, int64_t *rowsNum) { +void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj *pTableMap, int64_t *rowsNum) { taosRLockLatch(&pMemTable->latch); for (int32_t i = 0; i < pMemTable->nBucket; ++i) { STbData *pTbData = pMemTable->aBucket[i]; while (pTbData) { - void* p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid)); + void *p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid)); if (p == NULL) { pTbData = pTbData->next; continue; } - + *rowsNum += tsdbCountTbDataRows(pTbData); pTbData = pTbData->next; } @@ -668,15 +671,8 @@ static int32_t tsdbInsertColDataToTable(SMemTable *pMemTable, STbData *pTbData, if (key.ts >= pTbData->maxKey) { pTbData->maxKey = key.ts; - - if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) { - tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true); - } - } - - if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) { - tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb); } + tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow); // SMemTable pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey); @@ -736,15 +732,8 @@ static int32_t tsdbInsertRowDataToTable(SMemTable *pMemTable, STbData *pTbData, if (key.ts >= pTbData->maxKey) { pTbData->maxKey = key.ts; - - if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) { - tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true); - } - } - - if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) { - tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb); } + tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow); // SMemTable pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey); diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 847125018c5f15baa660dc2f53677e17e18e0d7f..74168591d21ef151b0885e15076dc0842b7ab445 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -144,8 +144,8 @@ _exit: } int vnodeShouldCommit(SVnode *pVnode, bool atExit) { - bool diskAvail = osDataSpaceAvailable(); - bool needCommit = false; + bool diskAvail = osDataSpaceAvailable(); + bool needCommit = false; taosThreadMutexLock(&pVnode->mutex); if (pVnode->inUse && diskAvail) { @@ -439,6 +439,9 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) { code = tsdbCommit(pVnode->pTsdb, pInfo); TSDB_CHECK_CODE(code, lino, _exit); + code = tsdbCacheCommit(pVnode->pTsdb); + TSDB_CHECK_CODE(code, lino, _exit); + if (VND_IS_RSMA(pVnode)) { code = smaCommit(pVnode->pSma, pInfo); TSDB_CHECK_CODE(code, lino, _exit); diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 4302302d7ab6226f2eda506500c9ba0c3bb706fd..c5b9eb7475b0bdebe19451f1d3ac69a4d248abb5 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1128,6 +1128,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_END(); 
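// For context: the explain.c hunk resuming below adds one more EXPLAIN output
// row that reports the merge node's new ignoreGroupId flag. The
// EXPLAIN_ROW_NEW / EXPLAIN_ROW_APPEND / EXPLAIN_ROW_END macros accumulate
// formatted text into tbuf before qExplainResAppendRow() stores the finished
// row. A minimal, self-contained sketch of that pattern; the ROW_* macros and
// the "Ignore Group Id: %s" format string are simplified snprintf-based
// stand-ins assumed for illustration, not the real definitions:

#include <stdbool.h>
#include <stdio.h>

static char rowBuf[256];
static int  rowLen;

#define ROW_NEW(level)       (rowLen = snprintf(rowBuf, sizeof(rowBuf), "%*s", (level) * 2, ""))
#define ROW_APPEND(fmt, ...) (rowLen += snprintf(rowBuf + rowLen, sizeof(rowBuf) - rowLen, fmt, __VA_ARGS__))
#define ROW_END()            puts(rowBuf)

int main(void) {
  bool ignoreGroupId = true;  // mirrors pMergeNode->ignoreGroupId
  ROW_NEW(2);                 // indent the row by its nesting level
  ROW_APPEND("Ignore Group Id: %s", ignoreGroupId ? "true" : "false");
  ROW_END();                  // prints "    Ignore Group Id: true"
  return 0;
}

// End of illustration; the explain.c hunk continues below.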
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pMergeNode->ignoreGroupId ? "true" : "false"); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT); if (pMergeNode->groupSort) { EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc"); diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 61f024ebb70bcbbf2b2127b41d3ac5f22ec74693..925e3054505af7039a6c3d00672880aebca0ebce 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -31,18 +31,21 @@ typedef struct SCacheRowsScanInfo { void* pLastrowReader; SColMatchInfo matchInfo; int32_t* pSlotIds; + int32_t* pDstSlotIds; SExprSupp pseudoExprSup; int32_t retrieveType; int32_t currentGroupIndex; SSDataBlock* pBufferredRes; SArray* pUidList; + SArray* pCidList; int32_t indexOfBufferedRes; STableListInfo* pTableList; } SCacheRowsScanInfo; static SSDataBlock* doScanCache(SOperatorInfo* pOperator); static void destroyCacheScanOperator(void* param); -static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds); +static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds, + int32_t** pDstSlotIds); static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pColMatchInfo); #define SCAN_ROW_TYPE(_t) ((_t) ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW) @@ -71,9 +74,16 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe goto _error; } + SArray* pCidList = taosArrayInit(numOfCols, sizeof(int16_t)); + for (int i = 0; i < TARRAY_SIZE(pInfo->matchInfo.pList); ++i) { + SColMatchItem* pColInfo = taosArrayGet(pInfo->matchInfo.pList, i); + taosArrayPush(pCidList, &pColInfo->colId); + } + pInfo->pCidList = pCidList; + removeRedundantTsCol(pScanNode, &pInfo->matchInfo); - code = extractCacheScanSlotId(pInfo->matchInfo.pList, pTaskInfo, &pInfo->pSlotIds); + code = extractCacheScanSlotId(pInfo->matchInfo.pList, pTaskInfo, &pInfo->pSlotIds, &pInfo->pDstSlotIds); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -91,8 +101,8 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe uint64_t suid = tableListGetSuid(pTableListInfo); code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables, - taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, - pTaskInfo->id.str); + taosArrayGetSize(pInfo->matchInfo.pList), pCidList, pInfo->pSlotIds, suid, + &pInfo->pLastrowReader, pTaskInfo->id.str); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -160,8 +170,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { blockDataCleanup(pInfo->pBufferredRes); taosArrayClear(pInfo->pUidList); - int32_t code = - tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList); + int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, + pInfo->pDstSlotIds, pInfo->pUidList); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } @@ -227,8 +237,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { } code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, - 
taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, - pTaskInfo->id.str); + taosArrayGetSize(pInfo->matchInfo.pList), pInfo->pCidList, pInfo->pSlotIds, suid, + &pInfo->pLastrowReader, pTaskInfo->id.str); if (code != TSDB_CODE_SUCCESS) { pInfo->currentGroupIndex += 1; taosArrayClear(pInfo->pUidList); @@ -237,7 +247,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { taosArrayClear(pInfo->pUidList); - code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); + code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pDstSlotIds, + pInfo->pUidList); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } @@ -280,6 +291,8 @@ void destroyCacheScanOperator(void* param) { blockDataDestroy(pInfo->pRes); blockDataDestroy(pInfo->pBufferredRes); taosMemoryFree(pInfo->pSlotIds); + taosMemoryFree(pInfo->pDstSlotIds); + taosArrayDestroy(pInfo->pCidList); taosArrayDestroy(pInfo->pUidList); taosArrayDestroy(pInfo->matchInfo.pList); tableListDestroy(pInfo->pTableList); @@ -292,7 +305,8 @@ void destroyCacheScanOperator(void* param) { taosMemoryFreeClear(param); } -int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds) { +int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds, + int32_t** pDstSlotIds) { size_t numOfCols = taosArrayGetSize(pColMatchInfo); *pSlotIds = taosMemoryMalloc(numOfCols * sizeof(int32_t)); @@ -300,18 +314,25 @@ int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTask return TSDB_CODE_OUT_OF_MEMORY; } + *pDstSlotIds = taosMemoryMalloc(numOfCols * sizeof(int32_t)); + if (*pDstSlotIds == NULL) { + taosMemoryFree(*pSlotIds); + return TSDB_CODE_OUT_OF_MEMORY; + } + SSchemaWrapper* pWrapper = pTaskInfo->schemaInfo.sw; for (int32_t i = 0; i < numOfCols; ++i) { SColMatchItem* pColMatch = taosArrayGet(pColMatchInfo, i); for (int32_t j = 0; j < pWrapper->nCols; ++j) { - if (pColMatch->colId == pWrapper->pSchema[j].colId && pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + /* if (pColMatch->colId == pWrapper->pSchema[j].colId && pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { (*pSlotIds)[pColMatch->dstSlotId] = -1; break; - } + }*/ if (pColMatch->colId == pWrapper->pSchema[j].colId) { - (*pSlotIds)[pColMatch->dstSlotId] = j; + (*pSlotIds)[i] = j; + (*pDstSlotIds)[i] = pColMatch->dstSlotId; break; } } diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index cb0f1aa06845575e40d69ff4dca2ccaf96809b0a..df62a7fa77570011a13a0b91c355e3604708ecc7 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -545,6 +545,7 @@ typedef struct SMultiwayMergeOperatorInfo { SSDataBlock* pIntermediateBlock; // to hold the intermediate result int64_t startTs; // sort start time bool groupSort; + bool ignoreGroupId; uint64_t groupId; STupleHandle* prefetchedTuple; } SMultiwayMergeOperatorInfo; @@ -694,7 +695,11 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData } pDataBlock->info.rows = p->info.rows; - pDataBlock->info.id.groupId = pInfo->groupId; + if (pInfo->ignoreGroupId) { + pDataBlock->info.id.groupId = 0; + } else { + pDataBlock->info.id.groupId = pInfo->groupId; + } pDataBlock->info.dataLoad = 1; } @@ -785,6 +790,7 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size blockDataEnsureCapacity(pInfo->binfo.pRes, 
pOperator->resultInfo.capacity); pInfo->groupSort = pMergePhyNode->groupSort; + pInfo->ignoreGroupId = pMergePhyNode->ignoreGroupId; pInfo->pSortInfo = createSortInfo(pMergePhyNode->pMergeKeys); pInfo->pInputBlock = pInputBlock; size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index d9a4c5178fab59c5fb481fb8a67206a54fb54a07..0f4e7bde638e7007383e90951f3dd05268971301 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -455,6 +455,7 @@ static int32_t logicMergeCopy(const SMergeLogicNode* pSrc, SMergeLogicNode* pDst COPY_SCALAR_FIELD(numOfChannels); COPY_SCALAR_FIELD(srcGroupId); COPY_SCALAR_FIELD(groupSort); + COPY_SCALAR_FIELD(ignoreGroupId); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 0aeb83ce0812cf52bd3add4b2b0276fe051d00f3..3e83ef4291febdf07c5acc623af97c36197d7d7e 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2027,6 +2027,7 @@ static const char* jkMergePhysiPlanTargets = "Targets"; static const char* jkMergePhysiPlanNumOfChannels = "NumOfChannels"; static const char* jkMergePhysiPlanSrcGroupId = "SrcGroupId"; static const char* jkMergePhysiPlanGroupSort = "GroupSort"; +static const char* jkMergePhysiPlanIgnoreGroupID = "IgnoreGroupID"; static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) { const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj; @@ -2047,6 +2048,9 @@ static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkMergePhysiPlanGroupSort, pNode->groupSort); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkMergePhysiPlanIgnoreGroupID, pNode->ignoreGroupId); + } return code; } @@ -2070,6 +2074,9 @@ static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkMergePhysiPlanGroupSort, &pNode->groupSort); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkMergePhysiPlanIgnoreGroupID, &pNode->ignoreGroupId); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 6c6b6c0e81fec21b995b836d351307729d3b65a9..c06eb62771b1c96065c3ef3d9f0c3917d9297843 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2512,7 +2512,8 @@ enum { PHY_MERGE_CODE_TARGETS, PHY_MERGE_CODE_NUM_OF_CHANNELS, PHY_MERGE_CODE_SRC_GROUP_ID, - PHY_MERGE_CODE_GROUP_SORT + PHY_MERGE_CODE_GROUP_SORT, + PHY_MERGE_CODE_IGNORE_GROUP_ID, }; static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -2534,6 +2535,9 @@ static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_GROUP_SORT, pNode->groupSort); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId); + } return code; } @@ -2563,6 +2567,9 @@ static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) { case PHY_MERGE_CODE_GROUP_SORT: code = tlvDecodeBool(pTlv, &pNode->groupSort); break; + case PHY_MERGE_CODE_IGNORE_GROUP_ID: + code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId); + break; default: break; } diff --git 
a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index e2c2e4c65563e3b4a49add42d73c004f1ea39bb7..be43bb008c7ece11cd2cf1eb9695b2a6a410415d 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1559,6 +1559,7 @@ static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pM pMerge->numOfChannels = pMergeLogicNode->numOfChannels; pMerge->srcGroupId = pMergeLogicNode->srcGroupId; pMerge->groupSort = pMergeLogicNode->groupSort; + pMerge->ignoreGroupId = pMergeLogicNode->ignoreGroupId; int32_t code = addDataBlockSlots(pCxt, pMergeLogicNode->pInputs, pMerge->node.pOutputDataBlockDesc); diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index eed59c8236089b1a3f3a868e1f685fe8acb404f3..504db0d07b8ce4988b3cd40b4089d24dc88dc03f 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -538,7 +538,8 @@ static int32_t stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* p switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_PROJECT: { SProjectLogicNode *pLogicNode = (SProjectLogicNode*)pNode; - if (pMerge->node.pLimit || pMerge->node.pSlimit) { + if (pLogicNode->ignoreGroupId && (pMerge->node.pLimit || pMerge->node.pSlimit)) { + pMerge->ignoreGroupId = true; pLogicNode->ignoreGroupId = false; } break; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 260dfd486e0d10b772b0342cfb13123b1a40c67a..7b533f2cc4c53e61f19d4d19849d0d28da0144b7 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -134,7 +134,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py -,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py +#,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_stable.py diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 80236cf604d63e4d9c8fdddfa81aeaf16fb64944..0fc29c241b24db24b92862b57a9e61278e2c6fe0 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -68,7 +68,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=true;make -j || exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1" # -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \ if [[ -d ${WORKDIR}/debugNoSan ]] ;then @@ -97,7 +97,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=true;make -j || exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true -DJEMALLOC_ENABLED=0;make -j || exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan diff --git a/tests/script/tsim/query/partitionby.sim b/tests/script/tsim/query/partitionby.sim index 8babd1aa8de8e0ad943312e6712d297c3c5242b4..4c221e02d39098ed078e25e5aa4739764f8c0fe9 100644 --- a/tests/script/tsim/query/partitionby.sim +++ b/tests/script/tsim/query/partitionby.sim @@ -36,4 +36,34 @@ if $rows != 0 then return -1 endi +sql insert into tb0 values (now, 0); +sql insert into tb1 values (now, 1); +sql insert into tb2 values (now, 2); +sql insert into tb3 values (now, 3); +sql insert into tb4 values (now, 4); +sql insert into tb5 values (now, 5); +sql insert into tb6 values (now, 6); +sql insert into tb7 values (now, 7); + +sql select * from (select 1 from $mt1 where ts is not null partition by tbname limit 1); +if $rows != 8 then + return -1 +endi + +sql select count(*) from (select ts from $mt1 where ts is not null partition by tbname slimit 2); +if $rows != 1 then + return -1 +endi +if $data00 != 2 then + return -1 +endi + +sql select count(*) from (select ts from $mt1 where ts is not null partition by tbname limit 2); +if $rows != 1 then + return -1 +endi +if $data00 != 8 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT
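
Taken together, the planner, executor, and serialization changes above implement a single behavior: when a limit or slimit has been pushed down into a multiway merge over partitioned input, the merge must stop stamping per-group ids on its output blocks, otherwise a downstream limit is applied once per group instead of once overall. That is what the new partitionby.sim cases assert: with eight one-row partitions, the outer count(*) over "partition by tbname limit 2" sees all 8 rows, while over "slimit 2" it sees only the 2 rows belonging to two partitions. Below is a minimal sketch of the output rule added in sortoperator.c, using simplified stand-in types rather than the real SMultiwayMergeOperatorInfo and SSDataBlock:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the merge operator state and its output block. */
typedef struct {
  bool     groupSort;     /* merge preserves per-group ordering          */
  bool     ignoreGroupId; /* set when a limit/slimit rode into the merge */
} MergeInfo;

typedef struct {
  uint64_t groupId;       /* group id stamped on the outgoing block */
  int32_t  rows;
} DataBlock;

/* Mirrors the rule in getMultiwaySortedBlockData(): with ignoreGroupId set,
 * every block is emitted under group id 0, so a downstream limit sees one
 * logical stream rather than one stream per group. */
static void stampGroupId(const MergeInfo *pInfo, uint64_t mergedGroupId, DataBlock *pBlock) {
  pBlock->groupId = pInfo->ignoreGroupId ? 0 : mergedGroupId;
}

int main(void) {
  MergeInfo info = {.groupSort = true, .ignoreGroupId = true};
  DataBlock block = {.rows = 8};
  stampGroupId(&info, 42, &block);
  printf("rows=%d groupId=%llu\n", (int)block.rows, (unsigned long long)block.groupId);
  return 0;
}

The flag travels with the plan: planSpliter.c raises it on the merge logic node when the child project node carries ignoreGroupId and a limit/slimit is present, planPhysiCreater.c copies it onto the physical merge node, and nodesCodeFuncs.c / nodesMsgFuncs.c serialize it (JSON key "IgnoreGroupID", TLV code PHY_MERGE_CODE_IGNORE_GROUP_ID) so it survives shipping the plan to vnodes; explain.c then surfaces it in EXPLAIN output.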