提交 9f9788c2 编写于 作者:wmmhello

Merge branch '3.0' into feature/TD-14761

...@@ -118,3 +118,4 @@ contrib/* ...@@ -118,3 +118,4 @@ contrib/*
!contrib/test !contrib/test
sql sql
debug*/ debug*/
.env
\ No newline at end of file
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# taos-tools # taos-tools
ExternalProject_Add(taos-tools ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 8a5e336 GIT_TAG 3c7dafe
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR "" BINARY_DIR ""
#BUILD_IN_SOURCE TRUE #BUILD_IN_SOURCE TRUE
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# taosws-rs # taosws-rs
ExternalProject_Add(taosws-rs ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 648cc62 GIT_TAG 29424d5
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs" SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR "" BINARY_DIR ""
#BUILD_IN_SOURCE TRUE #BUILD_IN_SOURCE TRUE
......
...@@ -2,6 +2,7 @@ package main ...@@ -2,6 +2,7 @@ package main
import ( import (
"fmt" "fmt"
"log"
"github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af"
) )
...@@ -10,7 +11,7 @@ func main() { ...@@ -10,7 +11,7 @@ func main() {
conn, err := af.Open("localhost", "root", "taosdata", "", 6030) conn, err := af.Open("localhost", "root", "taosdata", "", 6030)
defer conn.Close() defer conn.Close()
if err != nil { if err != nil {
fmt.Println("failed to connect, err:", err) log.Fatalln("failed to connect, err:", err)
} else { } else {
fmt.Println("connected") fmt.Println("connected")
} }
......
...@@ -3,6 +3,7 @@ package main ...@@ -3,6 +3,7 @@ package main
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"log"
_ "github.com/taosdata/driver-go/v3/taosSql" _ "github.com/taosdata/driver-go/v3/taosSql"
) )
...@@ -11,7 +12,7 @@ func main() { ...@@ -11,7 +12,7 @@ func main() {
var taosDSN = "root:taosdata@tcp(localhost:6030)/" var taosDSN = "root:taosdata@tcp(localhost:6030)/"
taos, err := sql.Open("taosSql", taosDSN) taos, err := sql.Open("taosSql", taosDSN)
if err != nil { if err != nil {
fmt.Println("failed to connect TDengine, err:", err) log.Fatalln("failed to connect TDengine, err:", err)
return return
} }
fmt.Println("Connected") fmt.Println("Connected")
......
...@@ -3,6 +3,7 @@ package main ...@@ -3,6 +3,7 @@ package main
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"log"
_ "github.com/taosdata/driver-go/v3/taosRestful" _ "github.com/taosdata/driver-go/v3/taosRestful"
) )
...@@ -11,7 +12,7 @@ func main() { ...@@ -11,7 +12,7 @@ func main() {
var taosDSN = "root:taosdata@http(localhost:6041)/" var taosDSN = "root:taosdata@http(localhost:6041)/"
taos, err := sql.Open("taosRestful", taosDSN) taos, err := sql.Open("taosRestful", taosDSN)
if err != nil { if err != nil {
fmt.Println("failed to connect TDengine, err:", err) log.Fatalln("failed to connect TDengine, err:", err)
return return
} }
fmt.Println("Connected") fmt.Println("Connected")
......
package main
import (
"fmt"
"github.com/taosdata/driver-go/v3/wrapper"
)
func main() {
conn, err := wrapper.TaosConnect("localhost", "root", "taosdata", "", 6030)
defer wrapper.TaosClose(conn)
if err != nil {
fmt.Println("fail to connect, err:", err)
} else {
fmt.Println("connected")
}
}
package main package main
import ( import (
"fmt" "log"
"github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af"
) )
...@@ -20,7 +20,7 @@ func prepareDatabase(conn *af.Connector) { ...@@ -20,7 +20,7 @@ func prepareDatabase(conn *af.Connector) {
func main() { func main() {
conn, err := af.Open("localhost", "root", "taosdata", "", 6030) conn, err := af.Open("localhost", "root", "taosdata", "", 6030)
if err != nil { if err != nil {
fmt.Println("fail to connect, err:", err) log.Fatalln("fail to connect, err:", err)
} }
defer conn.Close() defer conn.Close()
prepareDatabase(conn) prepareDatabase(conn)
...@@ -32,6 +32,6 @@ func main() { ...@@ -32,6 +32,6 @@ func main() {
err = conn.OpenTSDBInsertJsonPayload(payload) err = conn.OpenTSDBInsertJsonPayload(payload)
if err != nil { if err != nil {
fmt.Println("insert error:", err) log.Fatalln("insert error:", err)
} }
} }
...@@ -2,6 +2,7 @@ package main ...@@ -2,6 +2,7 @@ package main
import ( import (
"fmt" "fmt"
"log"
"github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af"
) )
...@@ -33,6 +34,6 @@ func main() { ...@@ -33,6 +34,6 @@ func main() {
err = conn.InfluxDBInsertLines(lines, "ms") err = conn.InfluxDBInsertLines(lines, "ms")
if err != nil { if err != nil {
fmt.Println("insert error:", err) log.Fatalln("insert error:", err)
} }
} }
...@@ -3,6 +3,7 @@ package main ...@@ -3,6 +3,7 @@ package main
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"log"
_ "github.com/taosdata/driver-go/v3/taosRestful" _ "github.com/taosdata/driver-go/v3/taosRestful"
) )
...@@ -10,28 +11,26 @@ import ( ...@@ -10,28 +11,26 @@ import (
func createStable(taos *sql.DB) { func createStable(taos *sql.DB) {
_, err := taos.Exec("CREATE DATABASE power") _, err := taos.Exec("CREATE DATABASE power")
if err != nil { if err != nil {
fmt.Println("failed to create database, err:", err) log.Fatalln("failed to create database, err:", err)
} }
_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") _, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
if err != nil { if err != nil {
fmt.Println("failed to create stable, err:", err) log.Fatalln("failed to create stable, err:", err)
} }
} }
func insertData(taos *sql.DB) { func insertData(taos *sql.DB) {
sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) sql := `INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`
result, err := taos.Exec(sql) result, err := taos.Exec(sql)
if err != nil { if err != nil {
fmt.Println("failed to insert, err:", err) log.Fatalln("failed to insert, err:", err)
return
} }
rowsAffected, err := result.RowsAffected() rowsAffected, err := result.RowsAffected()
if err != nil { if err != nil {
fmt.Println("failed to get affected rows, err:", err) log.Fatalln("failed to get affected rows, err:", err)
return
} }
fmt.Println("RowsAffected", rowsAffected) fmt.Println("RowsAffected", rowsAffected)
} }
...@@ -40,8 +39,7 @@ func main() { ...@@ -40,8 +39,7 @@ func main() {
var taosDSN = "root:taosdata@http(localhost:6041)/" var taosDSN = "root:taosdata@http(localhost:6041)/"
taos, err := sql.Open("taosRestful", taosDSN) taos, err := sql.Open("taosRestful", taosDSN)
if err != nil { if err != nil {
fmt.Println("failed to connect TDengine, err:", err) log.Fatalln("failed to connect TDengine, err:", err)
return
} }
defer taos.Close() defer taos.Close()
createStable(taos) createStable(taos)
......
...@@ -5,8 +5,8 @@ import ( ...@@ -5,8 +5,8 @@ import (
"time" "time"
"github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/param"
"github.com/taosdata/driver-go/v3/common" "github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
) )
func checkErr(err error, prompt string) { func checkErr(err error, prompt string) {
......
package main package main
import ( import (
"fmt" "log"
"github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af"
) )
...@@ -20,7 +20,7 @@ func prepareDatabase(conn *af.Connector) { ...@@ -20,7 +20,7 @@ func prepareDatabase(conn *af.Connector) {
func main() { func main() {
conn, err := af.Open("localhost", "root", "taosdata", "", 6030) conn, err := af.Open("localhost", "root", "taosdata", "", 6030)
if err != nil { if err != nil {
fmt.Println("fail to connect, err:", err) log.Fatalln("fail to connect, err:", err)
} }
defer conn.Close() defer conn.Close()
prepareDatabase(conn) prepareDatabase(conn)
...@@ -37,6 +37,6 @@ func main() { ...@@ -37,6 +37,6 @@ func main() {
err = conn.OpenTSDBInsertTelnetLines(lines) err = conn.OpenTSDBInsertTelnetLines(lines)
if err != nil { if err != nil {
fmt.Println("insert error:", err) log.Fatalln("insert error:", err)
} }
} }
...@@ -2,7 +2,7 @@ package main ...@@ -2,7 +2,7 @@ package main
import ( import (
"database/sql" "database/sql"
"fmt" "log"
"time" "time"
_ "github.com/taosdata/driver-go/v3/taosRestful" _ "github.com/taosdata/driver-go/v3/taosRestful"
...@@ -12,14 +12,12 @@ func main() { ...@@ -12,14 +12,12 @@ func main() {
var taosDSN = "root:taosdata@http(localhost:6041)/power" var taosDSN = "root:taosdata@http(localhost:6041)/power"
taos, err := sql.Open("taosRestful", taosDSN) taos, err := sql.Open("taosRestful", taosDSN)
if err != nil { if err != nil {
fmt.Println("failed to connect TDengine, err:", err) log.Fatalln("failed to connect TDengine, err:", err)
return
} }
defer taos.Close() defer taos.Close()
rows, err := taos.Query("SELECT ts, current FROM meters LIMIT 2") rows, err := taos.Query("SELECT ts, current FROM meters LIMIT 2")
if err != nil { if err != nil {
fmt.Println("failed to select from table, err:", err) log.Fatalln("failed to select from table, err:", err)
return
} }
defer rows.Close() defer rows.Close()
...@@ -30,9 +28,9 @@ func main() { ...@@ -30,9 +28,9 @@ func main() {
} }
err := rows.Scan(&r.ts, &r.current) err := rows.Scan(&r.ts, &r.current)
if err != nil { if err != nil {
fmt.Println("scan error:\n", err) log.Fatalln("scan error:\n", err)
return return
} }
fmt.Println(r.ts, r.current) log.Fatalln(r.ts, r.current)
} }
} }
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ host: "localhost", database: "power" }); const conn = taos.connect({ host: "localhost", database: "power" });
const cursor = conn.cursor(); const cursor = conn.cursor();
...@@ -18,4 +18,3 @@ try { ...@@ -18,4 +18,3 @@ try {
conn.close(); conn.close();
}, 2000); }, 2000);
} }
// bug here: jira 14506
const taos = require("td2.0-connector"); const { options, connect } = require("@tdengine/rest");
var conn = taos.connect({ async function test() {
host: "localhost", options.path = "/rest/sql";
port: 6030, options.host = "localhost";
user: "root", let conn = connect(options);
password: "taosdata", let cursor = conn.cursor();
}); try {
conn.close(); let res = await cursor.query("SELECT server_version()");
res.toString();
} catch (err) {
console.log(err);
}
}
test();
// run with: node connect.js
// output: // output:
// Successfully connected to TDengine // server_version() |
// ===================
// 3.0.0.0 |
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
...@@ -11,11 +11,11 @@ try { ...@@ -11,11 +11,11 @@ try {
cursor.execute( cursor.execute(
"CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)" "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
); );
var sql = `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) var sql = `INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`;
cursor.execute(sql); cursor.execute(sql,{'quiet':false});
} finally { } finally {
cursor.close(); cursor.close();
conn.close(); conn.close();
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
...@@ -24,10 +24,10 @@ function insertData() { ...@@ -24,10 +24,10 @@ function insertData() {
); );
// bind table name and tags // bind table name and tags
let tagBind = new taos.TaosBind(2); let tagBind = new taos.TaosMultiBindArr(2);
tagBind.bindBinary("California.SanFrancisco"); tagBind.multiBindBinary(["California.SanFrancisco"]);
tagBind.bindInt(2); tagBind.multiBindInt([2]);
cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); cursor.stmtSetTbnameTags("d1001", tagBind.getMultiBindArr());
// bind values // bind values
let valueBind = new taos.TaosMultiBindArr(4); let valueBind = new taos.TaosMultiBindArr(4);
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ const conn = taos.connect({
host: "localhost", host: "localhost",
...@@ -23,25 +23,22 @@ function insertData() { ...@@ -23,25 +23,22 @@ function insertData() {
); );
// bind table name and tags // bind table name and tags
let tagBind = new taos.TaosBind(2); let tagBind = new taos.TaosMultiBindArr(2);
tagBind.bindBinary("California.SanFrancisco"); tagBind.multiBindBinary(["California.SanFrancisco"]);
tagBind.bindInt(2); tagBind.multiBindInt([2]);
cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); cursor.stmtSetTbnameTags("d1001", tagBind.getMultiBindArr());
// bind values // bind values
let rows = [ let rows = [[1648432611249, 1648432611749], [10.3, 12.6], [219, 218], [0.31, 0.33]];
[1648432611249, 10.3, 219, 0.31],
[1648432611749, 12.6, 218, 0.33], let valueBind = new taos.TaosMultiBindArr(4);
]; valueBind.multiBindTimestamp(rows[0]);
for (let row of rows) { valueBind.multiBindFloat(rows[1]);
let valueBind = new taos.TaosBind(4); valueBind.multiBindInt(rows[2]);
valueBind.bindTimestamp(row[0]); valueBind.multiBindFloat(rows[3]);
valueBind.bindFloat(row[1]); cursor.stmtBindParamBatch(valueBind.getMultiBindArr());
valueBind.bindInt(row[2]); cursor.stmtAddBatch();
valueBind.bindFloat(row[3]);
cursor.stmtBindParam(valueBind.getBind());
cursor.stmtAddBatch();
}
// execute // execute
cursor.stmtExecute(); cursor.stmtExecute();
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ host: "localhost", database: "power" }); const conn = taos.connect({ host: "localhost", database: "power" });
const cursor = conn.cursor(); const cursor = conn.cursor();
...@@ -9,8 +9,6 @@ query.execute().then(function (result) { ...@@ -9,8 +9,6 @@ query.execute().then(function (result) {
// output: // output:
// Successfully connected to TDengine // Successfully connected to TDengine
// Query OK, 2 row(s) in set (0.00317767s)
// ts | current | // ts | current |
// ======================================================= // =======================================================
// 2018-10-03 14:38:05.000 | 10.3 | // 2018-10-03 14:38:05.000 | 10.3 |
......
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
const conn = taos.connect({ host: "localhost", database: "power" }); const conn = taos.connect({ host: "localhost", database: "power" });
// 未完成 var cursor = conn.cursor();
\ No newline at end of file
function runConsumer() {
// create topic
cursor.execute("create topic topic_name_example as select * from meters");
let consumer = taos.consumer({
'group.id': 'tg2',
'td.connect.user': 'root',
'td.connect.pass': 'taosdata',
'msg.with.table.name': 'true',
'enable.auto.commit': 'true'
});
// subscribe the topic just created.
consumer.subscribe("topic_name_example");
// get subscribe topic list
let topicList = consumer.subscription();
console.log(topicList);
for (let i = 0; i < 5; i++) {
let msg = consumer.consume(100);
console.log(msg.topicPartition);
console.log(msg.block);
console.log(msg.fields)
consumer.commit(msg);
console.log(`=======consumer ${i} done`)
}
consumer.unsubscribe();
consumer.close();
// drop topic
cursor.execute("drop topic topic_name_example");
}
try {
runConsumer();
} finally {
setTimeout(() => {
cursor.close();
conn.close();
}, 2000);
}
\ No newline at end of file
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
"main": "index.js", "main": "index.js",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"td2.0-connector": "^2.0.12", "@tdengine/client": "^3.0.0",
"td2.0-rest-connector": "^1.0.0" "@tdengine/rest": "^3.0.0"
} }
} }
const { options, connect } = require("td2.0-rest-connector"); const { options, connect } = require("@tdengine/rest");
async function test() { async function test() {
options.path = "/rest/sqlt"; options.path = "/rest/sqlt";
...@@ -17,4 +17,4 @@ test(); ...@@ -17,4 +17,4 @@ test();
// output: // output:
// server_version() | // server_version() |
// =================== // ===================
// 2.4.0.12 | // 3.0.0.0 |
...@@ -3,6 +3,6 @@ ...@@ -3,6 +3,6 @@
``` ```
:::tip :::tip
driver-go 的模块 `github.com/taosdata/driver-go/v2/wrapper` 是 C 接口的底层封装。使用这个模块也可以实现参数绑定写入。 driver-go 的模块 `github.com/taosdata/driver-go/v3/wrapper` 是 C 接口的底层封装。使用这个模块也可以实现参数绑定写入。
::: :::
--- ---
sidebar_label: 消息队列 sidebar_label: 数据订阅
description: "数据订阅与推送服务。连续写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
title: 消息队列 title: 数据订阅
--- ---
基于数据天然的时间序列特性,TDengine 的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine 在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine 中每一张表均可视为一个标准的消息队列 为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本
TDengine 内嵌支持消息订阅与推送服务(下文都简称TMQ)。使用系统提供的 API,用户可使用普通查询语句订阅数据库中的一张或多张表,或整个库。客户端启动订阅后,定时或按需轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户 与 kafka 一样,你需要定义 topic, 但 TDengine 的 topic 可以是一张超级表,或一张子表。不仅如此,你可以通过标签、表名、列、表达式等多种方法过滤所需数据,并且支持对数据进行函数变换、预处理(包括标量udf计算)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤交给 TDengine,而不是应用完成,有效的减少传输的数据量
TMQ提供了提交机制来保证消息队列的可靠性和正确性。在调用方法上,支持自动提交和手动提交。 消费者订阅 topic 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程分布式的消费数据,提高数据吞吐率。但不同消费者组即使消费同一个topic, 并不共享消费进度。一个消费者组可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的vnode上,也就是多个shard上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的ACK机制,在宕机、重启等复杂环境下确保at least once消费。
为了实现上述功能,TDengine 采用了灵活的 WAL (Write-Ahead-Log) 文件切换与保留机制:可以按照时间或文件大小来保留WAL文件(详见create database语句)。在消费时,TDengine 从 WAL 中获取数据,并经过过滤、变换等操作,将数据推送给消费者。
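下面是一个按时间保留 WAL 的建库语句示意(假设使用 `wal_retention_period` 参数、单位为秒,具体参数名与取值请以 create database 语句的文档为准):

```sql
-- 示意:建库时将 WAL 至少保留 3600 秒,供订阅消费使用
create database if not exists tmqdb vgroups 1 wal_retention_period 3600;
```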
本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
## 主要数据结构和API
TMQ 的 API 中,与订阅相关的主要数据结构和API如下: TMQ 的 API 中,与订阅相关的主要数据结构和API如下:
...@@ -47,7 +53,9 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm ...@@ -47,7 +53,9 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码可以在 [tmq.c](https://github.com/taosdata/TDengine/blob/3.0/examples/c/tmq.c) 看到。 这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码可以在 [tmq.c](https://github.com/taosdata/TDengine/blob/3.0/examples/c/tmq.c) 看到。
一、首先完成建库、建一张超级表和多张子表,并每个子表插入若干条数据记录: ## 写入数据
首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
```sql ```sql
drop database if exists tmqdb; drop database if exists tmqdb;
...@@ -63,14 +71,15 @@ insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22'); ...@@ -63,14 +71,15 @@ insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22');
insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33'); insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33');
``` ```
二、创建topic: ## 创建topic:
```sql ```sql
create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
``` ```
注:TMQ支持多种订阅类型: TMQ支持多种订阅类型:
1、列订阅
### 列订阅
语法:CREATE TOPIC topic_name as subquery 语法:CREATE TOPIC topic_name as subquery
通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合) 通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)
...@@ -79,25 +88,18 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -79,25 +88,18 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
- 被订阅或用于计算的column和tag不可被删除、修改 - 被订阅或用于计算的column和tag不可被删除、修改
- 若发生schema变更,新增的column不出现在结果中 - 若发生schema变更,新增的column不出现在结果中
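由于订阅的子查询不支持聚合函数和时间窗口聚合,类似下面这样的写法目前不能用于建 topic(仅为反例示意):

```sql
-- 反例:子查询中含聚合函数与时间窗口,建 topic 时会报错
create topic topic_invalid as select avg(c1) from tmqdb.stb interval(10s);
```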
2、超级表订阅 ### 超级表订阅
语法:CREATE TOPIC topic_name AS STABLE stbName 语法:CREATE TOPIC topic_name AS STABLE stbName
- 订阅某超级表的全部数据,schema变更不受限,schema变更后写入的数据将以最新schema返回 与select * from stbName订阅的区别是:
- 在tmq的返回消息中schema是块级别的,每块的schema可能不一样 - 不会限制用户的schema变更
- 列变更后写入的数据若未落盘,将以写入时的schema返回 - 返回的是非结构化的数据:返回数据的schema会随着超级表的schema变化而变化
- 列变更后写入的数据若已落盘,将以落盘时的schema返回 - 用户对于要处理的每一个数据块都可能有不同的schema,因此,必须重新获取schema
- 返回数据不带有tag
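按照上述语法,一个超级表订阅的建 topic 示意如下(topic 名称仅为示例):

```sql
-- 订阅超级表 tmqdb.stb 写入的全部数据
create topic topic_stb as stable tmqdb.stb;
```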
3、db订阅
语法:CREATE TOPIC topic_name AS DATABASE db_name
- 订阅某一db的全部数据,schema变更不受限
- 在tmq的返回消息中schema是块级别的,每块的schema可能不一样
- 列变更后写入的数据若未落盘,将以写入时的schema返回
- 列变更后写入的数据若已落盘,将以落盘时的schema返回
三、创建consumer ## 创建 consumer 以及consumer group
目前支持的config 对于consumer, 目前支持的config包括
| 参数名称 | 参数值 | 备注 | | 参数名称 | 参数值 | 备注 |
| ---------------------------- | ------------------------------ | ------------------------------------------------------ | | ---------------------------- | ------------------------------ | ------------------------------------------------------ |
...@@ -121,7 +123,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -121,7 +123,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
tmq_conf_set(conf, "group.id", "cgrpName"); tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root"); tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata"); tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest"); tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true"); tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true"); tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
...@@ -131,7 +133,12 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -131,7 +133,12 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
return tmq; return tmq;
``` ```
四、创建订阅主题列表 上述配置中包括consumer group ID,如果多个 consumer 指定的 consumer group ID一样,则自动形成一个consumer group,共享消费进度。
## 创建 topic 列表
单个consumer支持同时订阅多个topic。
```sql ```sql
tmq_list_t* topicList = tmq_list_new(); tmq_list_t* topicList = tmq_list_new();
...@@ -139,9 +146,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -139,9 +146,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
return topicList; return topicList;
``` ```
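如果需要同时订阅多个 topic,只需向同一个列表追加多个 topic 名称即可,示意如下(其中 topicName2 为假设已另行创建的 topic):

```c
/* 示意:同一个 consumer 订阅两个 topic */
tmq_list_t* topicList = tmq_list_new();
tmq_list_append(topicList, "topicName");
tmq_list_append(topicList, "topicName2");
```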
单个consumer支持同时订阅多个topic。 ## 启动订阅并开始消费
五、启动订阅并开始消费
```sql ```sql
/* 启动订阅 */ /* 启动订阅 */
...@@ -151,9 +156,9 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -151,9 +156,9 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
/* 循环poll消息 */ /* 循环poll消息 */
int32_t totalRows = 0; int32_t totalRows = 0;
int32_t msgCnt = 0; int32_t msgCnt = 0;
int32_t consumeDelay = 5000; int32_t timeOut = 5000;
while (running) { while (running) {
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, consumeDelay); TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeOut);
if (tmqmsg) { if (tmqmsg) {
msgCnt++; msgCnt++;
totalRows += msg_process(tmqmsg); totalRows += msg_process(tmqmsg);
...@@ -190,7 +195,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -190,7 +195,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
int32_t* length = taos_fetch_lengths(msg); int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg); int32_t precision = taos_result_precision(msg);
const char* tbName = tmq_get_table_name(msg); const char* tbName = tmq_get_table_name(msg);
rows++; rows++;
taos_print_row(buf, row, fields, numOfFields); taos_print_row(buf, row, fields, numOfFields);
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf); printf("row content from %s: %s\n", (tbName != NULL ? tbName : "null table"), buf);
} }
...@@ -199,7 +204,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -199,7 +204,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
} }
``` ```
五、结束消费 ## 结束消费
```sql ```sql
/* 取消订阅 */ /* 取消订阅 */
...@@ -209,7 +214,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -209,7 +214,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
tmq_consumer_close(tmq); tmq_consumer_close(tmq);
``` ```
六、删除topic ## 删除topic
如果不再需要,可以删除创建的topic,但注意:只有没有被订阅的topic才能被删除。 如果不再需要,可以删除创建的topic,但注意:只有没有被订阅的topic才能被删除。
...@@ -218,7 +223,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; ...@@ -218,7 +223,7 @@ create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
drop topic topicName; drop topic topicName;
``` ```
七、状态查看 ## 状态查看
1、topics:查询已经创建的topic 1、topics:查询已经创建的topic
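例如,可以通过如下 SQL 查看订阅相关的状态(各 show 语句是否可用请以所用版本的 SQL 手册为准):

```sql
show topics;         -- 查看已经创建的 topic
show consumers;      -- 查看 consumer 的状态
show subscriptions;  -- 查看 topic 与 consumer 的分配关系
```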
......
...@@ -124,52 +124,49 @@ gcc -g -O0 -fPIC -shared add_one.c -o add_one.so ...@@ -124,52 +124,49 @@ gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外, UDF 支持输入与输出类型不一致,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。
- 创建标量函数 - 创建标量函数
```sql ```sql
CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
``` ```
- ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - function_name:标量函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致;
- ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
- typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - output_type:此函数计算结果的数据类型名称;
- B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。
例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: 例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF:
```sql ```sql
CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
``` ```
- 创建聚合函数: - 创建聚合函数:
```sql ```sql
CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
``` ```
- ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
- ids(Y):包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
- typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - output_type:此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可;
- B:中间计算结果的缓冲区大小,单位是字节,最小 0,最大 512,如果不使用可以不设置。 - buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
关于中间计算结果的使用,可以参考示例程序[demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) 例如,如下语句可以把 libsqrsum.so 创建为系统中可用的 UDF:
例如,如下语句可以把 demo.so 创建为系统中可用的 UDF:
```sql ```sql
CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; CREATE AGGREGATE FUNCTION sqr_sum AS "/home/taos/udf_example/libsqrsum.so" OUTPUTTYPE DOUBLE bufsize 8;
``` ```
### 管理 UDF ### 管理 UDF
- 删除指定名称的用户定义函数: - 删除指定名称的用户定义函数:
``` ```
DROP FUNCTION ids(X); DROP FUNCTION function_name;
``` ```
- ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 - function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如
```sql ```sql
DROP FUNCTION add_one; DROP FUNCTION bit_and;
``` ```
- 显示系统中当前可用的所有 UDF: - 显示系统中当前可用的所有 UDF:
```sql ```sql
...@@ -180,53 +177,32 @@ SHOW FUNCTIONS; ...@@ -180,53 +177,32 @@ SHOW FUNCTIONS;
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: 在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
```sql ```sql
SELECT X(c) FROM table/stable; SELECT X(c1,c2) FROM table/stable;
``` ```
表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 表示对名为 c1, c2 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
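例如,基于上文创建的 bit_and 与 sqr_sum,一个可能的调用方式如下(其中 demo_table 及其整型列 c1、c2 仅为假设的示例表结构,实际列类型需与函数实现的输入类型匹配):

```sql
-- 标量函数:对两个 INT 列逐行做按位与
select bit_and(c1, c2) from demo_table;
-- 聚合函数:对数值列求平方和
select sqr_sum(c1) from demo_table;
```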
## UDF 的一些使用限制
在当前版本下,使用 UDF 存在如下这些限制:
1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统;
2. UDF 不能与系统内建的 SQL 函数混合使用,暂不支持在一条 SQL 语句中使用多个不同名的 UDF ;
3. UDF 只支持以单个数据列作为输入;
4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中;
5. 无法通过 RESTful 接口来创建 UDF;
6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。
## 示例代码 ## 示例代码
### 标量函数示例 [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) ### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
<details>
<summary>add_one.c</summary>
```c
{{#include tests/script/sh/add_one.c}}
```
</details>
### 向量函数示例 [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c)
<details> <details>
<summary>abs_max.c</summary> <summary>bit_and.c</summary>
```c ```c
{{#include tests/script/sh/abs_max.c}} {{#include tests/script/sh/bit_and.c}}
``` ```
</details> </details>
### 使用中间计算结果示例 [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) ### 聚合函数示例 [sqr_sum](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sqr_sum.c)
<details> <details>
<summary>demo.c</summary> <summary>sqr_sum.c</summary>
```c ```c
{{#include tests/script/sh/demo.c}} {{#include tests/script/sh/sqr_sum.c}}
``` ```
</details> </details>
...@@ -8,21 +8,30 @@ title: 用户自定义函数 ...@@ -8,21 +8,30 @@ title: 用户自定义函数
## 创建函数 ## 创建函数
```sql ```sql
CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE buffer_size]
``` ```
语法说明: 语法说明:
AGGREGATE:标识此函数是标量函数还是聚集函数。 AGGREGATE:标识此函数是标量函数还是聚集函数。
func_name:函数名,必须与函数实现中udfNormalFunc的实际名称一致。 func_name:函数名,必须与函数实现中 udf 的实际名称一致。
library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。 library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。
OUTPUTTYPE:标识此函数的返回类型。 type_name:标识此函数的返回类型。
BUFSIZE:中间结果的缓冲区大小,单位是字节。不设置则默认为0。最大不可超过512字节 buffer_size:中间结果的缓冲区大小,单位是字节。不设置则默认为0
关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf) 关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)
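结合上述语法,一个可能的建函数示例如下(库文件路径仅为示意,与开发章节中的示例保持一致):

```sql
-- 创建标量函数
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
-- 创建聚合函数,并指定 8 字节的中间结果缓冲区
CREATE AGGREGATE FUNCTION sqr_sum AS "/home/taos/udf_example/libsqrsum.so" OUTPUTTYPE DOUBLE BUFSIZE 8;
```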
## 删除自定义函数 ## 删除自定义函数
```
DROP FUNCTION function_name;
```
- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字。
## 显示 UDF
```sql ```sql
DROP FUNCTION func_name SHOW FUNCTIONS;
``` ```
\ No newline at end of file
...@@ -15,11 +15,11 @@ import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet. ...@@ -15,11 +15,11 @@ import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
`td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件 `@tdengine/client` 和 `@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。注意:从 TDengine 3.0 开始 Node.js 原生连接器的包名由 `td2.0-connector` 改名为 `@tdengine/client` 而 rest 连接器的包名由 `td2.0-rest-connector` 改为 `@tdengine/rest`。并且不与 TDengine 2.x 兼容
`td2.0-connector` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`td2.0-rest-connector` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。 `@tdengine/client` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`@tdengine/rest` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。
Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node)。 Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0)。
## 支持的平台 ## 支持的平台
...@@ -58,7 +58,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 ...@@ -58,7 +58,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
<TabItem value="Linux" label="Linux 系统安装依赖工具"> <TabItem value="Linux" label="Linux 系统安装依赖工具">
- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) - `python` (建议`v2.7` , `v3.x.x` 目前还不支持)
- `td2.0-connector` 2.0.6 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;2.0.5 及更早版本支持 Node.js LTS v10.x 版本。其他版本可能存在包兼容性的问题 - `@tdengine/client` 3.0.0 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;其他版本可能存在包兼容性的问题
- `make` - `make`
- C 语言编译器,[GCC](https://gcc.gnu.org) v4.8.5 或更高版本 - C 语言编译器,[GCC](https://gcc.gnu.org) v4.8.5 或更高版本
...@@ -90,14 +90,14 @@ REST 连接器支持所有能运行 Node.js 的平台。 ...@@ -90,14 +90,14 @@ REST 连接器支持所有能运行 Node.js 的平台。
<TabItem value="install_native" label="安装原生连接器"> <TabItem value="install_native" label="安装原生连接器">
```bash ```bash
npm install td2.0-connector npm install @tdengine/client
``` ```
</TabItem> </TabItem>
<TabItem value="install_rest" label="安装 REST 连接器"> <TabItem value="install_rest" label="安装 REST 连接器">
```bash ```bash
npm i td2.0-rest-connector npm install @tdengine/rest
``` ```
</TabItem> </TabItem>
...@@ -109,13 +109,13 @@ npm i td2.0-rest-connector ...@@ -109,13 +109,13 @@ npm i td2.0-rest-connector
验证方法: 验证方法:
- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/develop/examples/nodejs/nodejsChecker.js)到本地。 - 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/nodejsChecker.js)到本地。
- 在命令行中执行以下命令。 - 在命令行中执行以下命令。
```bash ```bash
npm init -y npm init -y
npm install td2.0-connector npm install @tdengine/client
node nodejsChecker.js host=localhost node nodejsChecker.js host=localhost
``` ```
...@@ -128,11 +128,11 @@ node nodejsChecker.js host=localhost ...@@ -128,11 +128,11 @@ node nodejsChecker.js host=localhost
<Tabs defaultValue="native"> <Tabs defaultValue="native">
<TabItem value="native" label="原生连接"> <TabItem value="native" label="原生连接">
安装并引用 `td2.0-connector` 包。 安装并引用 `@tdengine/client` 包。
```javascript ```javascript
//A cursor also needs to be initialized in order to interact with TDengine from Node.js. //A cursor also needs to be initialized in order to interact with TDengine from Node.js.
const taos = require("td2.0-connector"); const taos = require("@tdengine/client");
var conn = taos.connect({ var conn = taos.connect({
host: "127.0.0.1", host: "127.0.0.1",
user: "root", user: "root",
...@@ -149,12 +149,12 @@ conn.close(); ...@@ -149,12 +149,12 @@ conn.close();
</TabItem> </TabItem>
<TabItem value="rest" label="REST 连接"> <TabItem value="rest" label="REST 连接">
安装并引用 `td2.0-rest-connector` 包。 安装并引用 `@tdengine/rest` 包。
```javascript ```javascript
//A cursor also needs to be initialized in order to interact with TDengine from Node.js. //A cursor also needs to be initialized in order to interact with TDengine from Node.js.
import { options, connect } from "td2.0-rest-connector"; import { options, connect } from "@tdengine/rest";
options.path = "/rest/sqlt"; options.path = "/rest/sql";
// set host // set host
options.host = "localhost"; options.host = "localhost";
// set other options like user/passwd // set other options like user/passwd
...@@ -190,26 +190,23 @@ let cursor = conn.cursor(); ...@@ -190,26 +190,23 @@ let cursor = conn.cursor();
<NodeQuery /> <NodeQuery />
## 更多示例程序 ## 更多示例程序
| 示例程序 | 示例程序描述 | | 示例程序 | 示例程序描述 |
| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- | | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- |
| [connection](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/cursorClose.js) | 建立连接的示例。 | | [basicUse](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/queryExample.js) | 基本的使用如如建立连接,执行 SQL 等操作。 |
| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamBatchSample.js) | 绑定多行参数插入的示例。 | | [stmtBindBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindParamBatch.js) | 绑定多行参数插入的示例。 | |
| [stmtBind](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamSample.js) | 一行一行绑定参数插入的示例。 | | [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindSingleParamBatch.js) | 按列绑定参数插入的示例。 |
| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindSingleParamBatchSample.js) | 按列绑定参数插入的示例。 | | [stmtQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/stmtQuery.js) | 绑定参数查询的示例。 |
| [stmtUseResult](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtUseResultSample.js) | 绑定参数查询的示例。 | | [schemless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | schemless 插入的示例。 |
| [json tag](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testJsonTag.js) | Json tag 的使用示例。 | | [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | 订阅的使用示例。 |
| [Nanosecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testNanoseconds.js) | 时间戳为纳秒精度的使用的示例。 | | [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | 异步查询的使用示例。 |
| [Microsecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testMicroseconds.js) | 时间戳为微秒精度的使用的示例。 | | [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 |
| [schemless insert](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSchemalessInsert.js) | schemless 插入的示例。 |
| [subscribe](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSubscribe.js) | 订阅的使用示例。 |
| [asyncQuery](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/tset.js) | 异步查询的使用示例。 |
| [REST](https://github.com/taosdata/taos-connector-node/blob/develop/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 |
## 使用限制 ## 使用限制
Node.js 连接器 >= v2.0.6 目前支持 node 的版本为:支持 >=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0 ;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。 native 连接器(`@tdengine/client`) >= v3.0.0 目前支持 node 的版本为:支持 >=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0 ;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。
## 其他说明 ## 其他说明
...@@ -225,7 +222,7 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202 ...@@ -225,7 +222,7 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202
2. Node.js 版本 2. Node.js 版本
连接器 >v2.0.6 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1 原生连接器 `@tdengine/client` 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1
3. "Unable to establish connection","Unable to resolve FQDN" 3. "Unable to establish connection","Unable to resolve FQDN"
...@@ -235,18 +232,22 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202 ...@@ -235,18 +232,22 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202
### 原生连接器 ### 原生连接器
| td2.0-connector 版本 | 说明 | | package name | version | TDengine version | 说明 |
| -------------------- | ---------------------------------------------------------------- | |------------------|---------|---------------------|------------------------------------------------------------------|
| 2.0.12 | 修复 cursor.close() 报错的 bug。 | | @tdengine/client | 3.0.0 | 3.0.0 | 支持TDengine 3.0 且不与2.x 兼容。 |
| 2.0.11 | 支持绑定参数、json tag、schemaless 接口等功能。 | | td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | 修复 cursor.close() 报错的 bug。 |
| 2.0.10 | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 | | td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | 支持绑定参数、json tag、schemaless 接口等功能。 |
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 |
### REST 连接器 ### REST 连接器
| td2.0-rest-connector 版本 | 说明 | | package name | version | TDengine version | 说明 |
| ------------------------- | ---------------------------------------------------------------- | |----------------------|---------|---------------------|---------------------------------------------------------------------------|
| 1.0.3 | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 | | @tdengine/rest | 3.0.0 | 3.0.0 | 支持 TDengine 3.0,且不与2.x 兼容。 |
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | 移除默认端口 6041。 |
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | 修复create,insert,update,alter 等SQL 执行返回的 affectRows 错误的bug。 |
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | 支持云服务 cloud Token; |
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 |
## API 参考 ## API 参考
[API 参考](https://docs.taosdata.com/api/td2.0-connector/) [API 参考](https://docs.taosdata.com/api/td2.0-connector/)
\ No newline at end of file
...@@ -233,11 +233,28 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -233,11 +233,28 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **drop** : 插入前是否删除数据库,默认为 true。 - **drop** : 插入前是否删除数据库,默认为 true。
#### 流式计算相关配置参数
创建流式计算的相关参数在 json 配置文件中的 `stream` 中配置,具体参数如下。
- **stream_name** : 流式计算的名称,必填项。
- **stream_stb** : 流式计算对应的超级表名称,必填项。
- **stream_sql** : 流式计算的sql语句,必填项。
- **trigger_mode** : 流式计算的触发模式,可选项。
- **watermark** : 流式计算的水印,可选项。
- **drop** : 是否创建流式计算,可选项为 "yes" 或者 "no", 为 "no" 时不创建。
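下面是一个 `stream` 配置段的最小示意(字段取值与外层结构仅为示例,请以 taosBenchmark 自带的示例配置文件为准):

```json
"stream": [
  {
    "stream_name": "stream_demo",
    "stream_stb": "stream_stb_demo",
    "stream_sql": "select count(*) from test.meters interval(10s)",
    "trigger_mode": "at_once",
    "watermark": "1s",
    "drop": "yes"
  }
]
```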
#### 超级表相关配置参数 #### 超级表相关配置参数
创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下 创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下。
- **name**: 超级表名,必须配置,没有默认值。 - **name**: 超级表名,必须配置,没有默认值。
- **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。 - **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。
- **child_table_count** : 子表的数量,默认值为 10。 - **child_table_count** : 子表的数量,默认值为 10。
...@@ -288,6 +305,22 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -288,6 +305,22 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **tags_file** : 仅当 insert_mode 为 taosc, rest 的模式下生效。 最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。 - **tags_file** : 仅当 insert_mode 为 taosc, rest 的模式下生效。 最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。
#### tsma配置参数
指定tsma的配置参数在 `super_tables` 中的 `tsmas` 中,具体参数如下。
- **name** : 指定 tsma 的名字,必选项。
- **function** : 指定 tsma 的函数,必选项。
- **interval** : 指定 tsma 的时间间隔,必选项。
- **sliding** : 指定 tsma 的窗口时间位移,必选项。
- **custom** : 指定 tsma 的创建语句结尾追加的自定义配置,可选项。
- **start_when_inserted** : 指定当插入多少行时创建 tsma,可选项,默认为 0。
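与之对应,`super_tables` 中 `tsmas` 配置段的一个示意如下(字段取值仅为示例,function 的具体写法请以示例配置文件为准):

```json
"tsmas": [
  {
    "name": "tsma_demo",
    "function": "avg(current)",
    "interval": "60s",
    "sliding": "30s",
    "custom": "",
    "start_when_inserted": 100
  }
]
```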
#### 标签列与数据列配置参数 #### 标签列与数据列配置参数
指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns``tag` 中。 指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns``tag` 中。
......
...@@ -147,7 +147,7 @@ import ( ...@@ -147,7 +147,7 @@ import (
"fmt" "fmt"
"time" "time"
_ "github.com/taosdata/driver-go/v2/taosSql" _ "github.com/taosdata/driver-go/v3/taosSql"
) )
type config struct { type config struct {
......
---
sidebar_label: 发布历史
title: 发布历史
---
import Release from "/components/Release";
<Release versionPrefix="3.0" />
...@@ -107,6 +107,7 @@ typedef struct SDataBlockInfo { ...@@ -107,6 +107,7 @@ typedef struct SDataBlockInfo {
int32_t childId; // used for stream, do not serialize int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize STimeWindow calWin; // used for stream, do not serialize
TSKEY watermark;// used for stream
} SDataBlockInfo; } SDataBlockInfo;
typedef struct SSDataBlock { typedef struct SSDataBlock {
......
...@@ -1369,6 +1369,7 @@ typedef struct { ...@@ -1369,6 +1369,7 @@ typedef struct {
int64_t skey; int64_t skey;
int64_t ekey; int64_t ekey;
int64_t version; // for stream int64_t version; // for stream
TSKEY watermark;// for stream
char data[]; char data[];
} SRetrieveTableRsp; } SRetrieveTableRsp;
...@@ -2656,6 +2657,34 @@ typedef struct { ...@@ -2656,6 +2657,34 @@ typedef struct {
SEpSet epSet; SEpSet epSet;
} SVgEpSet; } SVgEpSet;
typedef struct {
int64_t refId;
int64_t suid;
int8_t level;
} SRSmaFetchMsg;
static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI64(pCoder, pReq->refId) < 0) return -1;
if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->level) < 0) return -1;
tEndEncode(pCoder);
return 0;
}
static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->refId) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->level) < 0) return -1;
tEndDecode(pCoder);
return 0;
}
typedef struct { typedef struct {
int8_t version; // for compatibility(default 0) int8_t version; // for compatibility(default 0)
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
...@@ -3035,6 +3064,7 @@ int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes); ...@@ -3035,6 +3064,7 @@ int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes); int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);
typedef struct { typedef struct {
int32_t msgIdx;
int32_t msgType; int32_t msgType;
int32_t msgLen; int32_t msgLen;
void* msg; void* msg;
...@@ -3048,6 +3078,7 @@ typedef struct { ...@@ -3048,6 +3078,7 @@ typedef struct {
typedef struct { typedef struct {
int32_t reqType; int32_t reqType;
int32_t msgIdx;
int32_t msgLen; int32_t msgLen;
int32_t rspCode; int32_t rspCode;
void* msg; void* msg;
......
...@@ -58,12 +58,17 @@ typedef struct SDbInfo { ...@@ -58,12 +58,17 @@ typedef struct SDbInfo {
int64_t dbId; int64_t dbId;
} SDbInfo; } SDbInfo;
typedef struct STablesReq {
char dbFName[TSDB_DB_FNAME_LEN];
SArray* pTables;
} STablesReq;
typedef struct SCatalogReq { typedef struct SCatalogReq {
SArray* pDbVgroup; // element is db full name SArray* pDbVgroup; // element is db full name
SArray* pDbCfg; // element is db full name SArray* pDbCfg; // element is db full name
SArray* pDbInfo; // element is db full name SArray* pDbInfo; // element is db full name
SArray* pTableMeta; // element is SNAME SArray* pTableMeta; // element is STablesReq
SArray* pTableHash; // element is SNAME SArray* pTableHash; // element is STablesReq
SArray* pUdf; // element is udf name SArray* pUdf; // element is udf name
SArray* pIndex; // element is index name SArray* pIndex; // element is index name
SArray* pUser; // element is SUserAuthInfo SArray* pUser; // element is SUserAuthInfo
......
...@@ -107,7 +107,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo ...@@ -107,7 +107,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
* @return * @return
*/ */
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, struct SSubplan* pPlan, int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, struct SSubplan* pPlan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, const char* sql, EOPTR_EXEC_MODEL model); qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, char* sql, EOPTR_EXEC_MODEL model);
/** /**
* *
......
...@@ -202,6 +202,7 @@ bool fmIsForbidStreamFunc(int32_t funcId); ...@@ -202,6 +202,7 @@ bool fmIsForbidStreamFunc(int32_t funcId);
bool fmIsIntervalInterpoFunc(int32_t funcId); bool fmIsIntervalInterpoFunc(int32_t funcId);
bool fmIsInterpFunc(int32_t funcId); bool fmIsInterpFunc(int32_t funcId);
bool fmIsLastRowFunc(int32_t funcId); bool fmIsLastRowFunc(int32_t funcId);
bool fmIsNotNullOutputFunc(int32_t funcId);
bool fmIsSelectValueFunc(int32_t funcId); bool fmIsSelectValueFunc(int32_t funcId);
bool fmIsSystemInfoFunc(int32_t funcId); bool fmIsSystemInfoFunc(int32_t funcId);
bool fmIsImplicitTsFunc(int32_t funcId); bool fmIsImplicitTsFunc(int32_t funcId);
......
...@@ -41,12 +41,13 @@ typedef struct { ...@@ -41,12 +41,13 @@ typedef struct {
typedef struct SRpcHandleInfo { typedef struct SRpcHandleInfo {
// rpc info // rpc info
void *handle; // rpc handle returned to app void *handle; // rpc handle returned to app
int64_t refId; // refid, used by server int64_t refId; // refid, used by server
int32_t noResp; // has response or not(default 0, 0: resp, 1: no resp); int8_t noResp; // has response or not(default 0, 0: resp, 1: no resp)
int32_t persistHandle; // persist handle or not int8_t persistHandle; // persist handle or not
int8_t hasEpSet;
STraceId traceId; STraceId traceId;
int8_t hasEpSet;
// app info // app info
void *ahandle; // app handle set by client void *ahandle; // app handle set by client
...@@ -69,8 +70,9 @@ typedef struct SRpcMsg { ...@@ -69,8 +70,9 @@ typedef struct SRpcMsg {
SRpcHandleInfo info; SRpcHandleInfo info;
} SRpcMsg; } SRpcMsg;
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf); typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *epset);
typedef bool (*RpcRfp)(int32_t code, tmsg_t msgType); typedef bool (*RpcRfp)(int32_t code, tmsg_t msgType);
typedef bool (*RpcTfp)(int32_t code, tmsg_t msgType);
typedef struct SRpcInit { typedef struct SRpcInit {
char localFqdn[TSDB_FQDN_LEN]; char localFqdn[TSDB_FQDN_LEN];
...@@ -84,12 +86,15 @@ typedef struct SRpcInit { ...@@ -84,12 +86,15 @@ typedef struct SRpcInit {
// the following is for client app ecurity only // the following is for client app ecurity only
char *user; // user name char *user; // user name
// call back to process incoming msg, code shall be ignored by server app // call back to process incoming msg
RpcCfp cfp; RpcCfp cfp;
// user defined retry func // retry not not for particular msg
RpcRfp rfp; RpcRfp rfp;
// set up timeout for particular msg
RpcTfp tfp;
void *parent; void *parent;
} SRpcInit; } SRpcInit;
......
...@@ -46,6 +46,7 @@ int32_t* taosGetErrno(); ...@@ -46,6 +46,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) #define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015)
#define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0017) #define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0017)
#define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) #define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018)
#define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019)
//common & util //common & util
#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013) #define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013)
......
...@@ -25,9 +25,9 @@ extern "C" { ...@@ -25,9 +25,9 @@ extern "C" {
// reference counting // reference counting
typedef void (*_ref_fn_t)(const void *pObj); typedef void (*_ref_fn_t)(const void *pObj);
#define T_REF_DECLARE() \ #define T_REF_DECLARE() \
struct { \ struct { \
int32_t val; \ volatile int32_t val; \
} _ref; } _ref;
#define T_REF_REGISTER_FUNC(s, e) \ #define T_REF_REGISTER_FUNC(s, e) \
......
...@@ -60,7 +60,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) { ...@@ -60,7 +60,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
} }
static void deregisterRequest(SRequestObj *pRequest) { static void deregisterRequest(SRequestObj *pRequest) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable
assert(pRequest != NULL); assert(pRequest != NULL);
STscObj *pTscObj = pRequest->pTscObj; STscObj *pTscObj = pRequest->pTscObj;
...@@ -77,13 +77,13 @@ static void deregisterRequest(SRequestObj *pRequest) { ...@@ -77,13 +77,13 @@ static void deregisterRequest(SRequestObj *pRequest) {
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
} }
if (duration >= SLOW_QUERY_INTERVAL) { if (duration >= SLOW_QUERY_INTERVAL) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1); atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
} }
releaseTscObj(pTscObj->id); releaseTscObj(pTscObj->id);
} }
...@@ -109,6 +109,14 @@ static bool clientRpcRfp(int32_t code, tmsg_t msgType) { ...@@ -109,6 +109,14 @@ static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
} }
} }
// start timer for particular msgType
static bool clientRpcTfp(int32_t code, tmsg_t msgType) {
if (msgType == TDMT_VND_SUBMIT || msgType == TDMT_VND_CREATE_TABLE) {
return true;
}
return false;
}
// TODO refactor // TODO refactor
void *openTransporter(const char *user, const char *auth, int32_t numOfThread) { void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
SRpcInit rpcInit; SRpcInit rpcInit;
...@@ -118,6 +126,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) { ...@@ -118,6 +126,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.numOfThreads = numOfThread; rpcInit.numOfThreads = numOfThread;
rpcInit.cfp = processMsgFromServer; rpcInit.cfp = processMsgFromServer;
rpcInit.rfp = clientRpcRfp; rpcInit.rfp = clientRpcRfp;
rpcInit.tfp = clientRpcTfp;
rpcInit.sessions = 1024; rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user; rpcInit.user = (char *)user;
...@@ -375,7 +384,7 @@ void taos_init_imp(void) { ...@@ -375,7 +384,7 @@ void taos_init_imp(void) {
initQueryModuleMsgHandle(); initQueryModuleMsgHandle();
taosConvInit(); taosConvInit();
rpcInit(); rpcInit();
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100}; SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
......
...@@ -1308,8 +1308,8 @@ int32_t doProcessMsgFromServer(void* param) { ...@@ -1308,8 +1308,8 @@ int32_t doProcessMsgFromServer(void* param) {
char tbuf[40] = {0}; char tbuf[40] = {0};
TRACE_TO_STR(trace, tbuf); TRACE_TO_STR(trace, tbuf);
tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle, tscDebug("processMsgFromServer handle %p, message: %s, size:%d, code: %s, gtid: %s", pMsg->info.handle,
TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), tbuf); TMSG_INFO(pMsg->msgType), pMsg->contLen, tstrerror(pMsg->code), tbuf);
if (pSendInfo->requestObjRefId != 0) { if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
...@@ -1922,7 +1922,7 @@ _OVER: ...@@ -1922,7 +1922,7 @@ _OVER:
return code; return code;
} }
int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, int32_t appendTbToReq(SHashObj* pHash, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str,
int32_t acctId, char* db) { int32_t acctId, char* db) {
SName name; SName name;
...@@ -1957,20 +1957,33 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i ...@@ -1957,20 +1957,33 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i
return -1; return -1;
} }
taosArrayPush(pList, &name); char dbFName[TSDB_DB_FNAME_LEN];
sprintf(dbFName, "%d.%.*s", acctId, dbLen, dbName);
STablesReq* pDb = taosHashGet(pHash, dbFName, strlen(dbFName));
if (pDb) {
taosArrayPush(pDb->pTables, &name);
} else {
STablesReq db;
db.pTables = taosArrayInit(20, sizeof(SName));
strcpy(db.dbFName, dbFName);
taosArrayPush(db.pTables, &name);
taosHashPut(pHash, dbFName, strlen(dbFName), &db, sizeof(db));
}
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq) { int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq) {
*pReq = taosArrayInit(10, sizeof(SName)); SHashObj* pHash = taosHashInit(3, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if (NULL == *pReq) { if (NULL == pHash) {
terrno = TSDB_CODE_OUT_OF_MEMORY; terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno; return terrno;
} }
bool inEscape = false; bool inEscape = false;
int32_t code = 0; int32_t code = 0;
void *pIter = NULL;
int32_t vIdx = 0; int32_t vIdx = 0;
int32_t vPos[2]; int32_t vPos[2];
...@@ -1985,7 +1998,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, ...@@ -1985,7 +1998,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
vLen[vIdx] = i - vPos[vIdx]; vLen[vIdx] = i - vPos[vIdx];
} }
code = appendTbToReq(*pReq, vPos[0], vLen[0], vPos[1], vLen[1], tbList, acctId, dbName); code = appendTbToReq(pHash, vPos[0], vLen[0], vPos[1], vLen[1], tbList, acctId, dbName);
if (code) { if (code) {
goto _return; goto _return;
} }
...@@ -2035,7 +2048,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, ...@@ -2035,7 +2048,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
vLen[vIdx] = i - vPos[vIdx]; vLen[vIdx] = i - vPos[vIdx];
} }
code = appendTbToReq(*pReq, vPos[0], vLen[0], vPos[1], vLen[1], tbList, acctId, dbName); code = appendTbToReq(pHash, vPos[0], vLen[0], vPos[1], vLen[1], tbList, acctId, dbName);
if (code) { if (code) {
goto _return; goto _return;
} }
...@@ -2067,14 +2080,31 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, ...@@ -2067,14 +2080,31 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
goto _return; goto _return;
} }
int32_t dbNum = taosHashGetSize(pHash);
*pReq = taosArrayInit(dbNum, sizeof(STablesReq));
pIter = taosHashIterate(pHash, NULL);
while (pIter) {
STablesReq* pDb = (STablesReq*)pIter;
taosArrayPush(*pReq, pDb);
pIter = taosHashIterate(pHash, pIter);
}
taosHashCleanup(pHash);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
_return: _return:
terrno = TSDB_CODE_TSC_INVALID_OPERATION; terrno = TSDB_CODE_TSC_INVALID_OPERATION;
taosArrayDestroy(*pReq); pIter = taosHashIterate(pHash, NULL);
*pReq = NULL; while (pIter) {
STablesReq* pDb = (STablesReq*)pIter;
taosArrayDestroy(pDb->pTables);
pIter = taosHashIterate(pHash, pIter);
}
taosHashCleanup(pHash);
return terrno; return terrno;
} }
......
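With the grouping above, transferTableNameList now returns an SArray of STablesReq — one entry per database — instead of a flat SArray of SName. A minimal caller sketch, assuming the STablesReq fields shown in the hunk; the literal table list, account id, and cleanup policy are illustrative only.

SArray *pReq = NULL;
int32_t acctId = 1;                              // illustrative account id
char    defaultDb[] = "db1";                     // db applied to unqualified table names
if (transferTableNameList("db1.t1,db1.t2,db2.t3", acctId, defaultDb, &pReq) == TSDB_CODE_SUCCESS) {
  int32_t dbNum = taosArrayGetSize(pReq);
  for (int32_t i = 0; i < dbNum; ++i) {
    STablesReq *pDb = taosArrayGet(pReq, i);     // pDb->dbFName holds "acctId.dbName"
    int32_t tbNum = taosArrayGetSize(pDb->pTables);
    for (int32_t j = 0; j < tbNum; ++j) {
      SName *pName = taosArrayGet(pDb->pTables, j);
      (void)pName;                               // hand the grouped names to the catalog layer
    }
  }
}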
...@@ -308,9 +308,9 @@ static const SSysDbTableSchema offsetSchema[] = { ...@@ -308,9 +308,9 @@ static const SSysDbTableSchema offsetSchema[] = {
}; };
static const SSysDbTableSchema querySchema[] = { static const SSysDbTableSchema querySchema[] = {
{.name = "query_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "req_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
{.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
{.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
...@@ -354,11 +354,19 @@ static const SSysTableMeta perfsMeta[] = { ...@@ -354,11 +354,19 @@ static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}}; {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}};
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) { void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {
*pInfosTableMeta = infosMeta; if (pInfosTableMeta) {
*size = tListLen(infosMeta); *pInfosTableMeta = infosMeta;
}
if (size) {
*size = tListLen(infosMeta);
}
} }
void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) { void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) {
*pPerfsTableMeta = perfsMeta; if (pPerfsTableMeta) {
*size = tListLen(perfsMeta); *pPerfsTableMeta = perfsMeta;
}
if (size) {
*size = tListLen(perfsMeta);
}
} }
...@@ -1273,8 +1273,7 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { ...@@ -1273,8 +1273,7 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
colDataAssign(pDst, pSrc, src->info.rows, &src->info); colDataAssign(pDst, pSrc, src->info.rows, &src->info);
} }
dst->info.rows = src->info.rows; dst->info = src->info;
dst->info.capacity = src->info.rows;
return 0; return 0;
} }
......
...@@ -89,7 +89,7 @@ bool tsSmlDataFormat = ...@@ -89,7 +89,7 @@ bool tsSmlDataFormat =
// query // query
int32_t tsQueryPolicy = 1; int32_t tsQueryPolicy = 1;
int32_t tsQuerySmaOptimize = 1; int32_t tsQuerySmaOptimize = 0;
/* /*
* denote if the server needs to compress response message at the application layer to client, including query rsp, * denote if the server needs to compress response message at the application layer to client, including query rsp,
......
...@@ -1691,13 +1691,17 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc ...@@ -1691,13 +1691,17 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
if (!pShow->sysDbRsp) { if (!pShow->sysDbRsp) {
SDbObj infoschemaDb = {0}; SDbObj infoschemaDb = {0};
setInformationSchemaDbCfg(&infoschemaDb); setInformationSchemaDbCfg(&infoschemaDb);
dumpDbInfoData(pBlock, &infoschemaDb, pShow, numOfRows, 14, true, 0, 1); size_t numOfTables = 0;
getInfosDbMeta(NULL, &numOfTables);
dumpDbInfoData(pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
numOfRows += 1; numOfRows += 1;
SDbObj perfschemaDb = {0}; SDbObj perfschemaDb = {0};
setPerfSchemaDbCfg(&perfschemaDb); setPerfSchemaDbCfg(&perfschemaDb);
dumpDbInfoData(pBlock, &perfschemaDb, pShow, numOfRows, 3, true, 0, 1); numOfTables = 0;
getPerfDbMeta(NULL, &numOfTables);
dumpDbInfoData(pBlock, &perfschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
numOfRows += 1; numOfRows += 1;
pShow->sysDbRsp = true; pShow->sysDbRsp = true;
......
...@@ -84,6 +84,9 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) { ...@@ -84,6 +84,9 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
} }
for (int32_t i = 0; i < msgNum; ++i) { for (int32_t i = 0; i < msgNum; ++i) {
req.msgIdx = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgIdx);
req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset)); req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgType); offset += sizeof(req.msgType);
...@@ -111,6 +114,7 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) { ...@@ -111,6 +114,7 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
} else { } else {
rsp.rspCode = 0; rsp.rspCode = 0;
} }
rsp.msgIdx = req.msgIdx;
rsp.reqType = reqMsg.msgType; rsp.reqType = reqMsg.msgType;
rsp.msgLen = reqMsg.info.rspLen; rsp.msgLen = reqMsg.info.rspLen;
rsp.msg = reqMsg.info.rsp; rsp.msg = reqMsg.info.rsp;
...@@ -136,6 +140,8 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) { ...@@ -136,6 +140,8 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
*(int32_t*)((char*)pRsp + offset) = htonl(p->reqType); *(int32_t*)((char*)pRsp + offset) = htonl(p->reqType);
offset += sizeof(p->reqType); offset += sizeof(p->reqType);
*(int32_t*)((char*)pRsp + offset) = htonl(p->msgIdx);
offset += sizeof(p->msgIdx);
*(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen); *(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen);
offset += sizeof(p->msgLen); offset += sizeof(p->msgLen);
*(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode); *(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode);
......
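Both the request and the response entries gain a msgIdx field in the hunks above. The sketch below decodes one response entry in the order the server writes it (reqType, msgIdx, msgLen, rspCode, payload); the SBatchRsp struct name and field types are assumed from the p-> accesses in the hunk, and the function itself is illustrative only.

static char *demoReadBatchRspEntry(char *p, SBatchRsp *pRsp) {
  pRsp->reqType = ntohl(*(int32_t *)p); p += sizeof(int32_t);
  pRsp->msgIdx  = ntohl(*(int32_t *)p); p += sizeof(int32_t);   // newly added field
  pRsp->msgLen  = ntohl(*(int32_t *)p); p += sizeof(int32_t);
  pRsp->rspCode = ntohl(*(int32_t *)p); p += sizeof(int32_t);
  pRsp->msg     = p;                    p += pRsp->msgLen;      // payload follows the fixed header
  return p;                                                     // start of the next entry
}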
...@@ -489,7 +489,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea ...@@ -489,7 +489,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN);
ASSERT(smaObj.uid != 0); ASSERT(smaObj.uid != 0);
char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0}; char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0};
snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "td.tsma.rst.tb.%s", pCreate->name); snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb",pCreate->name);
memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN); memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN);
smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN); smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN);
smaObj.stbUid = pStb->uid; smaObj.stbUid = pStb->uid;
...@@ -603,6 +603,9 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea ...@@ -603,6 +603,9 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER; if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
mDebug("mndSma: create sma index %s %" PRIi64 " on stb:%" PRIi64 ", dstSuid:%" PRIi64 " dstTb:%s dstVg:%d",
pCreate->name, smaObj.uid, smaObj.stbUid, smaObj.dstTbUid, smaObj.dstTbName, smaObj.dstVgId);
code = 0; code = 0;
_OVER: _OVER:
...@@ -795,11 +798,12 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p ...@@ -795,11 +798,12 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
pStb = mndAcquireStb(pMnode, pSma->stb); pStb = mndAcquireStb(pMnode, pSma->stb);
if (pStb == NULL) goto _OVER; if (pStb == NULL) goto _OVER;
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq); pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
if (pTrans == NULL) goto _OVER; if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
mndTransSetDbName(pTrans, pDb->name, NULL); mndTransSetDbName(pTrans, pDb->name, NULL);
mndTransSetSerial(pTrans);
char streamName[TSDB_TABLE_FNAME_LEN] = {0}; char streamName[TSDB_TABLE_FNAME_LEN] = {0};
mndGetStreamNameFromSmaName(streamName, pSma->name); mndGetStreamNameFromSmaName(streamName, pSma->name);
...@@ -834,9 +838,6 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p ...@@ -834,9 +838,6 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
code = 0; code = 0;
_OVER: _OVER:
if(code != 0) {
ASSERT(0);
}
mndTransDrop(pTrans); mndTransDrop(pTrans);
mndReleaseVgroup(pMnode, pVgroup); mndReleaseVgroup(pMnode, pVgroup);
mndReleaseStb(pMnode, pStb); mndReleaseStb(pMnode, pStb);
...@@ -855,6 +856,7 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p ...@@ -855,6 +856,7 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
if (pIter == NULL) break; if (pIter == NULL) break;
if (pSma->stbUid == pStb->uid) { if (pSma->stbUid == pStb->uid) {
mndTransSetSerial(pTrans);
pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId);
if (pVgroup == NULL) goto _OVER; if (pVgroup == NULL) goto _OVER;
...@@ -935,7 +937,6 @@ static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) { ...@@ -935,7 +937,6 @@ static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) {
goto _OVER; goto _OVER;
} else { } else {
terrno = TSDB_CODE_MND_SMA_NOT_EXIST; terrno = TSDB_CODE_MND_SMA_NOT_EXIST;
ASSERT(0);
goto _OVER; goto _OVER;
} }
} }
......
...@@ -115,24 +115,29 @@ struct SSmaStat { ...@@ -115,24 +115,29 @@ struct SSmaStat {
#define RSMA_FS_LOCK(r) (&(r)->lock) #define RSMA_FS_LOCK(r) (&(r)->lock)
struct SRSmaInfoItem { struct SRSmaInfoItem {
void *taskInfo; // qTaskInfo_t
int64_t refId;
tmr_h tmrId;
int32_t maxDelay;
int8_t level; int8_t level;
int8_t triggerStat; int8_t triggerStat;
int32_t maxDelay;
tmr_h tmrId;
}; };
struct SRSmaInfo { struct SRSmaInfo {
STSchema *pTSchema; STSchema *pTSchema;
int64_t suid; int64_t suid;
int64_t refId; // refId of SRSmaStat
int8_t delFlag; int8_t delFlag;
T_REF_DECLARE() T_REF_DECLARE()
SRSmaInfoItem items[TSDB_RETENTION_L2]; SRSmaInfoItem items[TSDB_RETENTION_L2];
void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
void *iTaskInfo[TSDB_RETENTION_L2]; // immutable
}; };
#define RSMA_INFO_HEAD_LEN 24
#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1) #define RSMA_INFO_HEAD_LEN 32
#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1) #define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1)
#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1)
#define RSMA_INFO_QTASK(r, i) ((r)->taskInfo[i])
#define RSMA_INFO_IQTASK(r, i) ((r)->iTaskInfo[i])
#define RSMA_INFO_ITEM(r, i) (&(r)->items[i])
enum { enum {
TASK_TRIGGER_STAT_INIT = 0, TASK_TRIGGER_STAT_INIT = 0,
...@@ -168,8 +173,8 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat); ...@@ -168,8 +173,8 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat);
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); void *tdAcquireSmaRef(int32_t rsetId, int64_t refId);
int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId);
int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType); int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
...@@ -223,12 +228,11 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) { ...@@ -223,12 +228,11 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName); void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName); void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc); int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc);
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level); void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree); void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash); int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName); int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
......
...@@ -381,6 +381,8 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { ...@@ -381,6 +381,8 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
metaReaderClear(&mr); metaReaderClear(&mr);
return -1; return -1;
} else if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
terrno = TSDB_CODE_SUCCESS;
} }
metaReaderClear(&mr); metaReaderClear(&mr);
......
...@@ -308,12 +308,12 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) { ...@@ -308,12 +308,12 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
* @return int32_t * @return int32_t
*/ */
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) { if (!pEnv) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// step 1: set rsma stat // step 1: set rsma stat
...@@ -337,18 +337,26 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { ...@@ -337,18 +337,26 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
} }
// step 3: swap rsmaInfoHash and iRsmaInfoHash // step 3: swap rsmaInfoHash and iRsmaInfoHash
ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat)); // lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(RSMA_INFO_HASH(pRSmaStat)); ASSERT(RSMA_INFO_HASH(pRSmaStat));
ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat); RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
RSMA_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat) =
taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_INFO_HASH(pRSmaStat)) { if (!RSMA_INFO_HASH(pRSmaStat)) {
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr()); smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
// step 4: others // step 4: others
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
...@@ -383,26 +391,52 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) { ...@@ -383,26 +391,52 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
* @return int32_t * @return int32_t
*/ */
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) { if (!pEnv) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// step 1: merge rsmaInfoHash and iRsmaInfoHash // step 1: merge rsmaInfoHash and iRsmaInfoHash
taosWLockLatch(SMA_ENV_LOCK(pSmaEnv)); // lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
#if 0
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) { if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
// TODO: optimization - just switch the hash pointer if rsmaInfoHash is empty // just switch the hash pointer if rsmaInfoHash is empty
} if (taosHashGetSize(RSMA_IMU_INFO_HASH(pRSmaStat)) > 0) {
SHashObj *infoHash = RSMA_INFO_HASH(pRSmaStat);
RSMA_INFO_HASH(pRSmaStat) = RSMA_IMU_INFO_HASH(pRSmaStat);
RSMA_IMU_INFO_HASH(pRSmaStat) = infoHash;
}
} else {
#endif
#if 1
void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL); void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
while (pIter) { while (pIter) {
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) { if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
if (refVal == 0) {
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
smaDebug(
"vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
"table:%" PRIi64,
SMA_VID(pSma), *pSuid);
} else {
smaDebug(
"vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
"table:%" PRIi64,
SMA_VID(pSma), refVal, *pSuid);
}
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
continue;
}
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter)); taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
*pSuid); *pSuid);
...@@ -416,11 +450,14 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { ...@@ -416,11 +450,14 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter); pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
} }
#endif
// }
taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat)); taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = NULL; RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv)); // unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
// step 2: cleanup outdated qtaskinfo files // step 2: cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat); tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
typedef struct SSmaStat SSmaStat; typedef struct SSmaStat SSmaStat;
#define SMA_MGMT_REF_NUM 10240 #define SMA_MGMT_REF_NUM 10240
extern SSmaMgmt smaMgmt; extern SSmaMgmt smaMgmt;
...@@ -171,7 +171,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) { ...@@ -171,7 +171,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
if (!pRSmaInfo) return 0; if (!pRSmaInfo) return 0;
int ref = T_REF_INC(pRSmaInfo); int ref = T_REF_INC(pRSmaInfo);
smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
return 0; return 0;
...@@ -183,9 +183,6 @@ int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { ...@@ -183,9 +183,6 @@ int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
int ref = T_REF_DEC(pRSmaInfo); int ref = T_REF_DEC(pRSmaInfo);
smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
if (ref == 0) {
tdRemoveRSmaInfoBySuid(pSma, pRSmaInfo->suid);
}
return 0; return 0;
} }
......
...@@ -116,8 +116,10 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) { ...@@ -116,8 +116,10 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
} }
// create stable to save tsma result in dstVgId // create stable to save tsma result in dstVgId
SName stbFullName = {0};
tNameFromString(&stbFullName, pCfg->dstTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVCreateStbReq pReq = {0}; SVCreateStbReq pReq = {0};
pReq.name = pCfg->dstTbName; pReq.name = (char*)tNameGetTableName(&stbFullName);
pReq.suid = pCfg->dstTbUid; pReq.suid = pCfg->dstTbUid;
pReq.schemaRow = pCfg->schemaRow; pReq.schemaRow = pCfg->schemaRow;
pReq.schemaTag = pCfg->schemaTag; pReq.schemaTag = pCfg->schemaTag;
...@@ -125,6 +127,12 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) { ...@@ -125,6 +127,12 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
if (metaCreateSTable(SMA_META(pSma), version, &pReq) < 0) { if (metaCreateSTable(SMA_META(pSma), version, &pReq) < 0) {
return -1; return -1;
} }
smaDebug("vgId:%d, success to create sma index %s %" PRIi64 " on stb:%" PRIi64 ", dstSuid:%" PRIi64
" dstTb:%s dstVg:%d",
SMA_VID(pSma), pCfg->indexName, pCfg->indexUid, pCfg->tableUid, pCfg->dstTbUid, pReq.name, pCfg->dstVgId);
} else {
ASSERT(0);
} }
return 0; return 0;
......
...@@ -287,22 +287,22 @@ int32_t tdRemoveTFile(STFile *pTFile) { ...@@ -287,22 +287,22 @@ int32_t tdRemoveTFile(STFile *pTFile) {
} }
// smaXXXUtil ================ // smaXXXUtil ================
void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln) { void *tdAcquireSmaRef(int32_t rsetId, int64_t refId) {
void *pResult = taosAcquireRef(rsetId, refId); void *pResult = taosAcquireRef(rsetId, refId);
if (!pResult) { if (!pResult) {
smaWarn("%s:%d taosAcquireRef for rsetId:%" PRIi64 " refId:%d failed since %s", tags, ln, rsetId, refId, terrstr()); smaWarn("rsma acquire ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
} else { } else {
smaDebug("%s:%d taosAcquireRef for rsetId:%" PRIi64 " refId:%d success", tags, ln, rsetId, refId); smaDebug("rsma acquire ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
} }
return pResult; return pResult;
} }
int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln) { int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId) {
if (taosReleaseRef(rsetId, refId) < 0) { if (taosReleaseRef(rsetId, refId) < 0) {
smaWarn("%s:%d taosReleaseRef for rsetId:%" PRIi64 " refId:%d failed since %s", tags, ln, rsetId, refId, terrstr()); smaWarn("rsma release ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
smaDebug("%s:%d taosReleaseRef for rsetId:%" PRIi64 " refId:%d success", tags, ln, rsetId, refId); smaDebug("rsma release ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
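With the tags/line parameters dropped, a typical acquire/release pairing reduces to the sketch below; smaMgmt and the refId field of SRSmaInfo come from the hunks above, while the rsetId field name and pRSmaInfo being in scope are assumptions of this sketch.

int64_t    refId = pRSmaInfo->refId;                       // refId of the owning SRSmaStat (field added above)
SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, refId);
if (pStat) {
  // ... operate on the rsma stat while the reference is held ...
  tdReleaseSmaRef(smaMgmt.rsetId, refId);
}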
...@@ -313,7 +313,7 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t ...@@ -313,7 +313,7 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t
char *pOutput = NULL; char *pOutput = NULL;
int32_t len = 0; int32_t len = 0;
if (qSerializeTaskStatus(srcTaskInfo, &pOutput, &len) < 0) { if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) {
smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid, smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
terrstr()); terrstr());
goto _err; goto _err;
...@@ -337,41 +337,34 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t ...@@ -337,41 +337,34 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t
goto _err; goto _err;
} }
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid); smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
taosMemoryFreeClear(pOutput); taosMemoryFreeClear(pOutput);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
_err: _err:
taosMemoryFreeClear(pOutput); taosMemoryFreeClear(pOutput);
tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1); tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
terrstr());
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
/** /**
* @brief pTSchema is shared * @brief pTSchema is shared
* *
* @param pSma * @param pSma
* @param pDest * @param pDest
* @param pSrc * @param pSrc
* @return int32_t * @return int32_t
*/ */
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) { int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc) {
SVnode *pVnode = pSma->pVnode; SVnode *pVnode = pSma->pVnode;
SRSmaParam *param = NULL; SRSmaParam *param = NULL;
if (!pSrc) { if (!pSrc) {
*pDest = NULL;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (!pDest) {
pDest = taosMemoryCalloc(1, sizeof(SRSmaInfo));
if (!pDest) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
}
}
memcpy(pDest, pSrc, sizeof(SRSmaInfo));
SMetaReader mr = {0}; SMetaReader mr = {0};
metaReaderInit(&mr, SMA_META(pSma), 0); metaReaderInit(&mr, SMA_META(pSma), 0);
smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid); smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
...@@ -384,21 +377,22 @@ int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) { ...@@ -384,21 +377,22 @@ int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) {
ASSERT(mr.me.uid == pSrc->suid); ASSERT(mr.me.uid == pSrc->suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) { if (TABLE_IS_ROLLUP(mr.me.flags)) {
param = &mr.me.stbEntry.rsmaParam; param = &mr.me.stbEntry.rsmaParam;
for (int i = 0; i < TSDB_RETENTION_L2; ++i) { for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
SRSmaInfoItem *pItem = &pSrc->items[i]; if (tdCloneQTaskInfo(pSma, pSrc->iTaskInfo[i], pSrc->taskInfo[i], param, pSrc->suid, i) < 0) {
if (pItem->taskInfo) { goto _err;
tdCloneQTaskInfo(pSma, pDest->items[i].taskInfo, pItem->taskInfo, param, pSrc->suid, i);
} }
} }
smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid); smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
} }
metaReaderClear(&mr); metaReaderClear(&mr);
*pDest = pSrc; // pointer copy
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
_err: _err:
*pDest = NULL;
metaReaderClear(&mr); metaReaderClear(&mr);
tdFreeRSmaInfo(pSma, pDest, false); smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, terrstr());
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
\ No newline at end of file
// ...
\ No newline at end of file
...@@ -223,6 +223,8 @@ int vnodeCommit(SVnode *pVnode) { ...@@ -223,6 +223,8 @@ int vnodeCommit(SVnode *pVnode) {
vnodeBufPoolUnRef(pVnode->inUse); vnodeBufPoolUnRef(pVnode->inUse);
pVnode->inUse = NULL; pVnode->inUse = NULL;
pVnode->state.commitTerm = pVnode->state.applyTerm;
// save info // save info
info.config = pVnode->config; info.config = pVnode->config;
info.state.committed = pVnode->state.applied; info.state.committed = pVnode->state.applied;
......
...@@ -273,6 +273,9 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) { ...@@ -273,6 +273,9 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
} }
for (int32_t i = 0; i < msgNum; ++i) { for (int32_t i = 0; i < msgNum; ++i) {
req.msgIdx = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
offset += sizeof(req.msgIdx);
req.msgType = ntohl(*(int32_t *)((char *)pMsg->pCont + offset)); req.msgType = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
offset += sizeof(req.msgType); offset += sizeof(req.msgType);
...@@ -301,6 +304,7 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) { ...@@ -301,6 +304,7 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
break; break;
} }
rsp.msgIdx = req.msgIdx;
rsp.reqType = reqMsg.msgType; rsp.reqType = reqMsg.msgType;
rsp.msgLen = reqMsg.contLen; rsp.msgLen = reqMsg.contLen;
rsp.rspCode = reqMsg.code; rsp.rspCode = reqMsg.code;
...@@ -327,6 +331,8 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) { ...@@ -327,6 +331,8 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
*(int32_t *)((char *)pRsp + offset) = htonl(p->reqType); *(int32_t *)((char *)pRsp + offset) = htonl(p->reqType);
offset += sizeof(p->reqType); offset += sizeof(p->reqType);
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgIdx);
offset += sizeof(p->msgIdx);
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgLen); *(int32_t *)((char *)pRsp + offset) = htonl(p->msgLen);
offset += sizeof(p->msgLen); offset += sizeof(p->msgLen);
*(int32_t *)((char *)pRsp + offset) = htonl(p->rspCode); *(int32_t *)((char *)pRsp + offset) = htonl(p->rspCode);
......
...@@ -351,7 +351,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { ...@@ -351,7 +351,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
// TODO: remove the function // TODO: remove the function
void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
// TODO // TODO
blockDebugShowDataBlocks(data, __func__); blockDebugShowDataBlocks(data, __func__);
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
} }
...@@ -852,6 +851,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq ...@@ -852,6 +851,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) { if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
submitBlkRsp.code = terrno; submitBlkRsp.code = terrno;
pRsp->code = terrno;
tDecoderClear(&decoder); tDecoderClear(&decoder);
taosArrayDestroy(createTbReq.ctb.tagName); taosArrayDestroy(createTbReq.ctb.tagName);
goto _exit; goto _exit;
......
...@@ -708,8 +708,8 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { ...@@ -708,8 +708,8 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
} }
setPingTimerMS(pVnode->sync, 5000); setPingTimerMS(pVnode->sync, 5000);
setElectTimerMS(pVnode->sync, 2800); setElectTimerMS(pVnode->sync, 4000);
setHeartbeatTimerMS(pVnode->sync, 900); setHeartbeatTimerMS(pVnode->sync, 700);
return 0; return 0;
} }
......
...@@ -32,6 +32,7 @@ extern "C" { ...@@ -32,6 +32,7 @@ extern "C" {
#define CTG_DEFAULT_RENT_SLOT_SIZE 10 #define CTG_DEFAULT_RENT_SLOT_SIZE 10
#define CTG_DEFAULT_MAX_RETRY_TIMES 3 #define CTG_DEFAULT_MAX_RETRY_TIMES 3
#define CTG_DEFAULT_BATCH_NUM 64 #define CTG_DEFAULT_BATCH_NUM 64
#define CTG_DEFAULT_FETCH_NUM 8
#define CTG_RENT_SLOT_SECOND 1.5 #define CTG_RENT_SLOT_SECOND 1.5
...@@ -80,6 +81,8 @@ typedef enum { ...@@ -80,6 +81,8 @@ typedef enum {
CTG_TASK_GET_UDF, CTG_TASK_GET_UDF,
CTG_TASK_GET_USER, CTG_TASK_GET_USER,
CTG_TASK_GET_SVR_VER, CTG_TASK_GET_SVR_VER,
CTG_TASK_GET_TB_META_BATCH,
CTG_TASK_GET_TB_HASH_BATCH,
} CTG_TASK_TYPE; } CTG_TASK_TYPE;
typedef enum { typedef enum {
...@@ -110,6 +113,23 @@ typedef struct SCtgTbMetaCtx { ...@@ -110,6 +113,23 @@ typedef struct SCtgTbMetaCtx {
int32_t flag; int32_t flag;
} SCtgTbMetaCtx; } SCtgTbMetaCtx;
typedef struct SCtgFetch {
int32_t dbIdx;
int32_t tbIdx;
int32_t fetchIdx;
int32_t resIdx;
int32_t flag;
SCtgTbCacheInfo tbInfo;
int32_t vgId;
} SCtgFetch;
typedef struct SCtgTbMetasCtx {
int32_t fetchNum;
SArray* pNames;
SArray* pResList;
SArray* pFetchs;
} SCtgTbMetasCtx;
typedef struct SCtgTbIndexCtx { typedef struct SCtgTbIndexCtx {
SName* pName; SName* pName;
} SCtgTbIndexCtx; } SCtgTbIndexCtx;
...@@ -137,6 +157,14 @@ typedef struct SCtgTbHashCtx { ...@@ -137,6 +157,14 @@ typedef struct SCtgTbHashCtx {
SName* pName; SName* pName;
} SCtgTbHashCtx; } SCtgTbHashCtx;
typedef struct SCtgTbHashsCtx {
int32_t fetchNum;
SArray* pNames;
SArray* pResList;
SArray* pFetchs;
} SCtgTbHashsCtx;
typedef struct SCtgIndexCtx { typedef struct SCtgIndexCtx {
char indexFName[TSDB_INDEX_FNAME_LEN]; char indexFName[TSDB_INDEX_FNAME_LEN];
} SCtgIndexCtx; } SCtgIndexCtx;
...@@ -211,6 +239,7 @@ typedef struct SCtgBatch { ...@@ -211,6 +239,7 @@ typedef struct SCtgBatch {
SRequestConnInfo conn; SRequestConnInfo conn;
char dbFName[TSDB_DB_FNAME_LEN]; char dbFName[TSDB_DB_FNAME_LEN];
SArray* pTaskIds; SArray* pTaskIds;
SArray* pMsgIdxs;
} SCtgBatch; } SCtgBatch;
typedef struct SCtgJob { typedef struct SCtgJob {
...@@ -218,6 +247,7 @@ typedef struct SCtgJob { ...@@ -218,6 +247,7 @@ typedef struct SCtgJob {
int32_t batchId; int32_t batchId;
SHashObj* pBatchs; SHashObj* pBatchs;
SArray* pTasks; SArray* pTasks;
int32_t subTaskNum;
int32_t taskDone; int32_t taskDone;
SMetaData jobRes; SMetaData jobRes;
int32_t jobResCode; int32_t jobResCode;
...@@ -258,6 +288,7 @@ typedef struct SCtgTaskCallbackParam { ...@@ -258,6 +288,7 @@ typedef struct SCtgTaskCallbackParam {
SArray* taskId; SArray* taskId;
int32_t reqType; int32_t reqType;
int32_t batchId; int32_t batchId;
SArray* msgIdx;
} SCtgTaskCallbackParam; } SCtgTaskCallbackParam;
...@@ -276,6 +307,7 @@ typedef struct SCtgTask { ...@@ -276,6 +307,7 @@ typedef struct SCtgTask {
int32_t taskId; int32_t taskId;
SCtgJob* pJob; SCtgJob* pJob;
void* taskCtx; void* taskCtx;
SArray* msgCtxs;
SCtgMsgCtx msgCtx; SCtgMsgCtx msgCtx;
int32_t code; int32_t code;
void* res; void* res;
...@@ -286,9 +318,14 @@ typedef struct SCtgTask { ...@@ -286,9 +318,14 @@ typedef struct SCtgTask {
SHashObj* pBatchs; SHashObj* pBatchs;
} SCtgTask; } SCtgTask;
typedef struct SCtgTaskReq {
SCtgTask* pTask;
int32_t msgIdx;
} SCtgTaskReq;
typedef int32_t (*ctgInitTaskFp)(SCtgJob*, int32_t, void*); typedef int32_t (*ctgInitTaskFp)(SCtgJob*, int32_t, void*);
typedef int32_t (*ctgLanchTaskFp)(SCtgTask*); typedef int32_t (*ctgLanchTaskFp)(SCtgTask*);
typedef int32_t (*ctgHandleTaskMsgRspFp)(SCtgTask*, int32_t, const SDataBuf *, int32_t); typedef int32_t (*ctgHandleTaskMsgRspFp)(SCtgTaskReq*, int32_t, const SDataBuf *, int32_t);
typedef int32_t (*ctgDumpTaskResFp)(SCtgTask*); typedef int32_t (*ctgDumpTaskResFp)(SCtgTask*);
typedef int32_t (*ctgCloneTaskResFp)(SCtgTask*, void**); typedef int32_t (*ctgCloneTaskResFp)(SCtgTask*, void**);
typedef int32_t (*ctgCompTaskFp)(SCtgTask*, void*, bool*); typedef int32_t (*ctgCompTaskFp)(SCtgTask*, void*, bool*);
...@@ -487,6 +524,8 @@ typedef struct SCtgOperation { ...@@ -487,6 +524,8 @@ typedef struct SCtgOperation {
#define CTG_FLAG_MAKE_STB(_isStb) (((_isStb) == 1) ? CTG_FLAG_STB : ((_isStb) == 0 ? CTG_FLAG_NOT_STB : CTG_FLAG_UNKNOWN_STB)) #define CTG_FLAG_MAKE_STB(_isStb) (((_isStb) == 1) ? CTG_FLAG_STB : ((_isStb) == 0 ? CTG_FLAG_NOT_STB : CTG_FLAG_UNKNOWN_STB))
#define CTG_FLAG_MATCH_STB(_flag, tbType) (CTG_FLAG_IS_UNKNOWN_STB(_flag) || (CTG_FLAG_IS_STB(_flag) && (tbType) == TSDB_SUPER_TABLE) || (CTG_FLAG_IS_NOT_STB(_flag) && (tbType) != TSDB_SUPER_TABLE)) #define CTG_FLAG_MATCH_STB(_flag, tbType) (CTG_FLAG_IS_UNKNOWN_STB(_flag) || (CTG_FLAG_IS_STB(_flag) && (tbType) == TSDB_SUPER_TABLE) || (CTG_FLAG_IS_NOT_STB(_flag) && (tbType) != TSDB_SUPER_TABLE))
#define CTG_GET_TASK_MSGCTX(_task, _id) (((CTG_TASK_GET_TB_META_BATCH == (_task)->type) || (CTG_TASK_GET_TB_HASH_BATCH == (_task)->type)) ? taosArrayGet((_task)->msgCtxs, (_id)) : &(_task)->msgCtx)
#define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema)) #define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema))
#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST) #define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
...@@ -586,6 +625,7 @@ int32_t ctgdShowCacheInfo(void); ...@@ -586,6 +625,7 @@ int32_t ctgdShowCacheInfo(void);
int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq);
int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetasCtx* ctx, int32_t dbIdx, int32_t *fetchIdx, int32_t baseResIdx, SArray* pList);
int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action); int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action);
int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action);
...@@ -631,7 +671,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg ...@@ -631,7 +671,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target); int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target);
int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask); int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTaskReq* tReq);
int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask); int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask);
int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask); int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask);
int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask); int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask);
...@@ -639,9 +679,9 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const ...@@ -639,9 +679,9 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *name, STableIndex* out, SCtgTask* pTask); int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *name, STableIndex* out, SCtgTask* pTask);
int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *funcName, SFuncInfo *out, SCtgTask* pTask); int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *funcName, SFuncInfo *out, SCtgTask* pTask);
int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *user, SGetUserAuthRsp *out, SCtgTask* pTask); int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *user, SGetUserAuthRsp *out, SCtgTask* pTask);
int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo *pConn, char *dbFName, char* tbName, STableMetaOutput* out, SCtgTask* pTask); int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo *pConn, char *dbFName, char* tbName, STableMetaOutput* out, SCtgTaskReq* tReq);
int32_t ctgGetTbMetaFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableMetaOutput* out, SCtgTask* pTask); int32_t ctgGetTbMetaFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableMetaOutput* out, SCtgTaskReq* tReq);
int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableMetaOutput* out, SCtgTask* pTask); int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableMetaOutput* out, SCtgTaskReq* tReq);
int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableCfg **out, SCtgTask* pTask); int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg **out, SCtgTask* pTask); int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask); int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask);
...@@ -664,6 +704,7 @@ void ctgFreeJob(void* job); ...@@ -664,6 +704,7 @@ void ctgFreeJob(void* job);
void ctgFreeHandleImpl(SCatalog* pCtg); void ctgFreeHandleImpl(SCatalog* pCtg);
void ctgFreeVgInfo(SDBVgInfo *vgInfo); void ctgFreeVgInfo(SDBVgInfo *vgInfo);
int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup); int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup);
int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update);
void ctgResetTbMetaTask(SCtgTask* pTask); void ctgResetTbMetaTask(SCtgTask* pTask);
void ctgFreeDbCache(SCtgDBCache *dbCache); void ctgFreeDbCache(SCtgDBCache *dbCache);
int32_t ctgStbVersionSortCompare(const void* key1, const void* key2); int32_t ctgStbVersionSortCompare(const void* key1, const void* key2);
...@@ -672,8 +713,11 @@ int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2); ...@@ -672,8 +713,11 @@ int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2);
int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2); int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2);
void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput); void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput);
int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target); int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target);
int32_t ctgAddMsgCtx(SArray* pCtxs, int32_t reqType, void* out, char* target);
char * ctgTaskTypeStr(CTG_TASK_TYPE type); char * ctgTaskTypeStr(CTG_TASK_TYPE type);
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId); int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId);
int32_t ctgGetTablesReqNum(SArray *pList);
int32_t ctgAddFetch(SArray** pFetchs, int32_t dbIdx, int32_t tbIdx, int32_t *fetchIdx, int32_t resIdx, int32_t flag);
int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes); int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes);
void ctgFreeSTableIndex(void *info); void ctgFreeSTableIndex(void *info);
void ctgClearSubTaskRes(SCtgSubRes *pRes); void ctgClearSubTaskRes(SCtgSubRes *pRes);
...@@ -682,6 +726,7 @@ void ctgClearHandle(SCatalog* pCtg); ...@@ -682,6 +726,7 @@ void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache *pCache); void ctgFreeTbCacheImpl(SCtgTbCache *pCache);
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName); int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup); int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup);
SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch);
extern SCatalogMgmt gCtgMgmt; extern SCatalogMgmt gCtgMgmt;
......
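A small sketch of how the new per-message plumbing fits together: a batch-aware response handler matching the updated ctgHandleTaskMsgRspFp signature resolves its message context through the CTG_GET_TASK_MSGCTX macro added above. The handler name and body are illustrative only.

static int32_t demoHandleBatchRsp(SCtgTaskReq *tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
  SCtgTask   *pTask   = tReq->pTask;
  SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);  // per-message ctx for batch tasks,
                                                                   // task-level msgCtx otherwise
  // ... dispatch on reqType, fill pMsgCtx->out from pMsg, record rspCode ...
  return TSDB_CODE_SUCCESS;
}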
This diff has been collapsed.
...@@ -242,7 +242,6 @@ int32_t ctgAcquireTbMetaFromCache(SCatalog* pCtg, char *dbFName, char* tbName, S ...@@ -242,7 +242,6 @@ int32_t ctgAcquireTbMetaFromCache(SCatalog* pCtg, char *dbFName, char* tbName, S
goto _return; goto _return;
} }
int32_t sz = 0;
pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName)); pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) { if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName); ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
...@@ -282,7 +281,6 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid, ...@@ -282,7 +281,6 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid,
goto _return; goto _return;
} }
int32_t sz = 0;
char* stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid)); char* stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
if (NULL == stName) { if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName); ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
...@@ -2152,6 +2150,254 @@ int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMet ...@@ -2152,6 +2150,254 @@ int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMet
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#if 0
int32_t ctgGetTbMetaBFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetasCtx* ctx, SArray** pResList) {
int32_t tbNum = taosArrayGetSize(ctx->pNames);
SName* fName = taosArrayGet(ctx->pNames, 0);
int32_t fIdx = 0;
for (int32_t i = 0; i < tbNum; ++i) {
SName* pName = taosArrayGet(ctx->pNames, i);
SCtgTbMetaCtx nctx = {0};
nctx.flag = CTG_FLAG_UNKNOWN_STB;
nctx.pName = pName;
if (IS_SYS_DBNAME(pName->dbname)) {
CTG_FLAG_SET_SYS_DB(nctx.flag);
}
STableMeta *pTableMeta = NULL;
CTG_ERR_RET(ctgReadTbMetaFromCache(pCtg, &nctx, &pTableMeta));
SMetaRes res = {0};
if (pTableMeta) {
if (CTG_FLAG_MATCH_STB(nctx.flag, pTableMeta->tableType) &&
((!CTG_FLAG_IS_FORCE_UPDATE(nctx.flag)) || (CTG_FLAG_IS_SYS_DB(nctx.flag)))) {
res.pRes = pTableMeta;
} else {
taosMemoryFreeClear(pTableMeta);
}
}
if (NULL == res.pRes) {
if (NULL == ctx->pFetchs) {
ctx->pFetchs = taosArrayInit(tbNum, sizeof(SCtgFetch));
}
if (CTG_FLAG_IS_UNKNOWN_STB(nctx.flag)) {
CTG_FLAG_SET_STB(nctx.flag, nctx.tbInfo.tbType);
}
SCtgFetch fetch = {0};
fetch.tbIdx = i;
fetch.fetchIdx = fIdx++;
fetch.flag = nctx.flag;
taosArrayPush(ctx->pFetchs, &fetch);
}
taosArrayPush(ctx->pResList, &res);
}
if (NULL == ctx->pFetchs) {
TSWAP(*pResList, ctx->pResList);
}
return TSDB_CODE_SUCCESS;
}
#endif
int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetasCtx* ctx, int32_t dbIdx, int32_t *fetchIdx, int32_t baseResIdx, SArray* pList) {
int32_t tbNum = taosArrayGetSize(pList);
SName* pName = taosArrayGet(pList, 0);
char dbFName[TSDB_DB_FNAME_LEN] = {0};
int32_t flag = CTG_FLAG_UNKNOWN_STB;
uint64_t lastSuid = 0;
STableMeta* lastTableMeta = NULL;
if (IS_SYS_DBNAME(pName->dbname)) {
CTG_FLAG_SET_SYS_DB(flag);
strcpy(dbFName, pName->dbname);
} else {
tNameGetFullDbName(pName, dbFName);
}
SCtgDBCache *dbCache = NULL;
SCtgTbCache* pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
for (int32_t i = 0; i < tbNum; ++i) {
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
}
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < tbNum; ++i) {
SName* pName = taosArrayGet(pList, i);
pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
continue;
}
CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
continue;
}
STableMeta* tbMeta = pCache->pMeta;
SCtgTbMetaCtx nctx = {0};
nctx.flag = flag;
nctx.tbInfo.inCache = true;
nctx.tbInfo.dbId = dbCache->dbId;
nctx.tbInfo.suid = tbMeta->suid;
nctx.tbInfo.tbType = tbMeta->tableType;
SMetaRes res = {0};
STableMeta* pTableMeta = NULL;
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
int32_t metaSize = CTG_META_SIZE(tbMeta);
pTableMeta = taosMemoryCalloc(1, metaSize);
if (NULL == pTableMeta) {
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(pTableMeta, tbMeta, metaSize);
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
continue;
}
// PROCESS FOR CHILD TABLE
if (lastSuid && tbMeta->suid == lastSuid && lastTableMeta) {
cloneTableMeta(lastTableMeta, &pTableMeta);
memcpy(pTableMeta, tbMeta, sizeof(SCTableMeta));
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
continue;
}
int32_t metaSize = sizeof(SCTableMeta);
pTableMeta = taosMemoryCalloc(1, metaSize);
if (NULL == pTableMeta) {
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(pTableMeta, tbMeta, metaSize);
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s",
pName->tname, nctx.tbInfo.tbType, dbFName);
char* stName = taosHashAcquire(dbCache->stbCache, &pTableMeta->suid, sizeof(pTableMeta->suid));
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosMemoryFreeClear(pTableMeta);
continue;
}
pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
if (NULL == pCache) {
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", pTableMeta->suid, stName, dbFName);
taosHashRelease(dbCache->stbCache, stName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosMemoryFreeClear(pTableMeta);
continue;
}
taosHashRelease(dbCache->stbCache, stName);
CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", pTableMeta->suid, dbFName);
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosMemoryFreeClear(pTableMeta);
continue;
}
STableMeta* stbMeta = pCache->pMeta;
if (stbMeta->suid != nctx.tbInfo.suid) {
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
      ctgError("stb suid 0x%" PRIx64 " in stbCache mismatch, expected suid 0x%" PRIx64, stbMeta->suid, nctx.tbInfo.suid);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosMemoryFreeClear(pTableMeta);
continue;
}
metaSize = CTG_META_SIZE(stbMeta);
pTableMeta = taosMemoryRealloc(pTableMeta, metaSize);
if (NULL == pTableMeta) {
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(&pTableMeta->sversion, &stbMeta->sversion, metaSize - sizeof(SCTableMeta));
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
taosHashRelease(dbCache->tbCache, pCache);
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
lastSuid = pTableMeta->suid;
lastTableMeta = pTableMeta;
}
ctgReleaseDBCache(pCtg, dbCache);
return TSDB_CODE_SUCCESS;
}
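For reference, the child-table path above leans on a layout convention: the full table meta begins with the same fields as the compact child-table meta (SCTableMeta), followed by the version/schema section that is shared with the super table, so a child table's meta can be assembled from its own cached header plus the cached super-table meta. Below is a minimal sketch of that assembly using hypothetical simplified stand-ins; DemoCTableMeta and DemoTableMeta are illustrative only, not the real TDengine definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoCTableMeta {   // child-table header: per-table identity only
  uint64_t uid;
  uint64_t suid;
  int32_t  vgId;
  int8_t   tableType;
} DemoCTableMeta;

typedef struct DemoTableMeta {    // full meta: header plus the part shared with the super table
  DemoCTableMeta header;
  int32_t        sversion;        // everything from here on is copied from the super-table meta
  int32_t        numOfColumns;    // (the real struct carries versions, table info and a schema array)
} DemoTableMeta;

// Assemble a child table's full meta: child-specific header from the child entry,
// shared section from the cached super-table meta -- the same two copy steps as above.
static DemoTableMeta* demoBuildChildMeta(const DemoCTableMeta* ctb, const DemoTableMeta* stb) {
  DemoTableMeta* p = calloc(1, sizeof(DemoTableMeta));
  if (NULL == p) {
    return NULL;
  }
  memcpy(p, ctb, sizeof(DemoCTableMeta));
  size_t shared = sizeof(DemoTableMeta) - offsetof(DemoTableMeta, sversion);
  memcpy((char*)p + offsetof(DemoTableMeta, sversion),
         (const char*)stb + offsetof(DemoTableMeta, sversion), shared);
  return p;
}

Reusing lastSuid/lastTableMeta then amounts to cloning the previously assembled meta and overwriting only the child-specific header, which avoids a second stbCache lookup for runs of child tables under the same super table.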
int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq) {
  int32_t code = 0;
......
This diff is collapsed.
@@ -26,6 +26,7 @@ void ctgFreeMsgSendParam(void* param) {
  SCtgTaskCallbackParam* pParam = (SCtgTaskCallbackParam*)param;
  taosArrayDestroy(pParam->taskId);
  taosArrayDestroy(pParam->msgIdx);
  taosMemoryFree(param);
}
@@ -88,6 +89,10 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
      return "[get user]";
    case CTG_TASK_GET_SVR_VER:
      return "[get svr ver]";
    case CTG_TASK_GET_TB_META_BATCH:
      return "[bget table meta]";
    case CTG_TASK_GET_TB_HASH_BATCH:
      return "[bget table hash]";
    default:
      return "unknown";
  }
@@ -460,6 +465,25 @@ void ctgResetTbMetaTask(SCtgTask* pTask) {
  taosMemoryFreeClear(pTask->res);
}
void ctgFreeBatchMeta(void* meta) {
if (NULL == meta) {
return;
}
SMetaRes* pRes = (SMetaRes*)meta;
taosMemoryFreeClear(pRes->pRes);
}
void ctgFreeBatchHash(void* hash) {
if (NULL == hash) {
return;
}
SMetaRes* pRes = (SMetaRes*)hash;
taosMemoryFreeClear(pRes->pRes);
}
void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
  switch (type) {
    case CTG_TASK_GET_QNODE:
@@ -500,6 +524,24 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
      taosMemoryFreeClear(*pRes);
      break;
    }
case CTG_TASK_GET_TB_META_BATCH: {
SArray* pArray = (SArray*)*pRes;
int32_t num = taosArrayGetSize(pArray);
for (int32_t i = 0; i < num; ++i) {
ctgFreeBatchMeta(taosArrayGet(pArray, i));
}
*pRes = NULL; // no need to free it
break;
}
case CTG_TASK_GET_TB_HASH_BATCH: {
SArray* pArray = (SArray*)*pRes;
int32_t num = taosArrayGetSize(pArray);
for (int32_t i = 0; i < num; ++i) {
ctgFreeBatchHash(taosArrayGet(pArray, i));
}
*pRes = NULL; // no need to free it
break;
}
    default:
      qError("invalid task type %d", type);
      break;
@@ -554,6 +596,16 @@ void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) {
      taosMemoryFreeClear(*pRes);
      break;
    }
case CTG_TASK_GET_TB_META_BATCH: {
taosArrayDestroyEx(*pRes, ctgFreeBatchMeta);
*pRes = NULL;
break;
}
case CTG_TASK_GET_TB_HASH_BATCH: {
taosArrayDestroyEx(*pRes, ctgFreeBatchHash);
*pRes = NULL;
break;
}
    default:
      qError("invalid task type %d", type);
      break;
@@ -583,12 +635,38 @@ void ctgFreeTaskCtx(SCtgTask* pTask) {
      taosMemoryFreeClear(pTask->taskCtx);
      break;
    }
case CTG_TASK_GET_TB_META_BATCH: {
SCtgTbMetasCtx* taskCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
taosArrayDestroyEx(taskCtx->pResList, ctgFreeBatchMeta);
taosArrayDestroy(taskCtx->pFetchs);
// NO NEED TO FREE pNames
taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeMsgCtx);
if (pTask->msgCtx.lastOut) {
ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut);
pTask->msgCtx.lastOut = NULL;
}
taosMemoryFreeClear(pTask->taskCtx);
break;
}
    case CTG_TASK_GET_TB_HASH: {
      SCtgTbHashCtx* taskCtx = (SCtgTbHashCtx*)pTask->taskCtx;
      taosMemoryFreeClear(taskCtx->pName);
      taosMemoryFreeClear(pTask->taskCtx);
      break;
    }
case CTG_TASK_GET_TB_HASH_BATCH: {
SCtgTbHashsCtx* taskCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
taosArrayDestroyEx(taskCtx->pResList, ctgFreeBatchHash);
taosArrayDestroy(taskCtx->pFetchs);
// NO NEED TO FREE pNames
taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeMsgCtx);
taosMemoryFreeClear(pTask->taskCtx);
break;
}
    case CTG_TASK_GET_TB_INDEX: {
      SCtgTbIndexCtx* taskCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
      taosMemoryFreeClear(taskCtx->pName);
@@ -679,6 +757,23 @@ int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* targ
  return TSDB_CODE_SUCCESS;
}
int32_t ctgAddMsgCtx(SArray* pCtxs, int32_t reqType, void* out, char* target) {
SCtgMsgCtx ctx = {0};
ctx.reqType = reqType;
ctx.out = out;
if (target) {
ctx.target = strdup(target);
if (NULL == ctx.target) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
taosArrayPush(pCtxs, &ctx);
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetHashFunction(int8_t hashMethod, tableNameHashFp *fp) {
  switch (hashMethod) {
@@ -780,6 +875,104 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
  CTG_RET(code);
}
int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SMetaRes res = {0};
int32_t vgNum = taosHashGetSize(dbInfo->vgHash);
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum);
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);
}
tableNameHashFp fp = NULL;
SVgroupInfo *vgInfo = NULL;
CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp));
int32_t tbNum = taosArrayGetSize(pNames);
if (1 == vgNum) {
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == vgInfo) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
*vgInfo = *(SVgroupInfo*)pIter;
ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
if (update) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx);
SMetaRes *pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i);
pRes->pRes = vgInfo;
} else {
res.pRes = vgInfo;
taosArrayPush(pCtx->pResList, &res);
}
}
return TSDB_CODE_SUCCESS;
}
char tbFullName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFullName, "%s.", dbFName);
int32_t offset = strlen(tbFullName);
SName* pName = NULL;
int32_t tbNameLen = 0;
for (int32_t i = 0; i < tbNum; ++i) {
pName = taosArrayGet(pNames, i);
tbNameLen = offset + strlen(pName->tname);
strcpy(tbFullName + offset, pName->tname);
uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
vgInfo = pIter;
if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) {
taosHashCancelIterate(dbInfo->vgHash, pIter);
break;
}
pIter = taosHashIterate(dbInfo->vgHash, pIter);
vgInfo = NULL;
}
if (NULL == vgInfo) {
ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, dbFName, taosHashGetSize(dbInfo->vgHash));
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
SVgroupInfo* pNewVg = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == pNewVg) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
*pNewVg = *vgInfo;
ctgDebug("Got tb %s hash vgroup, vgId:%d, epNum %d, current %s port %d", tbFullName, vgInfo->vgId, vgInfo->epSet.numOfEps,
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
if (update) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx);
SMetaRes *pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i);
pRes->pRes = pNewVg;
} else {
res.pRes = pNewVg;
taosArrayPush(pCtx->pResList, &res);
}
}
CTG_RET(code);
}
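The routing rule implemented above: hash the full table name "<dbFName>.<tbname>" and pick the vgroup whose inclusive [hashBegin, hashEnd] range contains the value; when only one vgroup exists, the hash is skipped and every table maps to that vgroup. Below is a minimal sketch under simplifying assumptions -- a flat array stands in for the vgHash table, and a placeholder FNV-1a hash stands in for whatever ctgGetHashFunction() selects from dbInfo->hashMethod; DemoVgroup, demoHash and demoRouteTable are hypothetical names, not TDengine APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct DemoVgroup {    // hypothetical stand-in for SVgroupInfo
  int32_t  vgId;
  uint32_t hashBegin;
  uint32_t hashEnd;
} DemoVgroup;

// Placeholder hash (FNV-1a); the real hash function depends on dbInfo->hashMethod.
static uint32_t demoHash(const char* key, uint32_t len) {
  uint32_t h = 2166136261u;
  for (uint32_t i = 0; i < len; ++i) {
    h ^= (uint8_t)key[i];
    h *= 16777619u;
  }
  return h;
}

// Route "<dbFName>.<tbName>" to the vgroup whose hash range contains its hash value.
static const DemoVgroup* demoRouteTable(const DemoVgroup* vgs, int32_t vgNum,
                                        const char* dbFName, const char* tbName) {
  char     tbFullName[512];
  int32_t  len = snprintf(tbFullName, sizeof(tbFullName), "%s.%s", dbFName, tbName);
  uint32_t hashValue = demoHash(tbFullName, (uint32_t)len);
  for (int32_t i = 0; i < vgNum; ++i) {
    if (hashValue >= vgs[i].hashBegin && hashValue <= vgs[i].hashEnd) {
      return &vgs[i];        // found the owning vgroup
    }
  }
  return NULL;               // no range matched: reported as an internal error above
}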
int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2) {
  if (*(uint64_t *)key1 < ((SSTableVersion*)key2)->suid) {
    return -1;
@@ -921,4 +1114,41 @@ int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, cha
  return TSDB_CODE_SUCCESS;
}
int32_t ctgGetTablesReqNum(SArray *pList) {
if (NULL == pList) {
return 0;
}
int32_t total = 0;
int32_t n = taosArrayGetSize(pList);
for (int32_t i = 0; i < n; ++i) {
STablesReq *pReq = taosArrayGet(pList, i);
total += taosArrayGetSize(pReq->pTables);
}
return total;
}
int32_t ctgAddFetch(SArray** pFetchs, int32_t dbIdx, int32_t tbIdx, int32_t *fetchIdx, int32_t resIdx, int32_t flag) {
if (NULL == (*pFetchs)) {
*pFetchs = taosArrayInit(CTG_DEFAULT_FETCH_NUM, sizeof(SCtgFetch));
}
SCtgFetch fetch = {0};
fetch.dbIdx = dbIdx;
fetch.tbIdx = tbIdx;
fetch.fetchIdx = (*fetchIdx)++;
fetch.resIdx = resIdx;
fetch.flag = flag;
taosArrayPush(*pFetchs, &fetch);
return TSDB_CODE_SUCCESS;
}
SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch) {
STablesReq* pReq = (STablesReq*)taosArrayGet(pNames, pFetch->dbIdx);
return (SName*)taosArrayGet(pReq->pTables, pFetch->tbIdx);
}
@@ -755,6 +755,9 @@ typedef struct STimeSliceOperatorInfo {
  SInterval   interval;
  int64_t     current;
  SArray*     pPrevRow;      // SArray<SGroupValue>
  SArray*     pNextRow;      // SArray<SGroupValue>
  bool        isPrevRowSet;
  bool        isNextRowSet;
  int32_t     fillType;      // fill type
  SColumn     tsCol;         // primary timestamp column
  SExprSupp   scalarSup;     // scalar calculation
@@ -1007,7 +1010,7 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
                               char* sql, EOPTR_EXEC_MODEL model);
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
......
@@ -191,6 +191,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
  pBlock->info.blockId = pNode->dataBlockId;
  pBlock->info.type = STREAM_INVALID;
  pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
  pBlock->info.watermark = INT64_MIN;
  for (int32_t i = 0; i < numOfCols; ++i) {
    SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
......
@@ -357,7 +357,7 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
}
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, SSubplan* pSubplan,
                        qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, char* sql, EOPTR_EXEC_MODEL model) {
  assert(pSubplan != NULL);
  SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;
......
@@ -1437,7 +1437,8 @@ static void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, u
  pAggInfo->groupId = groupId;
}
static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowCellOffset) {
  bool returnNotNull = false;
  for (int32_t j = 0; j < numOfExprs; ++j) {
    struct SResultRowEntryInfo* pResInfo = getResultEntryInfo(pRow, j, rowCellOffset);
    if (!isRowEntryInitialized(pResInfo)) {
@@ -1447,6 +1448,15 @@ static void doUpdateNumOfRows(SResultRow* pRow, int32_t numOfExprs, const int32_
    if (pRow->numOfRows < pResInfo->numOfRes) {
      pRow->numOfRows = pResInfo->numOfRes;
    }
    if (fmIsNotNullOutputFunc(pCtx[j].functionId)) {
      returnNotNull = true;
    }
  }
  // if all expr skips all blocks, e.g. all null inputs for max function, output one row in final result.
  // except for first/last, which require not null output, output no rows
  if (pRow->numOfRows == 0 && !returnNotNull) {
    pRow->numOfRows = 1;
  }
}
@@ -1458,7 +1468,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
  SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId);
  SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset);
  doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
  if (pRow->numOfRows == 0) {
    releaseBufPage(pBuf, page);
    return 0;
@@ -1514,7 +1524,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
    SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
    doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
    if (pRow->numOfRows == 0) {
      pGroupResInfo->index += 1;
      releaseBufPage(pBuf, page);
@@ -4181,7 +4191,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
      int32_t children = 0;
      pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
    } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION == type) {
      int32_t children = pHandle->numOfVgroups;
      pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
    } else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
      pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
@@ -4493,7 +4503,7 @@ int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pT
}
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
                               char* sql, EOPTR_EXEC_MODEL model) {
  uint64_t queryId = pPlan->id.queryId;
  int32_t  code = TSDB_CODE_SUCCESS;
@@ -4504,6 +4514,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
  }
  (*pTaskInfo)->sql = sql;
  sql = NULL;
  (*pTaskInfo)->pSubplan = pPlan;
  (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, &(*pTaskInfo)->tableqinfoList,
                                           pPlan->pTagCond, pPlan->pTagIndexCond, pPlan->user);
@@ -4516,6 +4527,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
  return code;
_complete:
  taosMemoryFree(sql);
  doDestroyTask(*pTaskInfo);
  terrno = code;
  return code;
......
@@ -64,6 +64,9 @@ typedef struct SMinmaxResInfo {
  bool      assign;   // assign the first value or not
  int64_t   v;
  STuplePos tuplePos;

  STuplePos nullTuplePos;
  bool      nullTupleSaved;
} SMinmaxResInfo;
typedef struct STopBotResItem {
@@ -75,6 +78,10 @@ typedef struct STopBotResItem {
typedef struct STopBotRes {
  int32_t maxSize;
  int16_t type;

  STuplePos       nullTuplePos;
  bool            nullTupleSaved;

  STopBotResItem* pItems;
} STopBotRes;
@@ -221,6 +228,10 @@ typedef struct SSampleInfo {
  int32_t numSampled;
  uint8_t colType;
  int16_t colBytes;

  STuplePos  nullTuplePos;
  bool       nullTupleSaved;

  char*      data;
  STuplePos* tuplePos;
} SSampleInfo;
@@ -1134,6 +1145,9 @@ bool minmaxFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo)
  SMinmaxResInfo* buf = GET_ROWCELL_INTERBUF(pResultInfo);
  buf->assign = false;
  buf->tuplePos.pageId = -1;

  buf->nullTupleSaved = false;
  buf->nullTuplePos.pageId = -1;
  return true;
}
@@ -1575,6 +1589,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
  }

_min_max_over:
  if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
    doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
    pBuf->nullTupleSaved = true;
  }
  return numOfElems;
}
@@ -1615,7 +1633,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  if (pEntryInfo->numOfRes > 0) {
    setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow);
  } else {
    setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
  }

  return pEntryInfo->numOfRes;
@@ -3366,6 +3384,8 @@ bool topBotFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
  pRes->maxSize = pCtx->param[1].param.i;
  pRes->nullTupleSaved = false;
  pRes->nullTuplePos.pageId = -1;
  return true;
}
@@ -3403,6 +3423,10 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
    doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, pInput->uid, pResInfo, true);
  }
  if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
    doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
    pRes->nullTupleSaved = true;
  }
  return TSDB_CODE_SUCCESS;
}
@@ -3427,6 +3451,11 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
    doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, pInput->uid, pResInfo, false);
  }

  if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
    doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
    pRes->nullTupleSaved = true;
  }
  return TSDB_CODE_SUCCESS;
}
@@ -3625,6 +3654,11 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  // todo assign the tag value and the corresponding row data
  int32_t currentRow = pBlock->info.rows;
  if (pEntryInfo->numOfRes <= 0) {
    colDataAppendNULL(pCol, currentRow);
    setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
    return pEntryInfo->numOfRes;
  }
  for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) {
    STopBotResItem* pItem = &pRes->pItems[i];
    if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -4897,7 +4931,8 @@ bool sampleFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo)
  pInfo->numSampled = 0;
  pInfo->colType = pCtx->resDataInfo.type;
  pInfo->colBytes = pCtx->resDataInfo.bytes;
  pInfo->nullTuplePos.pageId = -1;
  pInfo->nullTupleSaved = false;
  pInfo->data = (char*)pInfo + sizeof(SSampleInfo);
  pInfo->tuplePos = (STuplePos*)((char*)pInfo + sizeof(SSampleInfo) + pInfo->samples * pInfo->colBytes);
@@ -4943,6 +4978,11 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
    doReservoirSample(pCtx, pInfo, data, i);
  }

  if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
    doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
    pInfo->nullTupleSaved = true;
  }
  SET_VAL(pResInfo, pInfo->numSampled, pInfo->numSampled);
  return TSDB_CODE_SUCCESS;
}
@@ -4957,6 +4997,11 @@ int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
  int32_t currentRow = pBlock->info.rows;
  if (pInfo->numSampled == 0) {
    colDataAppendNULL(pCol, currentRow);
    setSelectivityValue(pCtx, pBlock, &pInfo->nullTuplePos, currentRow);
    return pInfo->numSampled;
  }
  for (int32_t i = 0; i < pInfo->numSampled; ++i) {
    colDataAppend(pCol, currentRow + i, pInfo->data + i * pInfo->colBytes, false);
    setSelectivityValue(pCtx, pBlock, &pInfo->tuplePos[i], currentRow + i);
......
@@ -221,6 +221,18 @@ bool fmIsLastRowFunc(int32_t funcId) {
  return FUNCTION_TYPE_LAST_ROW == funcMgtBuiltins[funcId].type;
}
bool fmIsNotNullOutputFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
}
return FUNCTION_TYPE_LAST == funcMgtBuiltins[funcId].type ||
FUNCTION_TYPE_LAST_PARTIAL == funcMgtBuiltins[funcId].type ||
FUNCTION_TYPE_LAST_MERGE == funcMgtBuiltins[funcId].type ||
FUNCTION_TYPE_FIRST == funcMgtBuiltins[funcId].type ||
FUNCTION_TYPE_FIRST_PARTIAL == funcMgtBuiltins[funcId].type ||
FUNCTION_TYPE_FIRST_MERGE == funcMgtBuiltins[funcId].type;
}
bool fmIsSelectValueFunc(int32_t funcId) {
  if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
    return false;
......
@@ -39,6 +39,11 @@ typedef struct SMsgBuf {
  char* buf;
} SMsgBuf;
typedef struct SParseTablesMetaReq {
char dbFName[TSDB_DB_FNAME_LEN];
SHashObj* pTables;
} SParseTablesMetaReq;
typedef struct SParseMetaCache {
  SHashObj* pTableMeta;   // key is tbFName, element is STableMeta*
  SHashObj* pDbVgroup;    // key is dbFName, element is SArray<SVgroupInfo>*
@@ -95,7 +100,7 @@ int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFun
int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes);
int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput);
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
#ifdef __cplusplus
}
......
This diff is collapsed.
@@ -85,13 +85,13 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
  if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
    taosMemoryFreeClear(pVal->datum.p);
  }

  if (pParam->is_null && 1 == *(pParam->is_null)) {
    pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
    pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
    return TSDB_CODE_SUCCESS;
  }

  int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes);
  pVal->node.resType.type = pParam->buffer_type;
  pVal->node.resType.bytes = inputSize;
@@ -187,7 +187,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
  if (TSDB_CODE_SUCCESS == code) {
    code = buildCatalogReq(&metaCache, pCatalogReq);
  }
  destoryParseMetaCache(&metaCache, true);
  terrno = code;
  return code;
}
@@ -203,7 +203,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
      code = analyseSemantic(pCxt, pQuery, &metaCache);
    }
  }
  destoryParseMetaCache(&metaCache, false);
  terrno = code;
  return code;
}
......
@@ -50,6 +50,7 @@ struct MockTableMeta {
class MockCatalogServiceImpl;
class MockCatalogService {
 public:
  static void destoryTablesReq(void* p);
  static void destoryCatalogReq(SCatalogReq* pReq);
  static void destoryMetaRes(void* p);
  static void destoryMetaArrayRes(void* p);
......
@@ -85,7 +85,7 @@ static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDown
}
int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstreamSourceNode* pSource) {
  planDebug("QID:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId);
  return setSubplanExecutionNode(subplan->pNode, groupId, pSource);
}
@@ -104,7 +104,10 @@ static void clearSubplanExecutionNode(SPhysiNode* pNode) {
  FOREACH(pChild, pNode->pChildren) { clearSubplanExecutionNode((SPhysiNode*)pChild); }
}

void qClearSubplanExecutionNode(SSubplan* pSubplan) {
  planDebug("QID:0x%" PRIx64 " clear subplan execution node, groupId:%d", pSubplan->id.queryId, pSubplan->id.groupId);
  clearSubplanExecutionNode(pSubplan->pNode);
}
int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen) {
  if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType && NULL == pSubplan->pNode) {
......
The remaining file diffs in this commit are collapsed.