diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index b1e675cdf6aa5f3ffe891467e285467bb72966cf..50e8b357719fc6d1f4707e474afdf58fb4531970 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -68,6 +68,7 @@ public class SubscribeDemo {
System.out.println(meter);
}
}
+ consumer.unsubscribe();
}
} catch (ClassNotFoundException | SQLException e) {
e.printStackTrace();
diff --git a/docs/examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js
index c4f7e6df84933f8f8541814cabd231fcf5c2db68..5b65e1c90758f208b4fc32359e97f3fd83a3a380 100644
--- a/docs/examples/node/nativeexample/subscribe_demo.js
+++ b/docs/examples/node/nativeexample/subscribe_demo.js
@@ -28,7 +28,8 @@ function runConsumer() {
console.log(msg.topicPartition);
console.log(msg.block);
console.log(msg.fields)
- consumer.commit(msg);
+ // fixme(@xiaolei): commented temp, should be fixed.
+ //consumer.commit(msg);
console.log(`=======consumer ${i} done`)
}
@@ -48,4 +49,4 @@ try {
cursor.close();
conn.close();
}, 2000);
-}
\ No newline at end of file
+}
diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md
deleted file mode 100644
index 25d468cad3658190f6b9409637543061ac22f958..0000000000000000000000000000000000000000
--- a/docs/zh/07-develop/07-tmq.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-sidebar_label: 数据订阅
-description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
-title: 数据订阅
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-import Java from "./_sub_java.mdx";
-import Python from "./_sub_python.mdx";
-import Go from "./_sub_go.mdx";
-import Rust from "./_sub_rust.mdx";
-import Node from "./_sub_node.mdx";
-import CSharp from "./_sub_cs.mdx";
-import CDemo from "./_sub_c.mdx";
-
-
-为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
-
-与 kafka 一样,你需要定义 topic, 但 TDengine 的 topic 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 SELECT 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
-
-消费者订阅 topic 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的ACK机制,在宕机、重启等复杂环境下确保 at least once 消费。
-
-为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
-
-本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
-
-## 主要数据结构和API
-
-TMQ 的 API 中,与订阅相关的主要数据结构和API如下:
-
-```c
-typedef struct tmq_t tmq_t;
-typedef struct tmq_conf_t tmq_conf_t;
-typedef struct tmq_list_t tmq_list_t;
-
-typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
-
-DLL_EXPORT tmq_list_t *tmq_list_new();
-DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
-DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
-DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
-DLL_EXPORT const char *tmq_err2str(int32_t code);
-
-DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
-DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
-DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
-DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
-DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
-DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
-
-enum tmq_conf_res_t {
- TMQ_CONF_UNKNOWN = -2,
- TMQ_CONF_INVALID = -1,
- TMQ_CONF_OK = 0,
-};
-typedef enum tmq_conf_res_t tmq_conf_res_t;
-
-DLL_EXPORT tmq_conf_t *tmq_conf_new();
-DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
-DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
-DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
-```
-
-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面C语言的示例代码。
-
-## 写入数据
-
-首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
-
-```sql
-drop database if exists tmqdb;
-create database tmqdb;
-create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16) tags(t1 int, t3 varchar(16));
-create table tmqdb.ctb0 using tmqdb.stb tags(0, "subtable0");
-create table tmqdb.ctb1 using tmqdb.stb tags(1, "subtable1");
-insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
-insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
-```
-
-## 创建topic:
-
-```sql
-create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
-```
-
-TMQ支持多种订阅类型:
-
-### 列订阅
-
-语法:CREATE TOPIC topic_name as subquery
-通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)
-
-- TOPIC一旦创建则schema确定
-- 被订阅或用于计算的column和tag不可被删除、修改
-- 若发生schema变更,新增的column不出现在结果中
-
-### 超级表订阅
-语法:CREATE TOPIC topic_name AS STABLE stbName
-
-与select * from stbName订阅的区别是:
-- 不会限制用户的schema变更
-- 返回的是非结构化的数据:返回数据的schema会随之超级表的schema变化而变化
-- 用户对于要处理的每一个数据块都可能有不同的schema,因此,必须重新获取schema
-- 返回数据不带有tag
-
-## 创建 consumer 以及consumer group
-
-对于consumer, 目前支持的config包括:
-
-| 参数名称 | 参数值 | 备注 |
-| ---------------------------- | ------------------------------ | ------------------------------------------------------ |
-| group.id | 最大长度:192 | |
-| enable.auto.commit | 合法值:true, false | |
-| auto.commit.interval.ms | | |
-| auto.offset.reset | 合法值:earliest, latest, none | |
-| td.connect.ip | 用于连接,同taos_connect的参数 | |
-| td.connect.user | 用于连接,同taos_connect的参数 | |
-| td.connect.pass | 用于连接,同taos_connect的参数 | |
-| td.connect.port | 用于连接,同taos_connect的参数 | |
-| enable.heartbeat.background | 合法值:true, false | 开启后台心跳,即consumer不会因为长时间不poll而认为离线 |
-| experimental.snapshot.enable | 合法值:true, false | 从wal开始消费,还是从tsbs开始消费 |
-| msg.with.table.name | 合法值:true, false | 从消息中能否解析表名 |
-
-```sql
-/* 根据需要,设置消费组(group.id)、自动提交(enable.auto.commit)、自动提交时间间隔(auto.commit.interval.ms)、用户名(td.connect.user)、密码(td.connect.pass)等参数 */
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
- tmq_conf_set(conf, "group.id", "cgrpName");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "auto.offset.reset", "earliest");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
-
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- tmq_conf_destroy(conf);
-```
-
-上述配置中包括consumer group ID,如果多个 consumer 指定的 consumer group ID一样,则自动形成一个consumer group,共享消费进度。
-
-
-## 创建 topic 列表
-
-单个consumer支持同时订阅多个topic。
-
-```sql
- tmq_list_t* topicList = tmq_list_new();
- tmq_list_append(topicList, "topicName");
-```
-
-## 启动订阅并开始消费
-
-```
- /* 启动订阅 */
- tmq_subscribe(tmq, topicList);
- tmq_list_destroy(topicList);
-
- /* 循环poll消息 */
- while (running) {
- TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeOut);
- msg_process(tmqmsg);
- }
-```
-
-这里是一个 **while** 循环,每调用一次tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析API完成消息内容的解析。
-
-## 结束消费
-
-```sql
- /* 取消订阅 */
- tmq_unsubscribe(tmq);
-
- /* 关闭消费 */
- tmq_consumer_close(tmq);
-```
-
-## 删除topic
-
-如果不再需要,可以删除创建topic,但注意:只有没有被订阅的topic才能别删除。
-
-```sql
- /* 删除topic */
- drop topic topicName;
-```
-
-## 状态查看
-
-1、topics:查询已经创建的topic
-
-```sql
- show topics;
-```
-
-2、consumers:查询consumer的状态及其订阅的topic
-
-```sql
- show consumers;
-```
-
-3、subscriptions:查询consumer与vgroup之间的分配关系
-
-```sql
- show subscriptions;
-```
-
-## 示例代码
-
-本节展示各种语言的示例代码。
-
-
-
-
-```c
-{{#include examples/c/tmq.c}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-```python
-{{#include docs/examples/python/tmq_example.py}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..f36f76fd8565966dae8e20a31cc2d6903b5d26db
--- /dev/null
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -0,0 +1,747 @@
+---
+sidebar_label: 数据订阅
+description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
+title: 数据订阅
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import Java from "./_sub_java.mdx";
+import Python from "./_sub_python.mdx";
+import Go from "./_sub_go.mdx";
+import Rust from "./_sub_rust.mdx";
+import Node from "./_sub_node.mdx";
+import CSharp from "./_sub_cs.mdx";
+import CDemo from "./_sub_c.mdx";
+
+为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine 提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
+
+与 kafka 一样,你需要定义 *topic*, 但 TDengine 的 *topic* 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 `SELECT` 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
+
+消费者订阅 *topic* 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个 topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的 ACK 机制,在宕机、重启等复杂环境下确保 at least once 消费。
+
+为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
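+
+例如,下面是一个示意性的建库语句,其中库名 subscribe_db 与参数取值均为假设的示例,WAL_RETENTION_PERIOD、WAL_RETENTION_SIZE 的单位与缺省值请以 create database 语句的文档为准:
+
+```sql
+-- 示意:建库时为数据订阅额外保留 WAL(参数取值仅为示例)
+CREATE DATABASE subscribe_db WAL_RETENTION_PERIOD 86400 WAL_RETENTION_SIZE 1024;
+```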
+
+本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
+
+## 主要数据结构和 API
+
+不同语言下, TMQ 订阅相关的 API 及数据结构如下:
+
+
+
+
+```c
+typedef struct tmq_t tmq_t;
+typedef struct tmq_conf_t tmq_conf_t;
+typedef struct tmq_list_t tmq_list_t;
+
+typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
+
+DLL_EXPORT tmq_list_t *tmq_list_new();
+DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
+DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
+DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
+DLL_EXPORT const char *tmq_err2str(int32_t code);
+
+DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
+DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
+DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
+DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
+DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
+DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
+
+enum tmq_conf_res_t {
+ TMQ_CONF_UNKNOWN = -2,
+ TMQ_CONF_INVALID = -1,
+ TMQ_CONF_OK = 0,
+};
+typedef enum tmq_conf_res_t tmq_conf_res_t;
+
+DLL_EXPORT tmq_conf_t *tmq_conf_new();
+DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
+DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
+DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
+```
+
+这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+
+
+
+
+```java
+void subscribe(Collection<String> topics) throws SQLException;
+
+void unsubscribe() throws SQLException;
+
+Set<String> subscription() throws SQLException;
+
+ConsumerRecords<V> poll(Duration timeout) throws SQLException;
+
+void commitAsync();
+
+void commitAsync(OffsetCommitCallback callback);
+
+void commitSync() throws SQLException;
+
+void close() throws SQLException;
+```
+
+
+
+
+## 写入数据
+
+首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
+
+```sql
+DROP DATABASE IF EXISTS tmqdb;
+CREATE DATABASE tmqdb;
+CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
+CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
+CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
+INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
+INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
+```
+
+## 创建 *topic*
+
+TDengine 使用 SQL 创建一个 topic:
+
+```sql
+CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
+```
+
+TMQ 支持多种订阅类型:
+
+### 列订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name as subquery
+```
+
+通过 `SELECT` 语句订阅(包括 `SELECT *`,或 `SELECT ts, c1` 等指定列订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)。需要注意的是:
+
+- 该类型 TOPIC 一旦创建则订阅数据的结构确定。
+- 被订阅或用于计算的列或标签不可被删除(`ALTER table DROP`)、修改(`ALTER table MODIFY`)。
+- 若发生表结构变更,新增的列不出现在结果中,若发生列删除则会报错。
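+
+例如,下面的 topic(名称 topic_filtered 为示例名称)只订阅 c1 大于 1 的行,并对 c2 做标量函数计算(仅为示意):
+
+```sql
+CREATE TOPIC topic_filtered AS SELECT ts, c1, ABS(c2) FROM tmqdb.stb WHERE c1 > 1;
+```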
+
+### 超级表订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name AS STABLE stb_name
+```
+
+与 `SELECT * from stbName` 订阅的区别是:
+
+- 不会限制用户的表结构变更。
+- 返回的是非结构化的数据:返回数据的结构会随着超级表的表结构变化而变化。
+- 用户对于要处理的每一个数据块都可能有不同的表结构。
+- 返回数据不包含标签。
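+
+例如,基于前文创建的超级表 tmqdb.stb,可以这样创建一个超级表订阅(topic 名称 topic_stb 为示例名称,仅为示意):
+
+```sql
+CREATE TOPIC topic_stb AS STABLE tmqdb.stb;
+```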
+
+### 数据库订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
+```
+
+通过该语句可创建一个包含数据库所有表数据的订阅,`WITH META` 可选择将数据库结构变动信息加入到订阅消息流,TMQ 将消费当前数据库下所有表结构的变动,包括超级表的创建与删除,列添加、删除或修改,子表的创建、删除及 TAG 变动信息等等。消费者可通过 API 来判断具体的消息类型。这一点也是与 Kafka 不同的地方。
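+
+例如,订阅 tmqdb 库中所有表的数据,并通过 WITH META 将表结构变更信息一并加入消息流(topic 名称 topic_db 为示例名称,仅为示意):
+
+```sql
+CREATE TOPIC topic_db WITH META AS DATABASE tmqdb;
+```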
+
+## 创建消费者 *consumer*
+
+消费者需要通过一系列配置选项创建,基础配置项如下表所示:
+
+| 参数名称 | 类型 | 参数说明 | 备注 |
+| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
+| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | |
+| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
+| `client.id` | string | 客户端 ID | 最大长度:192。 |
+| `auto.offset.reset` | enum | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) |
+| `enable.auto.commit` | boolean | 启用自动提交 | 合法值:`true`, `false`。 |
+| `auto.commit.interval.ms` | integer | 以毫秒为单位的自动提交时间间隔 | |
+| `enable.heartbeat.background` | boolean | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | |
+| `experimental.snapshot.enable` | boolean | 从 WAL 开始消费,还是从 TSDB 开始消费 | |
+| `msg.with.table.name` | boolean | 是否允许从消息中解析表名 | |
+
+对于不同编程语言,其设置方式如下:
+
+
+
+
+```c
+/* 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、
+ 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数 */
+tmq_conf_t* conf = tmq_conf_new();
+tmq_conf_set(conf, "enable.auto.commit", "true");
+tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+tmq_conf_set(conf, "group.id", "cgrpName");
+tmq_conf_set(conf, "td.connect.user", "root");
+tmq_conf_set(conf, "td.connect.pass", "taosdata");
+tmq_conf_set(conf, "auto.offset.reset", "earliest");
+tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+tmq_conf_set(conf, "msg.with.table.name", "true");
+tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+tmq_conf_destroy(conf);
+```
+
+
+
+
+对于 Java 程序,使用如下配置项:
+
+| 参数名称 | 类型 | 参数说明 |
+| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
+| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
+| `value.deserializer.encoding` | string | 指定字符串解析的字符集 |
+
+需要注意:此处使用 `bootstrap.servers` 替代 `td.connect.ip` 和 `td.connect.port`,以提供与 Kafka 一致的接口。
+
+```java
+Properties properties = new Properties();
+properties.setProperty("enable.auto.commit", "true");
+properties.setProperty("auto.commit.interval.ms", "1000");
+properties.setProperty("group.id", "cgrpName");
+properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
+properties.setProperty("td.connect.user", "root");
+properties.setProperty("td.connect.pass", "taosdata");
+properties.setProperty("auto.offset.reset", "earliest");
+properties.setProperty("msg.with.table.name", "true");
+properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
+
+TaosConsumer<Meters> consumer = new TaosConsumer<>(properties);
+
+/* value deserializer definition. */
+import com.taosdata.jdbc.tmq.ReferenceDeserializer;
+
+public class MetersDeserializer extends ReferenceDeserializer<Meters> {
+}
+```
+
+
+
+
+上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。
+
+## 订阅 *topics*
+
+一个 consumer 支持同时订阅多个 topic。
+
+
+
+
+```c
+// 创建订阅 topics 列表
+tmq_list_t* topicList = tmq_list_new();
+tmq_list_append(topicList, "topicName");
+// 启动订阅
+tmq_subscribe(tmq, topicList);
+tmq_list_destroy(topicList);
+
+```
+
+
+
+
+```java
+List<String> topics = new ArrayList<>();
+topics.add("tmq_topic");
+consumer.subscribe(topics);
+```
+
+
+
+
+
+## 消费
+
+以下代码展示了不同语言下如何对 TMQ 消息进行消费。
+
+
+
+
+```c
+// 消费数据
+while (running) {
+ TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut);
+ msg_process(msg);
+}
+```
+
+这里是一个 **while** 循环,每调用一次 tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析 API 完成消息内容的解析。
+
+
+
+
+```java
+while(running){
+  ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
+  for (Meters meter : meters) {
+    processMsg(meter);
+  }
+}
+```
+
+
+
+
+## 结束消费
+
+消费结束后,应当取消订阅。
+
+
+
+
+```c
+/* 取消订阅 */
+tmq_unsubscribe(tmq);
+
+/* 关闭消费者对象 */
+tmq_consumer_close(tmq);
+```
+
+
+
+
+```java
+/* 取消订阅 */
+consumer.unsubscribe();
+
+/* 关闭消费 */
+consumer.close();
+```
+
+
+
+
+## 删除 *topic*
+
+如果不再需要订阅数据,可以删除 topic,需要注意:只有当前未在订阅中的 TOPIC 才能被删除。
+
+```sql
+/* 删除 topic */
+DROP TOPIC topic_name;
+```
+
+## 状态查看
+
+1、*topics*:查询已经创建的 topic
+
+```sql
+SHOW TOPICS;
+```
+
+2、consumers:查询 consumer 的状态及其订阅的 topic
+
+```sql
+SHOW CONSUMERS;
+```
+
+3、subscriptions:查询 consumer 与 vgroup 之间的分配关系
+
+```sql
+SHOW SUBSCRIPTIONS;
+```
+
+## 示例代码
+
+以下是各语言的完整示例代码。
+
+
+
+
+```c
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "taos.h"
+
+static int running = 1;
+static char dbName[64] = "tmqdb";
+static char stbName[64] = "stb";
+static char topicName[64] = "topicname";
+
+static int32_t msg_process(TAOS_RES* msg) {
+ char buf[1024];
+ int32_t rows = 0;
+
+ const char* topicName = tmq_get_topic_name(msg);
+ const char* dbName = tmq_get_db_name(msg);
+ int32_t vgroupId = tmq_get_vgroup_id(msg);
+
+ printf("topic: %s\n", topicName);
+ printf("db: %s\n", dbName);
+ printf("vgroup id: %d\n", vgroupId);
+
+ while (1) {
+ TAOS_ROW row = taos_fetch_row(msg);
+ if (row == NULL) break;
+
+ TAOS_FIELD* fields = taos_fetch_fields(msg);
+ int32_t numOfFields = taos_field_count(msg);
+ int32_t* length = taos_fetch_lengths(msg);
+ int32_t precision = taos_result_precision(msg);
+ const char* tbName = tmq_get_table_name(msg);
+ rows++;
+ taos_print_row(buf, row, fields, numOfFields);
+ printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
+ }
+
+ return rows;
+}
+
+static int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes;
+ // drop database if exists
+ printf("create database\n");
+ pRes = taos_query(pConn, "drop database if exists tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create database
+ pRes = taos_query(pConn, "create database tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create super table
+ printf("create super table\n");
+ pRes = taos_query(
+ pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create sub tables
+ printf("create sub tables\n");
+ pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to create child table ctb3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // insert data
+ printf("insert data into sub tables\n");
+ pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
+ if (taos_errno(pRes) != 0) {
+    printf("failed to insert into ctb3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+int32_t create_topic() {
+ printf("create topic\n");
+ TAOS_RES* pRes;
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ pRes = taos_query(pConn, "use tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
+ printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
+}
+
+tmq_t* build_consumer() {
+ tmq_conf_res_t code;
+ tmq_conf_t* conf = tmq_conf_new();
+ code = tmq_conf_set(conf, "enable.auto.commit", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "group.id", "cgrpName");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "client.id", "user defined name");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.user", "root");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "msg.with.table.name", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+
+ tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+ tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+ tmq_conf_destroy(conf);
+ return tmq;
+}
+
+tmq_list_t* build_topic_list() {
+ tmq_list_t* topicList = tmq_list_new();
+ int32_t code = tmq_list_append(topicList, "topicname");
+ if (code) {
+ return NULL;
+ }
+ return topicList;
+}
+
+void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
+ int32_t code;
+
+ if ((code = tmq_subscribe(tmq, topicList))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
+ return;
+ }
+
+ int32_t totalRows = 0;
+ int32_t msgCnt = 0;
+ int32_t timeout = 5000;
+ while (running) {
+ TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
+ if (tmqmsg) {
+ msgCnt++;
+ totalRows += msg_process(tmqmsg);
+ taos_free_result(tmqmsg);
+ /*} else {*/
+ /*break;*/
+ }
+ }
+
+ fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
+}
+
+int main(int argc, char* argv[]) {
+ int32_t code;
+
+ if (init_env() < 0) {
+ return -1;
+ }
+
+ if (create_topic() < 0) {
+ return -1;
+ }
+
+ tmq_t* tmq = build_consumer();
+ if (NULL == tmq) {
+ fprintf(stderr, "%% build_consumer() fail!\n");
+ return -1;
+ }
+
+ tmq_list_t* topic_list = build_topic_list();
+ if (NULL == topic_list) {
+ return -1;
+ }
+
+ basic_consume_loop(tmq, topic_list);
+
+ code = tmq_unsubscribe(tmq);
+ if (code) {
+ fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
+ } else {
+ fprintf(stderr, "%% unsubscribe\n");
+ }
+
+ code = tmq_consumer_close(tmq);
+ if (code) {
+ fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+ } else {
+ fprintf(stderr, "%% Consumer closed\n");
+ }
+
+ return 0;
+}
+
+```
+
+[查看源码](https://github.com/taosdata/TDengine/blob/develop/examples/c/tmq.c)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```python
+import taos
+from taos.tmq import *
+
+conn = taos.connect()
+
+# create database
+conn.execute("drop database if exists py_tmq")
+conn.execute("create database if not exists py_tmq vgroups 2")
+
+# create table and stables
+conn.select_db("py_tmq")
+conn.execute("create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+conn.execute("create table if not exists tb1 using stb1 tags(1)")
+conn.execute("create table if not exists tb2 using stb1 tags(2)")
+conn.execute("create table if not exists tb3 using stb1 tags(3)")
+
+# create topic
+conn.execute("drop topic if exists topic_ctb_column")
+conn.execute("create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1")
+
+# set consumer configure options
+conf = TaosTmqConf()
+conf.set("group.id", "tg2")
+conf.set("td.connect.user", "root")
+conf.set("td.connect.pass", "taosdata")
+conf.set("enable.auto.commit", "true")
+conf.set("msg.with.table.name", "true")
+
+def tmq_commit_cb_print(tmq, resp, offset, param=None):
+ print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
+
+conf.set_auto_commit_cb(tmq_commit_cb_print, None)
+
+# build consumer
+tmq = conf.new_consumer()
+
+# build topic list
+topic_list = TaosTmqList()
+topic_list.append("topic_ctb_column")
+
+# subscribe consumer
+tmq.subscribe(topic_list)
+
+# check subscriptions
+sub_list = tmq.subscription()
+print("subscribed topics: ",sub_list)
+
+# start subscribe
+while 1:
+ res = tmq.poll(1000)
+ if res:
+ topic = res.get_topic_name()
+ vg = res.get_vgroup_id()
+ db = res.get_db_name()
+ print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
+ for row in res:
+ print(row)
+ tb = res.get_table_name()
+ print(f"from table: {tb}")
+
+```
+
+[查看源码](https://github.com/taosdata/TDengine/blob/develop/docs/examples/python/tmq_example.py)
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 8aa6c43747dffba640ff8715643f70d89d827f41..5312d7d2f3597ca63d9d3c43bc2264ca75877fb7 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -103,7 +103,7 @@ SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
在超级表和子表的查询中可以指定 _标签列_,且标签列的值会与普通列的数据一起返回。
```sql
-ELECT location, groupid, current FROM d1001 LIMIT 2;
+SELECT location, groupid, current FROM d1001 LIMIT 2;
```
### 结果去重
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
new file mode 100644
index 0000000000000000000000000000000000000000..68f3b5cc293d1aaf181266e9e20868ff3a3fe5cc
--- /dev/null
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -0,0 +1,95 @@
+---
+sidebar_label: 3.0 版本语法变更
+title: 3.0 版本语法变更
+description: "TDengine 3.0 版本的语法变更说明"
+---
+
+## SQL 基本元素变更
+
+| # | **元素** | **差异性** | **说明** |
+| - | :------- | :--------: | :------- |
+| 1 | VARCHAR | 新增 | BINARY类型的别名。
+| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
+| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
+| 4 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
+| 5 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
+| 6 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
+| 7 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
+| 8 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
+| 9 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
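+
+例如,下面的查询同时用到了新增的 _ROWTS 伪列与 TIMESTAMP 字面量语法(仅为示意,表 d1001 及其列 ts、current 为假设的示例表结构):
+
+```sql
+SELECT _rowts, current FROM d1001 WHERE ts > TIMESTAMP '2022-01-01 00:00:00';
+```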
+
+## SQL 语句变更
+
+下表列出了与 2.x 版本相比,TDengine 3.0 版本在 SQL 语句方面的变更。
+
+| # | **语句** | **差异性** | **说明** |
+| - | :------- | :--------: | :------- |
+| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
+| 3 | ALTER DATABASE | 调整 | 废除<br/>- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。3.0.0版本STRICT暂不支持修改。<br/>- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。<br/>- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。<br/>- COMP:3.0版本暂不支持修改。<br/>新增<br/>- CACHEMODEL:表示是否在内存中缓存子表的最近数据。<br/>- CACHESIZE:表示缓存子表最近数据的内存大小。<br/>- WAL_FSYNC_PERIOD:代替原FSYNC参数。<br/>- WAL_LEVEL:代替原WAL参数。<br/>调整<br/>- REPLICA:3.0.0版本暂不支持修改。<br/>- KEEP:3.0版本新增支持带单位的设置方式。
+| 4 | ALTER STABLE | 调整 | 废除<br/>- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增<br/>- RENAME TAG:代替原CHANGE TAG子句。<br/>- COMMENT:修改超级表的注释。
+| 5 | ALTER TABLE | 调整 | 废除<br/>- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增<br/>- RENAME TAG:代替原CHANGE TAG子句。<br/>- COMMENT:修改表的注释。<br/>- TTL:修改表的生命周期。
+| 6 | ALTER USER | 调整 | 废除<br/>- PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增<br/>- ENABLE:启用或停用此用户。<br/>- SYSINFO:修改用户是否可查看系统信息。
+| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
+| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 9 | CREATE DATABASE | 调整 | 废除<br/>- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。<br/>- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。<br/>- DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。<br/>- FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。<br/>- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。<br/>- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。<br/>- WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。<br/>新增<br/>- BUFFER:一个 VNODE 写入内存池大小。<br/>- CACHEMODEL:表示是否在内存中缓存子表的最近数据。<br/>- CACHESIZE:表示缓存子表最近数据的内存大小。<br/>- DURATION:代替原DAYS参数。新增支持带单位的设置方式。<br/>- PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。<br/>- PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。<br/>- RETENTIONS:表示数据的聚合周期和保存时长。<br/>- STRICT:表示数据同步的一致性要求。<br/>- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。<br/>- VGROUPS:数据库中初始VGROUP的数目。<br/>- WAL_FSYNC_PERIOD:代替原FSYNC参数。<br/>- WAL_LEVEL:代替原WAL参数。<br/>- WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。<br/>- WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。<br/>- WAL_ROLL_PERIOD:wal文件切换时长。<br/>- WAL_SEGMENT_SIZE:wal单个文件大小。<br/>调整<br/>- KEEP:3.0版本新增支持带单位的设置方式。
+| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<br/>- CREATE DNODE dnode_host_name PORT port_val
+| 11 | CREATE INDEX | 新增 | 创建SMA索引。
+| 12 | CREATE MNODE | 新增 | 创建管理节点。
+| 13 | CREATE QNODE | 新增 | 创建查询节点。
+| 14 | CREATE STABLE | 调整 | 新增表参数语法<br/>- COMMENT:表注释。
+| 15 | CREATE STREAM | 新增 | 创建流。
+| 16 | CREATE TABLE | 调整 | 新增表参数语法<br/>- COMMENT:表注释。<br/>- WATERMARK:指定窗口的关闭时间。<br/>- MAX_DELAY:用于控制推送计算结果的最大延迟。<br/>- ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。<br/>- SMA:提供基于数据块的自定义预计算功能。<br/>- TTL:用来指定表的生命周期的参数。
+| 17 | CREATE TOPIC | 新增 | 创建订阅主题。
+| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。
+| 20 | DROP INDEX | 新增 | 删除索引。
+| 21 | DROP MNODE | 新增 | 删除管理节点。
+| 22 | DROP QNODE | 新增 | 删除查询节点。
+| 23 | DROP STREAM | 新增 | 删除流。
+| 24 | DROP TABLE | 调整 | 新增批量删除语法
+| 25 | DROP TOPIC | 新增 | 删除订阅主题。
+| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。
+| 27 | GRANT | 新增 | 授予用户权限。
+| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
+| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
+| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
+| 31 | REVOKE | 新增 | 回收用户权限。
+| 32 | SELECT | 调整 | - SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。<br/>- DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。<br/>- JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。<br/>- FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。<br/>- WHERE后可以使用任意的标量表达式。<br/>- GROUP BY功能增强。支持任意标量表达式及其组合的分组。<br/>- SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。<br/>- STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。<br/>- ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。<br/>- 新增PARTITION BY语法。替代原来的GROUP BY tags(示例见表后)。
+| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 34 | SHOW APPS |新增 | 显示接入集群的应用(客户端)信息。
+| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。
+| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。
+| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。
+| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。
+| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。
+| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。
+| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。
+| 42 | SHOW QNODES | 新增 | 显示当前系统中QNODE的信息。
+| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。
+| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。
+| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有的订阅关系
+| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。
+| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM { tb_name \| stb_name }方式。
+| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。
+| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
+| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
+| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
+| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
+| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
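+
+上表 SELECT 条目中提到,3.0 版本新增 PARTITION BY 语法以替代原来的 GROUP BY tags,示意如下(meters、location、current 为假设的示例超级表及列名):
+
+```sql
+-- 2.x 写法:SELECT AVG(current) FROM meters GROUP BY location;
+-- 3.0 写法(示意):
+SELECT location, AVG(current) FROM meters PARTITION BY location;
+```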
+
+## SQL 函数变更
+
+| # | **函数** | **差异性** | **说明** |
+| - | :------- | :--------: | :------- |
+| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。
+| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
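+
+例如,上表中的 TWA 等函数在 3.0 版本中可以直接作用于超级表(仅为示意,meters、current、groupid 为假设的示例超级表及列名):
+
+```sql
+-- 不加 PARTITION BY 时,超级表所有子表的数据会被合并成一条时间线后再计算
+SELECT TWA(current) FROM meters;
+-- 按标签分组时,每个分组各自形成一条时间线分别计算
+SELECT groupid, TWA(current) FROM meters PARTITION BY groupid;
+```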
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 900fff1ba250198d03ff1d8f37261c36f7efa150..821679551c453b1a3f2937ac5d2409dd733cd593 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -3,7 +3,7 @@ title: TAOS SQL
description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。
+本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节。
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx
index ef9da4beee5fa94397176977f9d2b4b6e51e8c32..c0b83e7d4a96a82f4e741b7fa7918c5bc5f618c9 100644
--- a/docs/zh/14-reference/03-connector/java.mdx
+++ b/docs/zh/14-reference/03-connector/java.mdx
@@ -765,11 +765,11 @@ public abstract class ConsumerLoop {
shutdownLatch.await();
}
- static class ResultDeserializer extends ReferenceDeserializer {
+ public static class ResultDeserializer extends ReferenceDeserializer {
}
- static class ResultBean {
+ public static class ResultBean {
private Timestamp ts;
private int speed;
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 845693a98e00b2342c3fd749b935b7a36f9c3fbf..d2efc5baf381d7631533f9b80fa2994dc16a221e 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -647,3 +647,173 @@ charset 的有效值是 UTF-8。
| 含义 | 是否启动 udf 服务 |
| 取值范围 | 0: 不启动;1:启动 |
| 缺省值 | 1 |
+
+## 2.X 与 3.0 配置参数对比
+| # | **参数** | **适用于 2.X 版本** | **适用于 3.0 版本** |
+| --- | :-----------------: | --------------- | --------------- |
+| 1 | firstEp | 是 | 是 |
+| 2 | secondEp | 是 | 是 |
+| 3 | fqdn | 是 | 是 |
+| 4 | serverPort | 是 | 是 |
+| 5 | maxShellConns | 是 | 是 |
+| 6 | monitor | 是 | 是 |
+| 7 | monitorFqdn | 否 | 是 |
+| 8 | monitorPort | 否 | 是 |
+| 9 | monitorInterval | 是 | 是 |
+| 10 | monitorMaxLogs | 否 | 是 |
+| 11 | monitorComp | 否 | 是 |
+| 12 | telemetryReporting | 是 | 是 |
+| 13 | telemetryInterval | 否 | 是 |
+| 14 | telemetryServer | 否 | 是 |
+| 15 | telemetryPort | 否 | 是 |
+| 16 | queryPolicy | 否 | 是 |
+| 17 | querySmaOptimize | 否 | 是 |
+| 18 | queryBufferSize | 是 | 是 |
+| 19 | maxNumOfDistinctRes | 是 | 是 |
+| 20 | minSlidingTime | 是 | 是 |
+| 21 | minIntervalTime | 是 | 是 |
+| 22 | countAlwaysReturnValue | 是 | 是 |
+| 23 | dataDir | 是 | 是 |
+| 24 | minimalDataDirGB | 是 | 是 |
+| 25 | supportVnodes | 否 | 是 |
+| 26 | tempDir | 是 | 是 |
+| 27 | minimalTmpDirGB | 是 | 是 |
+| 28 | compressMsgSize | 是 | 是 |
+| 29 | compressColData | 是 | 是 |
+| 30 | smlChildTableName | 是 | 是 |
+| 31 | smlTagName | 是 | 是 |
+| 32 | smlDataFormat | 否 | 是 |
+| 33 | statusInterval | 是 | 是 |
+| 34 | shellActivityTimer | 是 | 是 |
+| 35 | transPullupInterval | 否 | 是 |
+| 36 | mqRebalanceInterval | 否 | 是 |
+| 37 | ttlUnit | 否 | 是 |
+| 38 | ttlPushInterval | 否 | 是 |
+| 39 | numOfTaskQueueThreads | 否 | 是 |
+| 40 | numOfRpcThreads | 否 | 是 |
+| 41 | numOfCommitThreads | 是 | 是 |
+| 42 | numOfMnodeReadThreads | 否 | 是 |
+| 43 | numOfVnodeQueryThreads | 否 | 是 |
+| 44 | numOfVnodeStreamThreads | 否 | 是 |
+| 45 | numOfVnodeFetchThreads | 否 | 是 |
+| 46 | numOfVnodeWriteThreads | 否 | 是 |
+| 47 | numOfVnodeSyncThreads | 否 | 是 |
+| 48 | numOfQnodeQueryThreads | 否 | 是 |
+| 49 | numOfQnodeFetchThreads | 否 | 是 |
+| 50 | numOfSnodeSharedThreads | 否 | 是 |
+| 51 | numOfSnodeUniqueThreads | 否 | 是 |
+| 52 | rpcQueueMemoryAllowed | 否 | 是 |
+| 53 | logDir | 是 | 是 |
+| 54 | minimalLogDirGB | 是 | 是 |
+| 55 | numOfLogLines | 是 | 是 |
+| 56 | asyncLog | 是 | 是 |
+| 57 | logKeepDays | 是 | 是 |
+| 58 | debugFlag | 是 | 是 |
+| 59 | tmrDebugFlag | 是 | 是 |
+| 60 | uDebugFlag | 是 | 是 |
+| 61 | rpcDebugFlag | 是 | 是 |
+| 62 | jniDebugFlag | 是 | 是 |
+| 63 | qDebugFlag | 是 | 是 |
+| 64 | cDebugFlag | 是 | 是 |
+| 65 | dDebugFlag | 是 | 是 |
+| 66 | vDebugFlag | 是 | 是 |
+| 67 | mDebugFlag | 是 | 是 |
+| 68 | wDebugFlag | 是 | 是 |
+| 69 | sDebugFlag | 是 | 是 |
+| 70 | tsdbDebugFlag | 是 | 是 |
+| 71 | tqDebugFlag | 否 | 是 |
+| 72 | fsDebugFlag | 是 | 是 |
+| 73 | udfDebugFlag | 否 | 是 |
+| 74 | smaDebugFlag | 否 | 是 |
+| 75 | idxDebugFlag | 否 | 是 |
+| 76 | tdbDebugFlag | 否 | 是 |
+| 77 | metaDebugFlag | 否 | 是 |
+| 78 | timezone | 是 | 是 |
+| 79 | locale | 是 | 是 |
+| 80 | charset | 是 | 是 |
+| 81 | udf | 是 | 是 |
+| 82 | enableCoreFile | 是 | 是 |
+| 83 | arbitrator | 是 | 否 |
+| 84 | numOfThreadsPerCore | 是 | 否 |
+| 85 | numOfMnodes | 是 | 否 |
+| 86 | vnodeBak | 是 | 否 |
+| 87 | balance | 是 | 否 |
+| 88 | balanceInterval | 是 | 否 |
+| 89 | offlineThreshold | 是 | 否 |
+| 90 | role | 是 | 否 |
+| 91 | dnodeNopLoop | 是 | 否 |
+| 92 | keepTimeOffset | 是 | 否 |
+| 93 | rpcTimer | 是 | 否 |
+| 94 | rpcMaxTime | 是 | 否 |
+| 95 | rpcForceTcp | 是 | 否 |
+| 96 | tcpConnTimeout | 是 | 否 |
+| 97 | syncCheckInterval | 是 | 否 |
+| 98 | maxTmrCtrl | 是 | 否 |
+| 99 | monitorReplica | 是 | 否 |
+| 100 | smlTagNullName | 是 | 否 |
+| 101 | keepColumnName | 是 | 否 |
+| 102 | ratioOfQueryCores | 是 | 否 |
+| 103 | maxStreamCompDelay | 是 | 否 |
+| 104 | maxFirstStreamCompDelay | 是 | 否 |
+| 105 | retryStreamCompDelay | 是 | 否 |
+| 106 | streamCompDelayRatio | 是 | 否 |
+| 107 | maxVgroupsPerDb | 是 | 否 |
+| 108 | maxTablesPerVnode | 是 | 否 |
+| 109 | minTablesPerVnode | 是 | 否 |
+| 110 | tableIncStepPerVnode | 是 | 否 |
+| 111 | cache | 是 | 否 |
+| 112 | blocks | 是 | 否 |
+| 113 | days | 是 | 否 |
+| 114 | keep | 是 | 否 |
+| 115 | minRows | 是 | 否 |
+| 116 | maxRows | 是 | 否 |
+| 117 | quorum | 是 | 否 |
+| 118 | comp | 是 | 否 |
+| 119 | walLevel | 是 | 否 |
+| 120 | fsync | 是 | 否 |
+| 121 | replica | 是 | 否 |
+| 122 | partitions | 是 | 否 |
+| 123 | quorum | 是 | 否 |
+| 124 | update | 是 | 否 |
+| 125 | cachelast | 是 | 否 |
+| 126 | maxSQLLength | 是 | 否 |
+| 127 | maxWildCardsLength | 是 | 否 |
+| 128 | maxRegexStringLen | 是 | 否 |
+| 129 | maxNumOfOrderedRes | 是 | 否 |
+| 130 | maxConnections | 是 | 否 |
+| 131 | mnodeEqualVnodeNum | 是 | 否 |
+| 132 | http | 是 | 否 |
+| 133 | httpEnableRecordSql | 是 | 否 |
+| 134 | httpMaxThreads | 是 | 否 |
+| 135 | restfulRowLimit | 是 | 否 |
+| 136 | httpDbNameMandatory | 是 | 否 |
+| 137 | httpKeepAlive | 是 | 否 |
+| 138 | enableRecordSql | 是 | 否 |
+| 139 | maxBinaryDisplayWidth | 是 | 否 |
+| 140 | stream | 是 | 否 |
+| 141 | retrieveBlockingModel | 是 | 否 |
+| 142 | tsdbMetaCompactRatio | 是 | 否 |
+| 143 | defaultJSONStrType | 是 | 否 |
+| 144 | walFlushSize | 是 | 否 |
+| 145 | keepTimeOffset | 是 | 否 |
+| 146 | flowctrl | 是 | 否 |
+| 147 | slaveQuery | 是 | 否 |
+| 148 | adjustMaster | 是 | 否 |
+| 149 | topicBinaryLen | 是 | 否 |
+| 150 | telegrafUseFieldNum | 是 | 否 |
+| 151 | deadLockKillQuery | 是 | 否 |
+| 152 | clientMerge | 是 | 否 |
+| 153 | sdbDebugFlag | 是 | 否 |
+| 154 | odbcDebugFlag | 是 | 否 |
+| 155 | httpDebugFlag | 是 | 否 |
+| 156 | monDebugFlag | 是 | 否 |
+| 157 | cqDebugFlag | 是 | 否 |
+| 158 | shortcutFlag | 是 | 否 |
+| 159 | probeSeconds | 是 | 否 |
+| 160 | probeKillSeconds | 是 | 否 |
+| 161 | probeInterval | 是 | 否 |
+| 162 | lossyColumns | 是 | 否 |
+| 163 | fPrecision | 是 | 否 |
+| 164 | dPrecision | 是 | 否 |
+| 165 | maxRange | 是 | 否 |
+| 166 | range | 是 | 否 |
diff --git a/docs/zh/28-releases.md b/docs/zh/28-releases.md
index 5f30325829bda75d466118f69f8516908c4c99ab..311d69ac1bbbe7adb1da78c1dde74fcd748c9f62 100644
--- a/docs/zh/28-releases.md
+++ b/docs/zh/28-releases.md
@@ -3,7 +3,7 @@ sidebar_label: 发布历史
title: 发布历史
---
-import Release from "/components/Release";
+import Release from "/components/ReleaseV3";
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index fc34915fe75498d907381a22461f7dae6536b8a4..19adaad116ef65673f5541b5216ce12d2d9151c7 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -45,10 +45,9 @@ static int32_t msg_process(TAOS_RES* msg) {
int32_t numOfFields = taos_field_count(msg);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
- const char* tbName = tmq_get_table_name(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
- printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
+ printf("row content: %s\n", buf);
}
return rows;
@@ -167,7 +166,7 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -199,9 +198,7 @@ tmq_t* build_consumer() {
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "msg.with.table.name", "true");
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) return NULL;
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -220,14 +217,7 @@ tmq_list_t* build_topic_list() {
return topicList;
}
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topicList))) {
- fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
- return;
- }
-
+void basic_consume_loop(tmq_t* tmq) {
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 5000;
@@ -237,8 +227,8 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
msgCnt++;
totalRows += msg_process(tmqmsg);
taos_free_result(tmqmsg);
- /*} else {*/
- /*break;*/
+ } else {
+ break;
}
}
@@ -267,14 +257,12 @@ int main(int argc, char* argv[]) {
return -1;
}
- basic_consume_loop(tmq, topic_list);
-
- code = tmq_unsubscribe(tmq);
- if (code) {
- fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
- } else {
- fprintf(stderr, "%% unsubscribe\n");
+ if ((code = tmq_subscribe(tmq, topic_list))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
+ tmq_list_destroy(topic_list);
+
+ basic_consume_loop(tmq);
code = tmq_consumer_close(tmq);
if (code) {
diff --git a/include/client/taos.h b/include/client/taos.h
index dd7266bd96dcb74bb19837ab470a744925db4e64..f260b84f4aaf238badb1de3a6446b639b5681fa9 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -131,10 +131,10 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int taos_init(void);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
-DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
-DLL_EXPORT void taos_close(TAOS *taos);
+DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
+DLL_EXPORT void taos_close(TAOS *taos);
-const char *taos_data_type(int type);
+const char *taos_data_type(int type);
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
@@ -244,33 +244,37 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
+DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
+
+/* ------------------------------ TAOSX -----------------------------------*/
+// note: following apis are unstable
enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
};
-typedef struct tmq_raw_data{
- void* raw;
+typedef struct tmq_raw_data {
+ void *raw;
uint32_t raw_len;
uint16_t raw_type;
} tmq_raw_data;
typedef enum tmq_res_t tmq_res_t;
-DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
-DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
-DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname);
-DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
-DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
-DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
-DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
-
-/* ------------------------------ TMQ END -------------------------------- */
+DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
+DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
+DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
+DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
+DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
+// Returning null means error. Returned result need to be freed by tmq_free_json_meta
+DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res);
+DLL_EXPORT void tmq_free_json_meta(char *jsonMeta);
+
+/* ---------------------------- TAOSX END -------------------------------- */
typedef enum {
TSDB_SRV_STATUS_UNAVAILABLE = 0,
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index a3de9164a2d4418dd1edc8493d46c20f2fafdcac..717278d51d1b252dc3f2bada18a61bbb65739b6e 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -53,6 +53,8 @@ typedef struct SParseContext {
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
+ SArray* pTableMetaPos; // sql table pos => catalog data pos
+ SArray* pTableVgroupPos; // sql table pos => catalog data pos
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@@ -84,8 +86,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
int32_t rowNum);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
-int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind,
- char* msgBuf, int32_t msgBufLen);
+int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
+ TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 3ca6978156bb99d40245bd89c09981786c3b8d46..d7ec3697afe6500b38eb102339d4f3a7ea77d550 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -622,6 +622,7 @@ int32_t* taosGetErrno();
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
+#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002)
#ifdef __cplusplus
}
diff --git a/include/util/tref.h b/include/util/tref.h
index 7e08bb045b08f56afe59436d07576b6e7604c12c..c2cc54cb07ac3167941061b475f8811e460a3b91 100644
--- a/include/util/tref.h
+++ b/include/util/tref.h
@@ -29,11 +29,11 @@ int32_t taosOpenRef(int32_t max, void (*fp)(void *));
// close the reference set, refId is the return value by taosOpenRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
-int32_t taosCloseRef(int32_t refId);
+int32_t taosCloseRef(int32_t rsetId);
// add ref, p is the pointer to resource or pointer ID
// return Reference ID(rid) allocated. On error, -1 is returned, and terrno is set appropriately
-int64_t taosAddRef(int32_t refId, void *p);
+int64_t taosAddRef(int32_t rsetId, void *p);
// remove ref, rid is the reference ID returned by taosAddRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index acdb3b68b0a88477d1ff5f7856e533e33b49e5a7..9c086fc83e155b40505c42c8096e57b7e03a9bca 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -689,11 +689,11 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
TDMT_VND_CREATE_TABLE == pRequest->type) {
pRequest->body.resInfo.numOfRows = res.numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, res.numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, res.numOfRows);
}
-
+
schedulerFreeJob(&pRequest->body.queryJob, 0);
}
@@ -800,8 +800,8 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
break;
}
case TDMT_VND_SUBMIT: {
- atomic_add_fetch_64((int64_t *)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
-
+ atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
+
code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset);
break;
}
@@ -832,9 +832,9 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (pResult) {
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, pResult->numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
}
}
@@ -877,14 +877,14 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
-
+
if (pQuery->pRoot && !pRequest->inRetry) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
}
}
@@ -1467,9 +1467,9 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
if (pResultInfo->numOfRows == 0) {
return NULL;
@@ -2006,7 +2006,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
bool inEscape = false;
int32_t code = 0;
- void *pIter = NULL;
+ void* pIter = NULL;
int32_t vIdx = 0;
int32_t vPos[2];
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 0ec724c6d0a388724dbabfd6e4e3e55559c32f14..0e95cd4d999f30343a66996d07409b01bdde097a 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -192,6 +192,7 @@ void taos_free_result(TAOS_RES *res) {
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
pRsp->resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj *pRspObj = (SMqMetaRspObj *)res;
taosMemoryFree(pRspObj->metaRsp.metaRsp);
diff --git a/source/client/src/taosx.c b/source/client/src/taosx.c
new file mode 100644
index 0000000000000000000000000000000000000000..677567e38ffcecefaa72373ac02a976cb2078676
--- /dev/null
+++ b/source/client/src/taosx.c
@@ -0,0 +1,1628 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cJSON.h"
+#include "clientInt.h"
+#include "clientLog.h"
+#include "parser.h"
+#include "tdatablock.h"
+#include "tdef.h"
+#include "tglobal.h"
+#include "tmsgtype.h"
+#include "tqueue.h"
+#include "tref.h"
+#include "ttimer.h"
+
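+// Build an unformatted cJSON string describing a create event for a super or normal
+// table: {"type":"create","tableName":...,"tableType":"super"|"normal","columns":[...],"tags":[...]}.
+// The caller frees the returned string (see tmq_free_json_meta).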
+static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
+ int8_t t) {
+ char* string = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+
+ // char uid[32] = {0};
+ // sprintf(uid, "%"PRIi64, id);
+ // cJSON* id_ = cJSON_CreateString(uid);
+ // cJSON_AddItemToObject(json, "id", id_);
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* columns = cJSON_CreateArray();
+ for (int i = 0; i < schemaRow->nCols; i++) {
+ cJSON* column = cJSON_CreateObject();
+ SSchema* s = schemaRow->pSchema + i;
+ cJSON* cname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(column, "name", cname);
+ cJSON* ctype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(column, "type", ctype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ }
+ cJSON_AddItemToArray(columns, column);
+ }
+ cJSON_AddItemToObject(json, "columns", columns);
+
+ cJSON* tags = cJSON_CreateArray();
+ for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
+ cJSON* tag = cJSON_CreateObject();
+ SSchema* s = schemaTag->pSchema + i;
+ cJSON* tname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(tag, "name", tname);
+ cJSON* ttype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ }
+ cJSON_AddItemToArray(tags, tag);
+ }
+ cJSON_AddItemToObject(json, "tags", tags);
+
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ return string;
+}
+
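+// Decode an SMAlterStbReq and render it as an "alter" JSON event; which fields are
+// emitted (colName, colType, colLength, colNewName) depends on req.alterType.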
+static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
+ SMAlterStbReq req = {0};
+ cJSON* json = NULL;
+ char* string = NULL;
+
+ if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
+ goto end;
+ }
+
+ json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto end;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ SName name = {0};
+ tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ cJSON* tableName = cJSON_CreateString(name.tname);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* alterType = cJSON_CreateNumber(req.alterType);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+ switch (req.alterType) {
+ case TSDB_ALTER_TABLE_ADD_TAG:
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_TAG:
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
+ TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
+ cJSON* colName = cJSON_CreateString(oldField->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(newField->name);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+end:
+ cJSON_Delete(json);
+ tFreeSMAltertbReq(&req);
+ return string;
+}
+
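+// Decode a create-stable meta message (payload follows an SMsgHead) and convert it to
+// the create JSON above; processAlterStb below decodes the same request type but emits
+// the alter JSON carried in alterOriData.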
+static char* processCreateStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
+static char* processAlterStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
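+// Build the create JSON for a child table: its name, the super table it is created
+// "using", and the tag values decoded from the STag blob. A JSON tag is rendered as a
+// single tag entry of type TSDB_DATA_TYPE_JSON.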
+static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
+ char* string = NULL;
+ SArray* pTagVals = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+ // char cid[32] = {0};
+ // sprintf(cid, "%"PRIi64, id);
+ // cJSON* cid_ = cJSON_CreateString(cid);
+ // cJSON_AddItemToObject(json, "id", cid_);
+
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("child");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* using = cJSON_CreateString(sname);
+ cJSON_AddItemToObject(json, "using", using);
+ cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
+ cJSON_AddItemToObject(json, "tagNum", tagNumJson);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* tags = cJSON_CreateArray();
+ int32_t code = tTagToValArray(pTag, &pTagVals);
+ if (code) {
+ goto end;
+ }
+
+ if (tTagIsJson(pTag)) {
+ STag* p = (STag*)pTag;
+ if (p->nTag == 0) {
+ goto end;
+ }
+ char* pJson = parseTagDatatoJson(pTag);
+ cJSON* tag = cJSON_CreateObject();
+ STagVal* pTagVal = taosArrayGet(pTagVals, 0);
+
+ char* ptname = taosArrayGet(tagName, 0);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid_ = cJSON_CreateString("");
+ // cJSON_AddItemToObject(tag, "cid", cid_);
+ cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ cJSON* tvalue = cJSON_CreateString(pJson);
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ taosMemoryFree(pJson);
+ goto end;
+ }
+
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+
+ cJSON* tag = cJSON_CreateObject();
+
+ char* ptname = taosArrayGet(tagName, i);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
+ // cJSON_AddItemToObject(tag, "cid", cid);
+ cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+
+ cJSON* tvalue = NULL;
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
+ if (!buf) goto end;
+ dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
+ tvalue = cJSON_CreateString(buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ tvalue = cJSON_CreateNumber(val);
+ }
+
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ }
+
+end:
+ cJSON_AddItemToObject(json, "tags", tags);
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ taosArrayDestroy(pTagVals);
+ return string;
+}
+
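+// Decode a batched create-table message and build the JSON for each contained child or
+// normal table definition; only the string built for the last request in the batch is
+// returned to the caller.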
+static char* processCreateTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVCreateTbBatchReq req = {0};
+ SVCreateTbReq* pCreateReq;
+ char* string = NULL;
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
+ pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
+ } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
+ string =
+ buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
+ }
+ }
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
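+// Decode an SVAlterTbReq into an "alter" JSON event. Tag-value updates are stringified:
+// JSON tags via parseTagDatatoJson, all other types via dataConverToStr.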
+static char* processAlterTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVAlterTbReq vAlterTbReq = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+
+ switch (vAlterTbReq.action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
+ cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
+ cJSON_AddItemToObject(json, "colName", tagName);
+
+ bool isNull = vAlterTbReq.isNull;
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
+ if (jsonTag->nTag == 0) isNull = true;
+ }
+ if (!isNull) {
+ char* buf = NULL;
+
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
+ buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
+ } else {
+ buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
+ dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
+ }
+
+ cJSON* colValue = cJSON_CreateString(buf);
+ cJSON_AddItemToObject(json, "colValue", colValue);
+ taosMemoryFree(buf);
+ }
+
+ cJSON* isNullCJson = cJSON_CreateBool(isNull);
+ cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
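+// Decode a drop-super-table message into a "drop" JSON event; processDropTable below
+// handles a batch of normal/child tables and emits a tableNameList array instead.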
+static char* processDropSTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropStbReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ cJSON* tableName = cJSON_CreateString(req.name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processDropTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropTbBatchReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ // cJSON* tableType = cJSON_CreateString("normal");
+ // cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* tableNameList = cJSON_CreateArray();
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ SVDropTbReq* pDropTbReq = req.pReqs + iReq;
+
+ cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
+ cJSON_AddItemToArray(tableNameList, tableName);
+ }
+ cJSON_AddItemToObject(json, "tableNameList", tableNameList);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
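+// Replay a create-stable meta event on the target connection: decode the vnode-level
+// request, rebuild an SMCreateStbReq (igExists set, source marked TD_REQ_FROM_TAOX),
+// send it to the mnode through launchQueryImpl, then invalidate the cached table meta.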
+static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateStbReq req = {0};
+ SDecoder coder = {0};
+ SMCreateStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+ // build create stable
+ pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
+ SSchema* pSchema = req.schemaRow.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pColumns, &field);
+ }
+ pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
+ SSchema* pSchema = req.schemaTag.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
+ pReq.colVer = req.schemaRow.version;
+ pReq.tagVer = req.schemaTag.version;
+ pReq.numOfColumns = req.schemaRow.nCols;
+ pReq.numOfTags = req.schemaTag.nCols;
+ pReq.commentLen = -1;
+ pReq.suid = req.suid;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.igExists = true;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName;
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_CREATE_STB;
+ pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tFreeSMCreateStbReq(&pReq);
+ tDecoderClear(&coder);
+ return code;
+}
+
+static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropStbReq req = {0};
+ SDecoder coder = {0};
+ SMDropStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // build drop stable
+ pReq.igNotExists = true;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.suid = req.suid;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName = {0};
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_DROP_STB;
+ pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ return code;
+}
+
+typedef struct SVgroupCreateTableBatch {
+ SVCreateTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupCreateTableBatch;
+
+static void destroyCreateTbReqBatch(void* data) {
+ SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
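+// Replay a batched create-table event: look up the target vgroup of every table through
+// the catalog, group the requests per vgroup, and submit them as a rewritten
+// vnode-modify statement.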
+static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVCreateTbReq* pCreateReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+ taosArrayPush(pRequest->tableList, &pName);
+
+ SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupCreateTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ strcpy(tBatch.dbName, pRequest->pDb);
+
+ tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
+ taosArrayPush(tBatch.req.pArray, pCreateReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pCreateReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_CREATE_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct SVgroupDropTableBatch {
+ SVDropTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupDropTableBatch;
+
+static void destroyDropTbReqBatch(void* data) {
+ SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
+static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVDropTbReq* pDropReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+ // loop to drop table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ pDropReq->igNotExists = true;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ taosArrayPush(pRequest->tableList, &pName);
+ SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupDropTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
+ taosArrayPush(tBatch.req.pArray, pDropReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pDropReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_DROP_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+// delete from db.tabl where .. -> delete from tabl where ..
+// delete from db .tabl where .. -> delete from tabl where ..
+// static void getTbName(char *sql){
+// char *ch = sql;
+//
+// bool inBackQuote = false;
+// int8_t dotIndex = 0;
+// while(*ch != '\0'){
+// if(!inBackQuote && *ch == '`'){
+// inBackQuote = true;
+// ch++;
+// continue;
+// }
+//
+// if(inBackQuote && *ch == '`'){
+// inBackQuote = false;
+// ch++;
+//
+// continue;
+// }
+//
+// if(!inBackQuote && *ch == '.'){
+// dotIndex ++;
+// if(dotIndex == 2){
+// memmove(sql, ch + 1, strlen(ch + 1) + 1);
+// break;
+// }
+// }
+// ch++;
+// }
+//}
+
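+// Replay a delete event by rebuilding an equivalent DELETE statement from the decoded
+// SDeleteRes (table, timestamp column, skey/ekey range) and running it with taos_query;
+// a missing table is treated as success.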
+static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
+ SDeleteRes req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // getTbName(req.tableFName);
+ char sql[256] = {0};
+ sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
+ req.skey, req.tsColName, req.ekey);
+ printf("delete sql:%s\n", sql);
+
+ TAOS_RES* res = taos_query(taos, sql);
+ SRequestObj* pRequest = (SRequestObj*)res;
+ code = pRequest->code;
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+ taos_free_result(res);
+
+end:
+ tDecoderClear(&coder);
+ return code;
+}
+
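+// Replay an alter-table event by forwarding the original vnode message (with vgId
+// patched to the table's current vgroup) as a vnode-modify statement;
+// TSDB_ALTER_TABLE_UPDATE_OPTIONS is skipped.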
+static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVAlterTbReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SArray* pArray = NULL;
+ SVgDataBlocks* pVgData = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
+ if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pArray = taosArrayInit(1, sizeof(void*));
+ if (NULL == pArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == pVgData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pVgData->vg = pInfo;
+ pVgData->pData = taosMemoryMalloc(metaLen);
+ if (NULL == pVgData->pData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ memcpy(pVgData->pData, meta, metaLen);
+ ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
+ pVgData->size = metaLen;
+ pVgData->numOfTables = 1;
+ taosArrayPush(pArray, &pVgData);
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_ALTER_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+
+ pVgData = NULL;
+ pArray = NULL;
+ code = pRequest->code;
+ if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SExecResult* pRes = &pRequest->body.resInfo.execRes;
+ if (pRes->res != NULL) {
+ code = handleAlterTbExecRes(pRes->res, pCatalog);
+ }
+ }
+end:
+ taosArrayDestroy(pArray);
+ if (pVgData) taosMemoryFreeClear(pVgData->pData);
+ taosMemoryFreeClear(pVgData);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct {
+ SVgroupInfo vg;
+ void* data;
+} VgData;
+
+static void destroyVgHash(void* data) {
+ VgData* vgData = (VgData*)data;
+ taosMemoryFreeClear(vgData->data);
+}
+
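+// Public API: write one raw result block (client fetch layout: per-column lengths,
+// offsets or null bitmaps, then data) into an existing table. Rows are re-encoded as
+// STSRow entries of a single SSubmitReq for the table's vgroup and submitted via the
+// query scheduler.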
+int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ STableMeta* pTableMeta = NULL;
+ SQuery* pQuery = NULL;
+
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ code = terrno;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbname);
+
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: get gatlog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ SVgroupInfo vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
+ goto end;
+ }
+
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < numOfCols; i++) {
+ SSchema* schema = pTableMeta->schema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
+ SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pTableMeta->sversion);
+ tdSRowSetTpInfo(&rb, numOfCols, fLen);
+ int32_t dataLen = 0;
+
+ char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
+ int32_t* colLength = (int32_t*)pStart;
+ pStart += sizeof(int32_t) * numOfCols;
+
+ SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
+ pCol[i].offset = (int32_t*)pStart;
+ pStart += rows * sizeof(int32_t);
+ } else {
+ pCol[i].nullbitmap = pStart;
+ pStart += BitmapLen(rows);
+ }
+
+ pCol[i].pData = pStart;
+ pStart += colLength[i];
+ }
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+ int32_t offset = 0;
+ for (int32_t k = 0; k < numOfCols; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ if (pCol[k].offset[j] != -1) {
+ char* data = pCol[k].pData + pCol[k].offset[j];
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ } else {
+ if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
+ char* data = pCol[k].pData + pColumn->bytes * j;
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ }
+
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ taosMemoryFree(pCol);
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pTableMeta->sversion);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks = 1;
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+ nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
+
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vgData;
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ subReq = NULL; // no need free
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ taosMemoryFreeClear(pTableMeta);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
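+// Write a raw TMQ data response back into a cluster: decode the SMqDataRsp, re-encode
+// every block with its shipped schema into a per-vgroup SSubmitReq, and submit all
+// vgroup requests in one vnode-modify statement.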
+static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqRspObj rspObj = {0};
+ SDecoder decoder = {0};
+
+ terrno = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ return terrno;
+ }
+
+ rspObj.resIter = -1;
+ rspObj.resType = RES_TYPE__TMQ;
+
+ tDecoderInit(&decoder, data, dataLen);
+ code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
+ if (code != 0) {
+ uError("WriteRaw:decode smqDataRsp error");
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(pVgHash, destroyVgHash);
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: get gatlog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ printf("raw data block num:%d\n", rspObj.rsp.blockNum);
+ while (++rspObj.resIter < rspObj.rsp.blockNum) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+ if (!rspObj.rsp.withSchema) {
+ uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
+ goto end;
+ }
+ SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
+ setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
+
+ code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: setQueryResultFromRsp error");
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = pSW->pSchema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
+ if (!tbName) {
+ uError("WriteRaw: tbname is null");
+ code = TSDB_CODE_TMQ_INVALID_MSG;
+ goto end;
+ }
+
+ printf("raw data tbname:%s\n", tbName);
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbName);
+
+ VgData vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
+ goto end;
+ }
+
+ SSubmitReq* subReq = NULL;
+ SSubmitBlk* blk = NULL;
+ void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
+ if (hData) {
+ vgData = *(VgData*)hData;
+
+ int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
+ void* tmp = taosMemoryRealloc(vgData.data, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ ((VgData*)hData)->data = tmp;
+ subReq = (SSubmitReq*)(vgData.data);
+ blk = POINTER_SHIFT(vgData.data, subReq->length);
+ } else {
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ void* tmp = taosMemoryCalloc(1, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
+ subReq = (SSubmitReq*)(vgData.data);
+ subReq->length = sizeof(SSubmitReq);
+ subReq->numOfBlocks = 0;
+
+ blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
+ }
+
+ STableMeta* pTableMeta = NULL;
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ taosMemoryFreeClear(pTableMeta);
+
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pSW->version);
+ tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
+ int32_t dataLen = 0;
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+
+ doSetOneRowPtr(&rspObj.resInfo);
+ rspObj.resInfo.current += 1;
+
+ int32_t offset = 0;
+ for (int32_t k = 0; k < pSW->nCols; k++) {
+ const SSchema* pColumn = &pSW->pSchema[k];
+ char* data = rspObj.resInfo.row[k];
+ if (!data) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ data -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ }
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pSW->version);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks++;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+
+ int32_t numOfVg = taosHashGetSize(pVgHash);
+ nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
+
+ VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
+ while (vData) {
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vData->vg;
+ SSubmitReq* subReq = (SSubmitReq*)(vData->data);
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ vData->data = NULL; // no need free
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+ vData = (VgData*)taosHashIterate(pVgHash, vData);
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ tDecoderClear(&decoder);
+ qDestroyQuery(pQuery);
+ destroyRequest(pRequest);
+ taosHashCleanup(pVgHash);
+ return code;
+}
+
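+// Render a TMQ meta message as a JSON string describing the DDL event; returns NULL for
+// non-meta results and for unhandled message types.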
+char* tmq_get_json_meta(TAOS_RES* res) {
+ if (!TD_RES_TMQ_META(res)) {
+ return NULL;
+ }
+
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
+ return processCreateStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
+ return processAlterStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
+ return processDropSTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
+ return processCreateTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
+ return processAlterTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
+ return processDropTable(&pMetaRspObj->metaRsp);
+ }
+ return NULL;
+}
+
+void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
+
+int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
+ if (!raw || !res) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+ if (TD_RES_TMQ_META(res)) {
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ raw->raw = pMetaRspObj->metaRsp.metaRsp;
+ raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
+ raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
+ } else if (TD_RES_TMQ(res)) {
+ SMqRspObj* rspObj = ((SMqRspObj*)res);
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+
+ void* buf = taosMemoryCalloc(1, len);
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, buf, len);
+ tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
+ tEncoderClear(&encoder);
+
+ raw->raw = buf;
+ raw->raw_len = len;
+ raw->raw_type = RES_TYPE__TMQ;
+ } else {
+ return TSDB_CODE_TMQ_INVALID_MSG;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tmq_free_raw(tmq_raw_data raw) {
+ if (raw.raw_type == RES_TYPE__TMQ) {
+ taosMemoryFree(raw.raw);
+ }
+}
+
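+// Dispatch a raw message to the matching replay routine based on raw_type; note that
+// TDMT_VND_ALTER_STB is currently routed through taosCreateStb as well.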
+int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
+ if (!taos) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+
+ if (raw.raw_type == TDMT_VND_CREATE_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_STB) {
+ return taosDropStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
+ return taosCreateTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
+ return taosAlterTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
+ return taosDropTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DELETE) {
+ return taosDeleteData(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == RES_TYPE__TMQ) {
+ return tmqWriteRaw(taos, raw.raw, raw.raw_len);
+ }
+ return TSDB_CODE_INVALID_PARA;
+}
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index 436a9c2cfd082532e2f905604b6600c2c6135bd6..086cf653e9e5d6dc31b8698563dd8619ec5047e9 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -28,8 +28,9 @@
int32_t tmqAskEp(tmq_t* tmq, bool async);
typedef struct {
- int8_t inited;
- tmr_h timer;
+ int8_t inited;
+ tmr_h timer;
+ int32_t rsetId;
} SMqMgmt;
static SMqMgmt tmqMgmt = {0};
@@ -70,6 +71,7 @@ struct tmq_conf_t {
};
struct tmq_t {
+ int64_t refId;
// conf
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
@@ -146,8 +148,8 @@ typedef struct {
typedef struct {
// subscribe info
- char* topicName;
- char db[TSDB_DB_FNAME_LEN];
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray
@@ -166,29 +168,32 @@ typedef struct {
} SMqPollRspWrapper;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
tsem_t rspSem;
int32_t rspErr;
} SMqSubscribeCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int32_t code;
int32_t async;
tsem_t rspSem;
} SMqAskEpCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
SMqClientVg* pVg;
SMqClientTopic* pTopic;
- int32_t epoch;
int32_t vgId;
tsem_t rspSem;
} SMqPollCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int8_t automatic;
int8_t async;
int32_t waitingRspNum;
@@ -369,6 +374,38 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
return sprintf(dst, "%s:%d", topicName, vg);
}
+int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParamSet->refId);
+ if (tmq == NULL) {
+ if (!pParamSet->async) {
+ tsem_destroy(&pParamSet->rspSem);
+ }
+ taosMemoryFree(pParamSet);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ // if no more waiting rsp
+ if (pParamSet->async) {
+ // call async cb func
+ if (pParamSet->automatic && tmq->commitCb) {
+ tmq->commitCb(tmq, pParamSet->rspErr, tmq->commitCbUserParam);
+ } else if (!pParamSet->automatic && pParamSet->userCb) {
+ // sem post
+ pParamSet->userCb(tmq, pParamSet->rspErr, pParamSet->userParam);
+ }
+ taosMemoryFree(pParamSet);
+ } else {
+ tsem_post(&pParamSet->rspSem);
+ }
+
+#if 0
+ taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
+ taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
+#endif
+ return 0;
+}
+
int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
@@ -392,25 +429,7 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
ASSERT(waitingRspNum >= 0);
if (waitingRspNum == 0) {
- // if no more waiting rsp
- if (pParamSet->async) {
- // call async cb func
- if (pParamSet->automatic && pParamSet->tmq->commitCb) {
- pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->tmq->commitCbUserParam);
- } else if (!pParamSet->automatic && pParamSet->userCb) {
- // sem post
- pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->userParam);
- }
- } else {
- tsem_post(&pParamSet->rspSem);
- }
-
- taosMemoryFree(pParamSet);
-
-#if 0
- taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
- taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
-#endif
+ tmqCommitDone(pParamSet);
}
return 0;
}
@@ -504,7 +523,8 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
pParamSet->automatic = 0;
pParamSet->async = async;
pParamSet->userCb = userCb;
@@ -565,13 +585,19 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
+
pParamSet->automatic = automatic;
pParamSet->async = async;
pParamSet->userCb = userCb;
pParamSet->userParam = userParam;
tsem_init(&pParamSet->rspSem, 0, 0);
+ // init as 1 to prevent concurrency issue
+ pParamSet->waitingRspNum = 1;
+
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
@@ -600,10 +626,17 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
return 0;
}
+ int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
+ ASSERT(waitingRspNum >= 0);
+ if (waitingRspNum == 0) {
+ tmqCommitDone(pParamSet);
+ }
+
if (!async) {
tsem_wait(&pParamSet->rspSem);
code = pParamSet->rspErr;
tsem_destroy(&pParamSet->rspSem);
+ taosMemoryFree(pParamSet);
} else {
code = 0;
}
@@ -616,38 +649,50 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
}
}
- if (!async) {
#if 0
+ if (!async) {
taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
-#endif
}
+#endif
return 0;
}
void tmqAssignAskEpTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__COMMIT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__COMMIT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedReportTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__REPORT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__REPORT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
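
The timer hunks above stop handing the raw `tmq_t*` to timer callbacks and instead pass a heap-allocated reference id that the callback resolves through the reference set, so a timer that fires after the consumer has been torn down becomes a no-op. A rough sketch of that pattern, with stand-in names (`rset_acquire`, `rset_release`, `on_timer`) in place of the real API, might be:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct consumer { int64_t refId; } consumer_t;

/* Hypothetical stand-ins for a reference-set lookup: a real one maps id -> object
 * and returns NULL once the object has been removed. */
static consumer_t g_consumer = { .refId = 42 };
static int        g_alive    = 1;

static consumer_t *rset_acquire(int64_t refId) {
  return (g_alive && refId == g_consumer.refId) ? &g_consumer : NULL;
}
static void rset_release(int64_t refId) { (void)refId; }

/* Timer callback: receives a heap-allocated id, never a raw consumer pointer. */
static void on_timer(void *param) {
  int64_t     refId = *(int64_t *)param;
  consumer_t *c = rset_acquire(refId);         /* NULL if the consumer is already gone */
  if (c != NULL) {
    printf("enqueue delayed task for consumer %lld\n", (long long)c->refId);
    rset_release(refId);
  }
  free(param);                                 /* the id was allocated by whoever armed the timer */
}

int main(void) {
  int64_t *pRefId = malloc(sizeof(*pRefId));
  *pRefId = 42;
  on_timer(pRefId);                            /* consumer alive: work is enqueued */

  g_alive = 0;
  pRefId = malloc(sizeof(*pRefId));
  *pRefId = 42;
  on_timer(pRefId);                            /* consumer gone: callback is a safe no-op */
  return 0;
}
```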
@@ -656,8 +701,11 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
}
void tmqSendHbReq(void* param, void* tmrId) {
- // TODO replace with ref
- tmq_t* tmq = (tmq_t*)param;
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq == NULL) {
+ return;
+ }
int64_t consumerId = tmq->consumerId;
int32_t epoch = tmq->epoch;
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
@@ -687,7 +735,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
- taosTmrReset(tmqSendHbReq, 1000, tmq, tmqMgmt.timer, &tmq->hbLiveTimer);
+ taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
}
int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
@@ -700,10 +748,18 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) {
tmqAskEp(tmq, true);
- taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
- taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
ASSERT(0);
@@ -738,7 +794,6 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
int32_t tmqSubscribeCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param;
pParam->rspErr = code;
- /*tmq_t* tmq = pParam->tmq;*/
tsem_post(&pParam->rspSem);
return 0;
}
@@ -761,40 +816,27 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
return rsp;
}
-#if 0
-tmq_t* tmq_consumer_new(void* conn, tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
- tmq_t* pTmq = taosMemoryCalloc(sizeof(tmq_t), 1);
- if (pTmq == NULL) {
- return NULL;
- }
- pTmq->pTscObj = (STscObj*)conn;
- pTmq->status = 0;
- pTmq->pollCnt = 0;
- pTmq->epoch = 0;
- pTmq->epStatus = 0;
- pTmq->epSkipCnt = 0;
- // set conf
- strcpy(pTmq->clientId, conf->clientId);
- strcpy(pTmq->groupId, conf->groupId);
- pTmq->autoCommit = conf->autoCommit;
- pTmq->commit_cb = conf->commit_cb;
- pTmq->resetOffsetCfg = conf->resetOffset;
+void tmqFreeImpl(void* handle) {
+ tmq_t* tmq = (tmq_t*)handle;
- pTmq->consumerId = generateRequestId() & (((uint64_t)-1) >> 1);
- pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic));
- if (pTmq->clientTopics == NULL) {
- taosMemoryFree(pTmq);
- return NULL;
- }
-
- pTmq->mqueue = taosOpenQueue();
- pTmq->qall = taosAllocateQall();
+ // TODO stop timer
+ if (tmq->mqueue) taosCloseQueue(tmq->mqueue);
+ if (tmq->delayedTask) taosCloseQueue(tmq->delayedTask);
+ if (tmq->qall) taosFreeQall(tmq->qall);
- tsem_init(&pTmq->rspSem, 0, 0);
+ tsem_destroy(&tmq->rspSem);
- return pTmq;
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
+ }
+ taosArrayDestroy(tmq->clientTopics);
+ taos_close_internal(tmq->pTscObj);
+ taosMemoryFree(tmq);
}
-#endif
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init timer
@@ -806,6 +848,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+ tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
}
tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t));
@@ -874,8 +917,17 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
goto FAIL;
}
+ pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq);
+ if (pTmq->refId < 0) {
+ tmqFreeImpl(pTmq);
+ return NULL;
+ }
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = pTmq->refId;
+
if (pTmq->hbBgEnable) {
- pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pTmq, tmqMgmt.timer);
+ pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId);
@@ -933,7 +985,8 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
SMqSubscribeCbParam param = {
.rspErr = 0,
- .tmq = tmq,
+ .refId = tmq->refId,
+ .epoch = tmq->epoch,
};
  if (tsem_init(&param.rspSem, 0, 0) != 0) goto FAIL;
@@ -975,12 +1028,16 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
// init ep timer
if (tmq->epTimer == NULL) {
- tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer);
+ int64_t* pRefId1 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId1 = tmq->refId;
+ tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, pRefId1, tmqMgmt.timer);
}
// init auto commit timer
if (tmq->autoCommit && tmq->commitTimer == NULL) {
- tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer);
+ int64_t* pRefId2 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId2 = tmq->refId;
+ tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer);
}
code = 0;
@@ -1002,9 +1059,18 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqPollCbParam* pParam = (SMqPollCbParam*)param;
SMqClientVg* pVg = pParam->pVg;
SMqClientTopic* pTopic = pParam->pTopic;
- tmq_t* tmq = pParam->tmq;
- int32_t vgId = pParam->vgId;
- int32_t epoch = pParam->epoch;
+
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+ if (tmq == NULL) {
+ tsem_destroy(&pParam->rspSem);
+ taosMemoryFree(pParam);
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ int32_t epoch = pParam->epoch;
+ int32_t vgId = pParam->vgId;
taosMemoryFree(pParam);
if (code != 0) {
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
@@ -1129,7 +1195,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
SMqClientTopic topic = {0};
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
topic.schema = pTopicEp->schema;
- topic.topicName = strdup(pTopicEp->topic);
+ tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
@@ -1158,7 +1224,16 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
}
taosArrayPush(newTopics, &topic);
}
- if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics);
+ if (tmq->clientTopics) {
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
+ }
+ taosArrayDestroy(tmq->clientTopics);
+ }
taosHashCleanup(pHash);
tmq->clientTopics = newTopics;
@@ -1173,8 +1248,20 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
- tmq_t* tmq = pParam->tmq;
int8_t async = pParam->async;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+
+ if (tmq == NULL) {
+ if (!async) {
+ tsem_destroy(&pParam->rspSem);
+ } else {
+ taosMemoryFree(pParam);
+ }
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
pParam->code = code;
if (code != 0) {
tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d", tmq->consumerId, pParam->async);
@@ -1254,7 +1341,8 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
/*atomic_store_8(&tmq->epStatus, 0);*/
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
pParam->async = async;
tsem_init(&pParam->rspSem, 0, 0);
@@ -1294,31 +1382,6 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
return code;
}
-#if 0
-int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
- const SMqOffset* pOffset = &offset->offset;
- if (strcmp(pOffset->cgroup, tmq->groupId) != 0) {
- return TMQ_RESP_ERR__FAIL;
- }
- int32_t sz = taosArrayGetSize(tmq->clientTopics);
- for (int32_t i = 0; i < sz; i++) {
- SMqClientTopic* clientTopic = taosArrayGet(tmq->clientTopics, i);
- if (strcmp(clientTopic->topicName, pOffset->topicName) == 0) {
- int32_t vgSz = taosArrayGetSize(clientTopic->vgs);
- for (int32_t j = 0; j < vgSz; j++) {
- SMqClientVg* pVg = taosArrayGet(clientTopic->vgs, j);
- if (pVg->vgId == pOffset->vgId) {
- pVg->currentOffset = pOffset->offset;
- tmqClearUnhandleMsg(tmq);
- return TMQ_RESP_ERR__SUCCESS;
- }
- }
- }
- }
- return TMQ_RESP_ERR__FAIL;
-}
-#endif
-
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
if (pReq == NULL) {
@@ -1412,11 +1475,12 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
tsem_post(&tmq->rspSem);
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
+
pParam->pVg = pVg;
pParam->pTopic = pTopic;
pParam->vgId = pVg->vgId;
- pParam->epoch = tmq->epoch;
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
@@ -1556,7 +1620,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
}
#endif
- // in no topic status also need process delayed task
+  // in no topic status, delayed tasks also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
return NULL;
}
@@ -1621,7 +1685,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
/*return rsp;*/
return 0;
}
- // TODO: free resources
+ taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
return 0;
}
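
With the reference set in place, `tmq_consumer_close` only drops the consumer's reference; the destructor registered when the set was opened (`tmqFreeImpl` above) performs the actual cleanup once nothing still holds the object. A toy sketch of that ownership model, with a hypothetical one-slot `rset_*` API standing in for the real reference set, might look like:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One-slot stand-in for a reference set (hypothetical API). */
static void (*g_dtor)(void *);
static void *g_obj;

static int     rset_open(void (*dtor)(void *)) { g_dtor = dtor; return 1; }
static int64_t rset_add(int rsetId, void *p)   { (void)rsetId; g_obj = p; return 1; }
static void    rset_remove(int rsetId, int64_t refId) {
  (void)rsetId; (void)refId;
  if (g_obj) { g_dtor(g_obj); g_obj = NULL; }  /* destructor runs when the reference is dropped */
}

typedef struct consumer { int dummy; } consumer_t;
static void consumer_destroy(void *p) {        /* plays the role of tmqFreeImpl */
  printf("consumer destroyed\n");
  free(p);
}

int main(void) {
  int rsetId = rset_open(consumer_destroy);    /* destructor registered once, at setup */
  consumer_t *c = calloc(1, sizeof(*c));
  int64_t refId = rset_add(rsetId, c);         /* the set now owns the consumer */
  /* ... timers and callbacks look the consumer up by refId ... */
  rset_remove(rsetId, refId);                  /* "close" just drops the reference */
  return 0;
}
```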
@@ -1697,1610 +1761,6 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return NULL;
}
-static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
- int8_t t) {
- char* string = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
-
- // char uid[32] = {0};
- // sprintf(uid, "%"PRIi64, id);
- // cJSON* id_ = cJSON_CreateString(uid);
- // cJSON_AddItemToObject(json, "id", id_);
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
- cJSON_AddItemToObject(json, "tableType", tableType);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* columns = cJSON_CreateArray();
- for (int i = 0; i < schemaRow->nCols; i++) {
- cJSON* column = cJSON_CreateObject();
- SSchema* s = schemaRow->pSchema + i;
- cJSON* cname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(column, "name", cname);
- cJSON* ctype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(column, "type", ctype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- }
- cJSON_AddItemToArray(columns, column);
- }
- cJSON_AddItemToObject(json, "columns", columns);
-
- cJSON* tags = cJSON_CreateArray();
- for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
- cJSON* tag = cJSON_CreateObject();
- SSchema* s = schemaTag->pSchema + i;
- cJSON* tname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(tag, "name", tname);
- cJSON* ttype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(tag, "type", ttype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- }
- cJSON_AddItemToArray(tags, tag);
- }
- cJSON_AddItemToObject(json, "tags", tags);
-
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- return string;
-}
-
-static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
- SMAlterStbReq req = {0};
- cJSON* json = NULL;
- char* string = NULL;
-
- if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
- goto end;
- }
-
- json = cJSON_CreateObject();
- if (json == NULL) {
- goto end;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- SName name = {0};
- tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- cJSON* tableName = cJSON_CreateString(name.tname);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* alterType = cJSON_CreateNumber(req.alterType);
- cJSON_AddItemToObject(json, "alterType", alterType);
- switch (req.alterType) {
- case TSDB_ALTER_TABLE_ADD_TAG:
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_TAG:
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
- TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
- cJSON* colName = cJSON_CreateString(oldField->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(newField->name);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-end:
- cJSON_Delete(json);
- tFreeSMAltertbReq(&req);
- return string;
-}
-
-static char* processCreateStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* processAlterStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
- char* string = NULL;
- SArray* pTagVals = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
- // char cid[32] = {0};
- // sprintf(cid, "%"PRIi64, id);
- // cJSON* cid_ = cJSON_CreateString(cid);
- // cJSON_AddItemToObject(json, "id", cid_);
-
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("child");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* using = cJSON_CreateString(sname);
- cJSON_AddItemToObject(json, "using", using);
- cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
- cJSON_AddItemToObject(json, "tagNum", tagNumJson);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* tags = cJSON_CreateArray();
- int32_t code = tTagToValArray(pTag, &pTagVals);
- if (code) {
- goto end;
- }
-
- if (tTagIsJson(pTag)) {
- STag* p = (STag*)pTag;
- if (p->nTag == 0) {
- goto end;
- }
- char* pJson = parseTagDatatoJson(pTag);
- cJSON* tag = cJSON_CreateObject();
- STagVal* pTagVal = taosArrayGet(pTagVals, 0);
-
- char* ptname = taosArrayGet(tagName, 0);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid_ = cJSON_CreateString("");
- // cJSON_AddItemToObject(tag, "cid", cid_);
- cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
- cJSON_AddItemToObject(tag, "type", ttype);
- cJSON* tvalue = cJSON_CreateString(pJson);
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- taosMemoryFree(pJson);
- goto end;
- }
-
- for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
- STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
-
- cJSON* tag = cJSON_CreateObject();
-
- char* ptname = taosArrayGet(tagName, i);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
- // cJSON_AddItemToObject(tag, "cid", cid);
- cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
- cJSON_AddItemToObject(tag, "type", ttype);
-
- cJSON* tvalue = NULL;
- if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
- if (!buf) goto end;
- dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
- tvalue = cJSON_CreateString(buf);
- taosMemoryFree(buf);
- } else {
- double val = 0;
- GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
- tvalue = cJSON_CreateNumber(val);
- }
-
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- }
-
-end:
- cJSON_AddItemToObject(json, "tags", tags);
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- taosArrayDestroy(pTagVals);
- return string;
-}
-
-static char* processCreateTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVCreateTbBatchReq req = {0};
- SVCreateTbReq* pCreateReq;
- char* string = NULL;
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
- if (pCreateReq->type == TSDB_CHILD_TABLE) {
- string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
- pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
- } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
- string =
- buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
- }
- }
-
- tDecoderClear(&decoder);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processAlterTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVAlterTbReq vAlterTbReq = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
- cJSON_AddItemToObject(json, "alterType", alterType);
-
- switch (vAlterTbReq.action) {
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
- cJSON_AddItemToObject(json, "colType", colType);
- if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
- cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
- cJSON_AddItemToObject(json, "colName", tagName);
-
- bool isNull = vAlterTbReq.isNull;
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
- if (jsonTag->nTag == 0) isNull = true;
- }
- if (!isNull) {
- char* buf = NULL;
-
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
- buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
- } else {
- buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
- dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
- }
-
- cJSON* colValue = cJSON_CreateString(buf);
- cJSON_AddItemToObject(json, "colValue", colValue);
- taosMemoryFree(buf);
- }
-
- cJSON* isNullCJson = cJSON_CreateBool(isNull);
- cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropSTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropStbReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- cJSON* tableName = cJSON_CreateString(req.name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropTbBatchReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- // cJSON* tableType = cJSON_CreateString("normal");
- // cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* tableNameList = cJSON_CreateArray();
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- SVDropTbReq* pDropTbReq = req.pReqs + iReq;
-
- cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
- cJSON_AddItemToArray(tableNameList, tableName);
- }
- cJSON_AddItemToObject(json, "tableNameList", tableNameList);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- SMCreateStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
- // build create stable
- pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
- SSchema* pSchema = req.schemaRow.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
- SSchema* pSchema = req.schemaTag.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pTags, &field);
- }
-
- pReq.colVer = req.schemaRow.version;
- pReq.tagVer = req.schemaTag.version;
- pReq.numOfColumns = req.schemaRow.nCols;
- pReq.numOfTags = req.schemaTag.nCols;
- pReq.commentLen = -1;
- pReq.suid = req.suid;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.igExists = true;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName;
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_CREATE_STB;
- pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tFreeSMCreateStbReq(&pReq);
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropStbReq req = {0};
- SDecoder coder;
- SMDropStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // build drop stable
- pReq.igNotExists = true;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.suid = req.suid;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName = {0};
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_DROP_STB;
- pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- return code;
-}
-
-typedef struct SVgroupCreateTableBatch {
- SVCreateTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupCreateTableBatch;
-
-static void destroyCreateTbReqBatch(void* data) {
- SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVCreateTbReq* pCreateReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
- taosArrayPush(pRequest->tableList, &pName);
-
- SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupCreateTableBatch tBatch = {0};
- tBatch.info = pInfo;
- strcpy(tBatch.dbName, pRequest->pDb);
-
- tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
- taosArrayPush(tBatch.req.pArray, pCreateReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pCreateReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_CREATE_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
-
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct SVgroupDropTableBatch {
- SVDropTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupDropTableBatch;
-
-static void destroyDropTbReqBatch(void* data) {
- SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVDropTbReq* pDropReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pDropReq = req.pReqs + iReq;
- pDropReq->igNotExists = true;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- taosArrayPush(pRequest->tableList, &pName);
- SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupDropTableBatch tBatch = {0};
- tBatch.info = pInfo;
- tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
- taosArrayPush(tBatch.req.pArray, pDropReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pDropReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_DROP_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-// delete from db.tabl where .. -> delete from tabl where ..
-// delete from db .tabl where .. -> delete from tabl where ..
-// static void getTbName(char *sql){
-// char *ch = sql;
-//
-// bool inBackQuote = false;
-// int8_t dotIndex = 0;
-// while(*ch != '\0'){
-// if(!inBackQuote && *ch == '`'){
-// inBackQuote = true;
-// ch++;
-// continue;
-// }
-//
-// if(inBackQuote && *ch == '`'){
-// inBackQuote = false;
-// ch++;
-//
-// continue;
-// }
-//
-// if(!inBackQuote && *ch == '.'){
-// dotIndex ++;
-// if(dotIndex == 2){
-// memmove(sql, ch + 1, strlen(ch + 1) + 1);
-// break;
-// }
-// }
-// ch++;
-// }
-//}
-
-static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
- SDeleteRes req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
-
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeDeleteRes(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // getTbName(req.tableFName);
- char sql[256] = {0};
- sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
- req.skey, req.tsColName, req.ekey);
- printf("delete sql:%s\n", sql);
-
- TAOS_RES* res = taos_query(taos, sql);
- SRequestObj* pRequest = (SRequestObj*)res;
- code = pRequest->code;
- if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
- taos_free_result(res);
-
-end:
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVAlterTbReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SArray* pArray = NULL;
- SVgDataBlocks* pVgData = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
-
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
- if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pArray = taosArrayInit(1, sizeof(void*));
- if (NULL == pArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == pVgData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pVgData->vg = pInfo;
- pVgData->pData = taosMemoryMalloc(metaLen);
- if (NULL == pVgData->pData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- memcpy(pVgData->pData, meta, metaLen);
- ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
- pVgData->size = metaLen;
- pVgData->numOfTables = 1;
- taosArrayPush(pArray, &pVgData);
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_ALTER_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
-
- pVgData = NULL;
- pArray = NULL;
- code = pRequest->code;
- if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SExecResult* pRes = &pRequest->body.resInfo.execRes;
- if (pRes->res != NULL) {
- code = handleAlterTbExecRes(pRes->res, pCatalog);
- }
- }
-end:
- taosArrayDestroy(pArray);
- if (pVgData) taosMemoryFreeClear(pVgData->pData);
- taosMemoryFreeClear(pVgData);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct {
- SVgroupInfo vg;
- void* data;
-} VgData;
-
-static void destroyVgHash(void* data) {
- VgData* vgData = (VgData*)data;
- taosMemoryFreeClear(vgData->data);
-}
-
-int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
- int32_t code = TSDB_CODE_SUCCESS;
- STableMeta* pTableMeta = NULL;
- SQuery* pQuery = NULL;
-
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- code = terrno;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbname);
-
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- SVgroupInfo vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
- goto end;
- }
-
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < numOfCols; i++) {
- SSchema* schema = pTableMeta->schema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
- SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pTableMeta->sversion);
- tdSRowSetTpInfo(&rb, numOfCols, fLen);
- int32_t dataLen = 0;
-
- char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
- int32_t* colLength = (int32_t*)pStart;
- pStart += sizeof(int32_t) * numOfCols;
-
- SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
- pCol[i].offset = (int32_t*)pStart;
- pStart += rows * sizeof(int32_t);
- } else {
- pCol[i].nullbitmap = pStart;
- pStart += BitmapLen(rows);
- }
-
- pCol[i].pData = pStart;
- pStart += colLength[i];
- }
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
- int32_t offset = 0;
- for (int32_t k = 0; k < numOfCols; k++) {
- const SSchema* pColumn = &pTableMeta->schema[k];
-
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- if (pCol[k].offset[j] != -1) {
- char* data = pCol[k].pData + pCol[k].offset[j];
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
-
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- } else {
- if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
- char* data = pCol[k].pData + pColumn->bytes * j;
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- }
-
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- taosMemoryFree(pCol);
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pTableMeta->sversion);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks = 1;
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
- nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
-
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vgData;
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- subReq = NULL; // no need free
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- taosMemoryFreeClear(pTableMeta);
- qDestroyQuery(pQuery);
- return code;
-}
-
-static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
- int32_t code = TSDB_CODE_SUCCESS;
- SHashObj* pVgHash = NULL;
- SQuery* pQuery = NULL;
- SMqRspObj rspObj = {0};
- SDecoder decoder = {0};
-
- terrno = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- return terrno;
- }
-
- rspObj.resIter = -1;
- rspObj.resType = RES_TYPE__TMQ;
-
- tDecoderInit(&decoder, data, dataLen);
- code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
- if (code != 0) {
- uError("WriteRaw:decode smqDataRsp error");
- code = TSDB_CODE_INVALID_MSG;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
- taosHashSetFreeFp(pVgHash, destroyVgHash);
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- printf("raw data block num:%d\n", rspObj.rsp.blockNum);
- while (++rspObj.resIter < rspObj.rsp.blockNum) {
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
- if (!rspObj.rsp.withSchema) {
- uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
- goto end;
- }
- SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
- setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
-
- code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: setQueryResultFromRsp error");
- goto end;
- }
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t rows = rspObj.resInfo.numOfRows;
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
- if (!tbName) {
- uError("WriteRaw: tbname is null");
- code = TSDB_CODE_TMQ_INVALID_MSG;
- goto end;
- }
-
- printf("raw data tbname:%s\n", tbName);
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbName);
-
- VgData vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
- goto end;
- }
-
- SSubmitReq* subReq = NULL;
- SSubmitBlk* blk = NULL;
- void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
- if (hData) {
- vgData = *(VgData*)hData;
-
- int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
- void* tmp = taosMemoryRealloc(vgData.data, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- ((VgData*)hData)->data = tmp;
- subReq = (SSubmitReq*)(vgData.data);
- blk = POINTER_SHIFT(vgData.data, subReq->length);
- } else {
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- void* tmp = taosMemoryCalloc(1, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
- subReq = (SSubmitReq*)(vgData.data);
- subReq->length = sizeof(SSubmitReq);
- subReq->numOfBlocks = 0;
-
- blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
- }
-
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
-
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
-
- doSetOneRowPtr(&rspObj.resInfo);
- rspObj.resInfo.current += 1;
-
- int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- } else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
- }
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- }
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks++;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
-
- int32_t numOfVg = taosHashGetSize(pVgHash);
- nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
-
- VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
- while (vData) {
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vData->vg;
- SSubmitReq* subReq = (SSubmitReq*)(vData->data);
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- vData->data = NULL; // no need free
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
- vData = (VgData*)taosHashIterate(pVgHash, vData);
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- tDecoderClear(&decoder);
- taos_free_result(&rspObj);
- qDestroyQuery(pQuery);
- destroyRequest(pRequest);
- taosHashCleanup(pVgHash);
- return code;
-}
-
-char* tmq_get_json_meta(TAOS_RES* res) {
- if (!TD_RES_TMQ_META(res)) {
- return NULL;
- }
-
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
- return processCreateStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
- return processAlterStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
- return processDropSTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
- return processCreateTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
- return processAlterTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
- return processDropTable(&pMetaRspObj->metaRsp);
- }
- return NULL;
-}
-
-void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
-
-int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
- if (!raw || !res) {
- return TSDB_CODE_INVALID_PARA;
- }
- if (TD_RES_TMQ_META(res)) {
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- raw->raw = pMetaRspObj->metaRsp.metaRsp;
- raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
- raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
- } else if (TD_RES_TMQ(res)) {
- SMqRspObj* rspObj = ((SMqRspObj*)res);
-
- int32_t len = 0;
- int32_t code = 0;
- tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
- if (code < 0) {
- return -1;
- }
-
- void* buf = taosMemoryCalloc(1, len);
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, len);
- tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
- tEncoderClear(&encoder);
-
- raw->raw = buf;
- raw->raw_len = len;
- raw->raw_type = RES_TYPE__TMQ;
- } else {
- return TSDB_CODE_TMQ_INVALID_MSG;
- }
- return TSDB_CODE_SUCCESS;
-}
-
-void tmq_free_raw(tmq_raw_data raw) {
- if (raw.raw_type == RES_TYPE__TMQ) {
- taosMemoryFree(raw.raw);
- }
-}
-
-int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
- if (!taos) {
- return TSDB_CODE_INVALID_PARA;
- }
-
- if (raw.raw_type == TDMT_VND_CREATE_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_STB) {
- return taosDropStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
- return taosCreateTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
- return taosAlterTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
- return taosDropTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DELETE) {
- return taosDeleteData(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == RES_TYPE__TMQ) {
- return tmqWriteRaw(taos, raw.raw, raw.raw_len);
- }
- return TSDB_CODE_INVALID_PARA;
-}
-
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
//
tmqCommitInner(tmq, msg, 0, 1, cb, param);
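
The raw-data functions deleted above expose a small public API (`tmq_get_raw`, `tmq_write_raw`, `tmq_free_raw`). A consumer-side round trip over that API looks roughly like the following sketch; it assumes `tmq` is an already-subscribed consumer and `pTarget` is a second connection opened with `taos_connect()`, neither of which is set up here.

```c
#include "taos.h"

// Minimal sketch (not part of this patch): replicate one polled message to a
// second cluster using the raw-data API whose signatures appear above.
static int32_t forwardOneMessage(tmq_t* tmq, TAOS* pTarget) {
  TAOS_RES* msg = tmq_consumer_poll(tmq, 1000);  // wait up to 1 s for a message
  if (msg == NULL) {
    return 0;                                    // nothing to forward this round
  }

  tmq_raw_data raw = {0};
  int32_t code = tmq_get_raw(msg, &raw);         // serialize the data or meta message
  if (code == 0) {
    code = tmq_write_raw(pTarget, raw);          // replay it on the target connection
    tmq_free_raw(raw);                           // frees only TMQ data payloads, per tmq_free_raw() above
  }
  taos_free_result(msg);
  return code;
}
```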
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 2249bc7823a49589e99f1714d06401b419c3d72d..308afd467f1248c14dac9d8abea638cb42444936 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -22,6 +22,7 @@ extern "C" {
#include "catalog.h"
#include "os.h"
+#include "parser.h"
#include "query.h"
#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
@@ -44,18 +45,37 @@ typedef struct SParseTablesMetaReq {
SHashObj* pTables;
} SParseTablesMetaReq;
+typedef enum ECatalogReqType {
+ CATALOG_REQ_TYPE_META = 1,
+ CATALOG_REQ_TYPE_VGROUP,
+ CATALOG_REQ_TYPE_BOTH
+} ECatalogReqType;
+
+typedef struct SInsertTablesMetaReq {
+ char dbFName[TSDB_DB_FNAME_LEN];
+ SArray* pTableMetaPos;
+ SArray* pTableMetaReq; // element is SName
+ SArray* pTableVgroupPos;
+ SArray* pTableVgroupReq; // element is SName
+} SInsertTablesMetaReq;
+
typedef struct SParseMetaCache {
- SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
- SHashObj* pDbVgroup; // key is dbFName, element is SArray*
- SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
- SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
- SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
- SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
- SHashObj* pUdf; // key is funcName, element is SFuncInfo*
- SHashObj* pTableIndex; // key is tbFName, element is SArray*
- SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
- SArray* pDnodes; // element is SEpSet
- bool dnodeRequired;
+ SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
+ SHashObj* pDbVgroup; // key is dbFName, element is SArray*
+ SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
+ SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
+ SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
+ SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
+ SHashObj* pUdf; // key is funcName, element is SFuncInfo*
+ SHashObj* pTableIndex; // key is tbFName, element is SArray*
+ SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
+ SArray* pDnodes; // element is SEpSet
+ bool dnodeRequired;
+ SHashObj* pInsertTables; // key is dbName, element is SInsertTablesMetaReq*, for insert
+ const char* pUser;
+ const SArray* pTableMetaData; // pRes = STableMeta*
+ const SArray* pTableVgroupData; // pRes = SVgroupInfo*
+ int32_t sqlTableNum;
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@@ -72,8 +92,9 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
@@ -100,6 +121,12 @@ int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFun
int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes);
int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput);
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache);
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta);
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
#ifdef __cplusplus
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index b7532173c8380e9628fa504c9ce476f4884967fc..31ae35e7177d55397a6000a86712b977962942dd 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -73,6 +73,9 @@ typedef struct SInsertParseContext {
SStmtCallback* pStmtCb;
SParseMetaCache* pMetaCache;
char sTableName[TSDB_TABLE_NAME_LEN];
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW];
+ int64_t memElapsed;
+ int64_t parRowElapsed;
} SInsertParseContext;
typedef struct SInsertParseSyntaxCxt {
@@ -203,10 +206,11 @@ static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass)
return catalogChkAuth(pBasicCtx->pCatalog, &conn, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass);
}
-static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) {
+static int32_t getTableSchema(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, bool isStb,
+ STableMeta** pTableMeta) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta);
+ return getTableMetaFromCacheForInsert(pBasicCtx->pTableMetaPos, pCxt->pMetaCache, tbNo, pTableMeta);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -219,10 +223,10 @@ static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool is
return catalogGetTableMeta(pBasicCtx->pCatalog, &conn, pTbName, pTableMeta);
}
-static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) {
+static int32_t getTableVgroup(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, SVgroupInfo* pVg) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg);
+ return getTableVgroupFromCacheForInsert(pBasicCtx->pTableVgroupPos, pCxt->pMetaCache, tbNo, pVg);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -231,28 +235,22 @@ static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroup
return catalogGetTableHashVgroup(pBasicCtx->pCatalog, &conn, pTbName, pVg);
}
-static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
- bool pass = false;
- CHECK_CODE(checkAuth(pCxt, dbFname, &pass));
- if (!pass) {
- return TSDB_CODE_PAR_PERMISSION_DENIED;
- }
-
- CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta));
+static int32_t getTableMetaImpl(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname, bool isStb) {
+ CHECK_CODE(getTableSchema(pCxt, tbNo, name, isStb, &pCxt->pTableMeta));
if (!isStb) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, name, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, name, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
return TSDB_CODE_SUCCESS;
}
-static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, false);
+static int32_t getTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, false);
}
-static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, true);
+static int32_t getSTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, true);
}
static int32_t getDBCfg(SInsertParseContext* pCxt, const char* pDbFName, SDbCfgInfo* pInfo) {
@@ -1028,13 +1026,13 @@ end:
return code;
}
-static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName,
- int32_t len, STableMeta* pMeta) {
+static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, int32_t tbNo, SName* pTableName,
+ const char* pName, int32_t len, STableMeta* pMeta) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, pTableName, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
- pMeta->uid = 0;
+ pMeta->uid = tbNo;
pMeta->vgId = vg.vgId;
pMeta->tableType = TSDB_CHILD_TABLE;
@@ -1084,7 +1082,7 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
}
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
-static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tbFName) {
+static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
STableMeta** pMeta = taosHashGet(pCxt->pSubTableHashObj, tbFName, len);
if (NULL != pMeta) {
@@ -1102,11 +1100,11 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
tNameGetFullDbName(&sname, dbFName);
strcpy(pCxt->sTableName, sname.tname);
- CHECK_CODE(getSTableMeta(pCxt, &sname, dbFName));
+ CHECK_CODE(getSTableMeta(pCxt, tbNo, &sname, dbFName));
if (TSDB_SUPER_TABLE != pCxt->pTableMeta->tableType) {
return buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed");
}
- CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, name, tbFName, len, pCxt->pTableMeta));
+ CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, tbNo, name, tbFName, len, pCxt->pTableMeta));
SSchema* pTagsSchema = getTableTagSchema(pCxt->pTableMeta);
setBoundColumnInfo(&pCxt->tags, pTagsSchema, getNumOfTags(pCxt->pTableMeta));
@@ -1195,7 +1193,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
tdSRowEnd(pBuilder);
*gotRow = true;
-
+
#ifdef TD_DEBUG_PRINT_ROW
STSchema* pSTSchema = tdGetSTSChemaFromSSChema(schema, spd->numOfCols, 1);
tdSRowPrint(row, pSTSchema, __func__);
@@ -1214,7 +1212,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
CHECK_CODE(initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo));
(*numOfRows) = 0;
- char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
+ // char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
SToken sToken;
while (1) {
int32_t index = 0;
@@ -1232,7 +1230,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
}
bool gotRow = false;
- CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, tmpTokenBuf));
+ CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, pCxt->tmpTokenBuf));
if (gotRow) {
pDataBlock->size += extendedRowSize; // len;
}
@@ -1347,7 +1345,9 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
}
static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
- taosMemoryFreeClear(pCxt->pTableMeta);
+ if (!pCxt->pComCxt->async) {
+ taosMemoryFreeClear(pCxt->pTableMeta);
+ }
destroyBoundColumnInfo(&pCxt->tags);
tdDestroySVCreateTbReq(&pCxt->createTblReq);
}
@@ -1365,6 +1365,20 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
destroyBlockArrayList(pCxt->pVgDataBlocks);
}
+static int32_t parseTableName(SInsertParseContext* pCxt, SToken* pTbnameToken, SName* pName, char* pDbFName,
+ char* pTbFName) {
+ int32_t code = createSName(pName, pTbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameExtractFullName(pName, pTbFName);
+ code = taosHashPut(pCxt->pTableNameHashObj, pTbFName, strlen(pTbFName), pName, sizeof(SName));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameGetFullDbName(pName, pDbFName);
+ code = taosHashPut(pCxt->pDbFNameHashObj, pDbFName, strlen(pDbFName), pDbFName, TSDB_DB_FNAME_LEN);
+ }
+ return code;
+}
+
// tb_name
// [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
// [(field1_name, ...)]
@@ -1372,7 +1386,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
// [...];
static int32_t parseInsertBody(SInsertParseContext* pCxt) {
int32_t tbNum = 0;
+ SName name;
char tbFName[TSDB_TABLE_FNAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
bool autoCreateTbl = false;
// for each table
@@ -1415,20 +1431,15 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
SToken tbnameToken = sToken;
NEXT_TOKEN(pCxt->pSql, sToken);
- SName name;
- CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
-
- tNameExtractFullName(&name, tbFName);
- CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName)));
- char dbFName[TSDB_DB_FNAME_LEN];
- tNameGetFullDbName(&name, dbFName);
- CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName)));
+ if (!pCxt->pComCxt->async || TK_USING == sToken.type) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
bool existedUsing = false;
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
}
@@ -1438,22 +1449,31 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
// pSql -> field1_name, ...)
pBoundColsStart = pCxt->pSql;
CHECK_CODE(ignoreBoundColumns(pCxt));
- // CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_USING == sToken.type) {
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else if (!existedUsing) {
- CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
+ CHECK_CODE(getTableMeta(pCxt, tbNum, &name, dbFName));
}
STableDataBlocks* dataBuf = NULL;
- CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
- sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
- &dataBuf, NULL, &pCxt->createTblReq));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, &pCxt->pTableMeta->uid, sizeof(pCxt->pTableMeta->uid),
+ TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL,
+ &pCxt->createTblReq));
+ } else {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
+ sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
+ &dataBuf, NULL, &pCxt->createTblReq));
+ }
if (NULL != pBoundColsStart) {
char* pCurrPos = pCxt->pSql;
@@ -1532,7 +1552,9 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
.totalNum = 0,
.pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT),
.pStmtCb = pContext->pStmtCb,
- .pMetaCache = pMetaCache};
+ .pMetaCache = pMetaCache,
+ .memElapsed = 0,
+ .parRowElapsed = 0};
if (pContext->pStmtCb && *pQuery) {
(*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj,
@@ -1547,7 +1569,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
} else {
context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
context.pTableBlockHashObj =
- taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
}
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj ||
@@ -1656,24 +1678,24 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return TSDB_CODE_SUCCESS;
}
-static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
- CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo,
+ pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
-static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
- bool hasData = false;
+ bool hasData = false;
+ int32_t tableNo = 0;
// for each table
while (1) {
SToken sToken;
@@ -1702,9 +1724,9 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
@@ -1717,15 +1739,17 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
if (TK_USING == sToken.type && !existedUsing) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
- } else {
- CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+ } else if (!existedUsing) {
+ CHECK_CODE(collectTableMetaKey(pCxt, false, tableNo, &tbnameToken));
}
+ ++tableNo;
+
if (TK_VALUES == sToken.type) {
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
CHECK_CODE(skipValuesClause(pCxt));
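
The net effect of the parInsert.c changes is easier to see with a concrete statement: during the syntax-only pass every target table gets a running `tableNo`, and in async mode that number also becomes the temporary `pTableMeta->uid` used to key the per-table data blocks. A worked example, using an illustrative super table `meters` that is not part of this patch:

```c
/* Illustrative only: how the syntax pass numbers the tables of one INSERT. */
static const char* kExampleInsert =
    "INSERT INTO "
    "d1001 USING meters TAGS (2, 'SF') VALUES (now, 10.3) "   /* tableNo 0 */
    "d1002 VALUES (now, 12.6) "                               /* tableNo 1 */
    "d1003 USING meters TAGS (3, 'LA') VALUES (now, 11.8)";   /* tableNo 2 */
/* tableNo 0 and 2 reserve CATALOG_REQ_TYPE_VGROUP for themselves plus
 * CATALOG_REQ_TYPE_META for the super table `meters`; tableNo 1 reserves
 * CATALOG_REQ_TYPE_BOTH for itself (see collectTableMetaKey above). */
```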
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index ae5a281aab92ab5e365fe19e1769d95b2b43ea47..17e78e78061b69c9eff64ad6a5802369fefaf62d 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -476,9 +476,11 @@ static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) {
static int32_t buildTableReqFromDb(SHashObj* pDbsHash, SArray** pDbs) {
if (NULL != pDbsHash) {
- *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
if (NULL == *pDbs) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
SParseTablesMetaReq* p = taosHashIterate(pDbsHash, NULL);
while (NULL != p) {
@@ -530,7 +532,62 @@ static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
return TSDB_CODE_SUCCESS;
}
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+static int32_t buildCatalogReqForInsert(SParseContext* pCxt, const SParseMetaCache* pMetaCache,
+ SCatalogReq* pCatalogReq) {
+ int32_t ndbs = taosHashGetSize(pMetaCache->pInsertTables);
+ pCatalogReq->pTableMeta = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pTableHash = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableHash) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pUser = taosArrayInit(ndbs, sizeof(SUserAuthInfo));
+ if (NULL == pCatalogReq->pUser) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pCxt->pTableMetaPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+ pCxt->pTableVgroupPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+
+ int32_t metaReqNo = 0;
+ int32_t vgroupReqNo = 0;
+ SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL);
+ while (NULL != p) {
+ STablesReq req = {0};
+ strcpy(req.dbFName, p->dbFName);
+ TSWAP(req.pTables, p->pTableMetaReq);
+ taosArrayPush(pCatalogReq->pTableMeta, &req);
+
+ req.pTables = NULL;
+ TSWAP(req.pTables, p->pTableVgroupReq);
+ taosArrayPush(pCatalogReq->pTableHash, &req);
+
+ int32_t ntables = taosArrayGetSize(p->pTableMetaPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableMetaPos, *(int32_t*)taosArrayGet(p->pTableMetaPos, i), &metaReqNo);
+ ++metaReqNo;
+ }
+
+ ntables = taosArrayGetSize(p->pTableVgroupPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableVgroupPos, *(int32_t*)taosArrayGet(p->pTableVgroupPos, i), &vgroupReqNo);
+ ++vgroupReqNo;
+ }
+
+ SUserAuthInfo auth = {0};
+ strcpy(auth.user, pCxt->pUser);
+ strcpy(auth.dbFName, p->dbFName);
+ auth.type = AUTH_TYPE_WRITE;
+ taosArrayPush(pCatalogReq->pUser, &auth);
+
+ p = taosHashIterate(pMetaCache->pInsertTables, p);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildCatalogReqForQuery(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableReqFromDb(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = buildDbReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup);
@@ -560,6 +617,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
return code;
}
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ if (NULL != pMetaCache->pInsertTables) {
+ return buildCatalogReqForInsert(pCxt, pMetaCache, pCatalogReq);
+ }
+ return buildCatalogReqForQuery(pMetaCache, pCatalogReq);
+}
+
static int32_t putMetaDataToHash(const char* pKey, int32_t len, const SArray* pData, int32_t index, SHashObj** pHash) {
if (NULL == *pHash) {
*pHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -647,7 +711,8 @@ static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHas
return TSDB_CODE_SUCCESS;
}
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+int32_t putMetaDataToCacheForQuery(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
+ SParseMetaCache* pMetaCache) {
int32_t code = putDbTableDataToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, &pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = putDbDataToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, &pMetaCache->pDbVgroup);
@@ -677,6 +742,30 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
return code;
}
+int32_t putMetaDataToCacheForInsert(const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+ int32_t ndbs = taosArrayGetSize(pMetaData->pUser);
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SMetaRes* pRes = taosArrayGet(pMetaData->pUser, i);
+ if (TSDB_CODE_SUCCESS != pRes->code) {
+ return pRes->code;
+ }
+ if (!(*(bool*)pRes->pRes)) {
+ return TSDB_CODE_PAR_PERMISSION_DENIED;
+ }
+ }
+ pMetaCache->pTableMetaData = pMetaData->pTableMeta;
+ pMetaCache->pTableVgroupData = pMetaData->pTableHash;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt) {
+ if (insertValuesStmt) {
+ return putMetaDataToCacheForInsert(pMetaData, pMetaCache);
+ }
+ return putMetaDataToCacheForQuery(pCatalogReq, pMetaData, pMetaCache);
+}
+
static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
if (NULL == *pTables) {
*pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
@@ -977,6 +1066,82 @@ int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) {
return TSDB_CODE_SUCCESS;
}
+static int32_t reserveTableReqInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SInsertTablesMetaReq* pReq) {
+ switch (reqType) {
+ case CATALOG_REQ_TYPE_META:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_VGROUP:
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_BOTH:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ default:
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveTableReqInDbCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SHashObj* pDbs) {
+ SInsertTablesMetaReq req = {.pTableMetaReq = taosArrayInit(4, sizeof(SName)),
+ .pTableMetaPos = taosArrayInit(4, sizeof(int32_t)),
+ .pTableVgroupReq = taosArrayInit(4, sizeof(SName)),
+ .pTableVgroupPos = taosArrayInit(4, sizeof(int32_t))};
+ tNameGetFullDbName(pName, req.dbFName);
+ int32_t code = reserveTableReqInCacheForInsert(pName, reqType, tableNo, &req);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = taosHashPut(pDbs, pName->dbname, strlen(pName->dbname), &req, sizeof(SInsertTablesMetaReq));
+ }
+ return code;
+}
+
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pInsertTables) {
+ pMetaCache->pInsertTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pInsertTables) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ pMetaCache->sqlTableNum = tableNo;
+ SInsertTablesMetaReq* pReq = taosHashGet(pMetaCache->pInsertTables, pName->dbname, strlen(pName->dbname));
+ if (NULL == pReq) {
+ return reserveTableReqInDbCacheForInsert(pName, reqType, tableNo, pMetaCache->pInsertTables);
+ }
+ return reserveTableReqInCacheForInsert(pName, reqType, tableNo, pReq);
+}
+
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ *pMeta = pRes->pRes;
+ if (NULL == *pMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return pRes->code;
+}
+
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableVgroupPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableVgroupData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ memcpy(pVgroup, pRes->pRes, sizeof(SVgroupInfo));
+ }
+ return pRes->code;
+}
+
void destoryParseTablesMetaReqHash(SHashObj* pHash) {
SParseTablesMetaReq* p = taosHashIterate(pHash, NULL);
while (NULL != p) {
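
Because catalog requests are grouped per database, the i-th table of the SQL statement is generally not the i-th entry of the catalog response; `pTableMetaPos` / `pTableVgroupPos` carry that mapping. The following self-contained model (plain arrays standing in for `SArray`, illustrative table names) shows the indirection that `buildCatalogReqForInsert()` builds and `getTableMetaFromCacheForInsert()` consumes:

```c
#include <stdio.h>

int main(void) {
  /* Statement order: t0 (db1), t1 (db2), t2 (db1). Grouping per database puts
   * the requests in the order db1.t0, db1.t2, db2.t1, and the responses come
   * back in that same request order. */
  const char* responses[] = {"meta(db1.t0)", "meta(db1.t2)", "meta(db2.t1)"};

  /* Position array indexed by tableNo, filled while grouping the requests:
   * tableMetaPos[tableNo] is the index of that table's entry in responses[]. */
  int tableMetaPos[] = {0, 2, 1};

  for (int tableNo = 0; tableNo < 3; ++tableNo) {
    int reqIndex = tableMetaPos[tableNo];  /* the lookup getTableMetaFromCacheForInsert() performs */
    printf("tableNo %d -> %s\n", tableNo, responses[reqIndex]);
  }
  return 0;
}
```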
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 34cd783ace5c84608de6d62ae6b994c2fbb9e3c3..7e27132f3cbc453a5cf09bd487acc75fa546ff7e 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -185,7 +185,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
code = parseSqlSyntax(pCxt, pQuery, &metaCache);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildCatalogReq(&metaCache, pCatalogReq);
+ code = buildCatalogReq(pCxt, &metaCache, pCatalogReq);
}
destoryParseMetaCache(&metaCache, true);
terrno = code;
@@ -195,7 +195,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
- int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache);
+ int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pQuery->pRoot) {
code = parseInsertSql(pCxt, &pQuery, &metaCache);
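
Put together, the async driver calls these two entry points around one catalog round trip. A sketch of that flow follows; the catalog fetch is reduced to a placeholder comment, and the wrapper function and its name are assumptions, not part of the patch.

```c
#include "catalog.h"
#include "parser.h"
#include "taoserror.h"

/* Illustrative wrapper, not part of the patch. */
int32_t parseInsertAsync(SParseContext* pCxt, SQuery** ppQuery) {
  SCatalogReq catalogReq = {0};
  SMetaData   metaData = {0};

  /* Pass 1: syntax only. For INSERT ... VALUES this numbers the tables, fills
   * catalogReq from the per-tableNo reservations, and leaves pQuery->pRoot NULL. */
  int32_t code = qParseSqlSyntax(pCxt, ppQuery, &catalogReq);

  /* ... resolve catalogReq into metaData through the catalog module here ... */

  /* Pass 2: full parse. NULL == pQuery->pRoot is what makes putMetaDataToCache()
   * take the insert-specific branch (see the hunk above). */
  if (TSDB_CODE_SUCCESS == code) {
    code = qAnalyseSqlSemantic(pCxt, &catalogReq, &metaData, *ppQuery);
  }
  return code;
}
```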
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp
index 7302491ba7b15daca8333c4b9870eb3615e0c015..ddf15ec67bf2b77edd2e1e622aad409a9ecc0e69 100644
--- a/source/libs/parser/test/parInsertTest.cpp
+++ b/source/libs/parser/test/parInsertTest.cpp
@@ -13,21 +13,13 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include
-
#include
-#include "mockCatalogService.h"
-#include "os.h"
-#include "parInt.h"
+#include "parTestUtil.h"
using namespace std;
-using namespace std::placeholders;
-using namespace testing;
-namespace {
-string toString(int32_t code) { return tstrerror(code); }
-} // namespace
+namespace ParserTest {
// syntax:
// INSERT INTO
@@ -36,259 +28,60 @@ string toString(int32_t code) { return tstrerror(code); }
// [(field1_name, ...)]
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
-class InsertTest : public Test {
- protected:
- InsertTest() : res_(nullptr) {}
- ~InsertTest() { reset(); }
-
- void setDatabase(const string& acctId, const string& db) {
- acctId_ = acctId;
- db_ = db;
- }
-
- void bind(const char* sql) {
- reset();
- cxt_.acctId = atoi(acctId_.c_str());
- cxt_.db = (char*)db_.c_str();
- strcpy(sqlBuf_, sql);
- cxt_.sqlLen = strlen(sql);
- sqlBuf_[cxt_.sqlLen] = '\0';
- cxt_.pSql = sqlBuf_;
- }
-
- int32_t run() {
- code_ = parseInsertSql(&cxt_, &res_, nullptr);
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- }
- return code_;
- }
-
- int32_t runAsync() {
- cxt_.async = true;
- bool request = true;
- unique_ptr > metaCache(
- new SParseMetaCache(), std::bind(_destoryParseMetaCache, _1, cref(request)));
- code_ = parseInsertSyntax(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- unique_ptr catalogReq(new SCatalogReq(),
- MockCatalogService::destoryCatalogReq);
- code_ = buildCatalogReq(metaCache.get(), catalogReq.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- unique_ptr metaData(new SMetaData(), MockCatalogService::destoryMetaData);
- g_mockCatalogService->catalogGetAllMeta(catalogReq.get(), metaData.get());
-
- metaCache.reset(new SParseMetaCache());
- request = false;
- code_ = putMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- code_ = parseInsertSql(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- return code_;
- }
-
- void dumpReslut() {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- cout << "payloadType:" << (int32_t)pStmt->payloadType << ", insertType:" << pStmt->insertType
- << ", numOfVgs:" << num << endl;
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- cout << "vgId:" << vg->vg.vgId << ", numOfTables:" << vg->numOfTables << ", dataSize:" << vg->size << endl;
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- cout << "length:" << ntohl(submit->length) << ", numOfBlocks:" << ntohl(submit->numOfBlocks) << endl;
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- cout << "Block:" << i << endl;
- cout << "\tuid:" << be64toh(blk->uid) << ", tid:" << be64toh(blk->suid) << ", sversion:" << ntohl(blk->sversion)
- << ", dataLen:" << ntohl(blk->dataLen) << ", schemaLen:" << ntohl(blk->schemaLen)
- << ", numOfRows:" << ntohl(blk->numOfRows) << endl;
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- void checkReslut(int32_t numOfTables, int32_t numOfRows1, int32_t numOfRows2 = -1) {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- ASSERT_EQ(pStmt->payloadType, PAYLOAD_TYPE_KV);
- ASSERT_EQ(pStmt->insertType, TSDB_QUERY_TYPE_INSERT);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- ASSERT_GE(num, 0);
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- ASSERT_EQ(vg->numOfTables, numOfTables);
- ASSERT_GE(vg->size, 0);
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- ASSERT_GE(ntohl(submit->length), 0);
- ASSERT_GE(ntohl(submit->numOfBlocks), 0);
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- ASSERT_EQ(ntohl(blk->numOfRows), (0 == i ? numOfRows1 : (numOfRows2 > 0 ? numOfRows2 : numOfRows1)));
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- private:
- static const int max_err_len = 1024;
- static const int max_sql_len = 1024 * 1024;
-
- static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
- destoryParseMetaCache(pMetaCache, request);
- delete pMetaCache;
- }
-
- void reset() {
- memset(&cxt_, 0, sizeof(cxt_));
- memset(errMagBuf_, 0, max_err_len);
- cxt_.pMsg = errMagBuf_;
- cxt_.msgLen = max_err_len;
- code_ = TSDB_CODE_SUCCESS;
- qDestroyQuery(res_);
- res_ = nullptr;
- }
-
- SVnodeModifOpStmt* getVnodeModifStmt(SQuery* pQuery) { return (SVnodeModifOpStmt*)pQuery->pRoot; }
-
- string acctId_;
- string db_;
- char errMagBuf_[max_err_len];
- char sqlBuf_[max_sql_len];
- SParseContext cxt_;
- int32_t code_;
- SQuery* res_;
-};
+class ParserInsertTest : public ParserTestBase {};
// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...)
-TEST_F(InsertTest, singleTableSingleRowTest) {
- setDatabase("root", "test");
-
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
+TEST_F(ParserInsertTest, singleTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)");
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
-
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO t1 (ts, c1, c2, c3, c4, c5) VALUES (now, 1, 'beijing', 3, 4, 5)");
}
// INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...)
-TEST_F(InsertTest, singleTableMultiRowTest) {
- setDatabase("root", "test");
-
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
- "(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
+TEST_F(ParserInsertTest, singleTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)"
+ "(now+1s, 2, 'shanghai', 6, 7, 8)"
"(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableSingleRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, multiTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 1);
-
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 VALUES (now, 1, 'beijing') st1s2 VALUES (now, 10, '131028')");
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableMultiRowTest) {
- setDatabase("root", "test");
-
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 3, 2);
+TEST_F(ParserInsertTest, multiTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou') "
+ "st1s2 VALUES (now, 10, '131028')(now+1s, 20, '132028')");
}
// INSERT INTO
// tb1_name USING st1_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
// tb2_name USING st2_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
-TEST_F(InsertTest, autoCreateTableTest) {
- setDatabase("root", "test");
-
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
+TEST_F(ParserInsertTest, autoCreateTableTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 USING st1 TAGS(1, 'wxy', now) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 (ts, c1, c2) USING st1 (tag1, tag2) TAGS(1, 'wxy') "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) values (now, 1, \"beijing\")"
- "st1s1 using st1 tags(1, 'wxy', now) values (now+1s, 2, \"shanghai\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) VALUES (now, 1, 'beijing') "
+ "st1s2 (ts, c1, c2) USING st1 TAGS(2, 'abc', now) VALUES (now+1s, 2, 'shanghai')");
}
-TEST_F(InsertTest, toleranceTest) {
- setDatabase("root", "test");
-
- bind("insert into");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
-
- bind("insert into");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
-}
+} // namespace ParserTest
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 3fe4b533e44fe70e8e999ef3cacd15715cd632dd..98281b7bf070095b4bb23326b156d5e8764690de 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -225,16 +225,17 @@ class ParserTestBaseImpl {
DO_WITH_THROW(collectMetaKey, pCxt, pQuery, pMetaCache);
}
- void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
- DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
+ void doBuildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ DO_WITH_THROW(buildCatalogReq, pCxt, pMetaCache, pCatalogReq);
}
void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) {
DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData);
}
- void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
- DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
+ void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool isInsertValues) {
+ DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache, isInsertValues);
}
void doAuthenticate(SParseContext* pCxt, SQuery* pQuery, SParseMetaCache* pMetaCache) {
@@ -261,7 +262,9 @@ class ParserTestBaseImpl {
void doParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq) {
DO_WITH_THROW(qParseSqlSyntax, pCxt, pQuery, pCatalogReq);
ASSERT_NE(*pQuery, nullptr);
- res_.parsedAst_ = toString((*pQuery)->pRoot);
+ if (nullptr != (*pQuery)->pRoot) {
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
}
void doAnalyseSqlSemantic(SParseContext* pCxt, const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
@@ -270,6 +273,17 @@ class ParserTestBaseImpl {
res_.calcConstAst_ = toString(pQuery->pRoot);
}
+ void doParseInsertSql(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSql, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
+
+ void doParseInsertSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSyntax, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ }
+
string toString(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
@@ -287,15 +301,20 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
+ if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseInsertSql(&cxt, query.get(), nullptr);
+ } else {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParse(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
- doAuthenticate(&cxt, pQuery, nullptr);
+ doAuthenticate(&cxt, pQuery, nullptr);
- doTranslate(&cxt, pQuery, nullptr);
+ doTranslate(&cxt, pQuery, nullptr);
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
if (g_dump) {
dump();
@@ -338,17 +357,22 @@ class ParserTestBaseImpl {
setParseContext(sql, &cxt, true);
unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
-
- bool request = true;
+ bool request = true;
unique_ptr > metaCache(
new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request)));
- doCollectMetaKey(&cxt, pQuery, metaCache.get());
+ bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen);
+ if (isInsertValues) {
+ doParseInsertSyntax(&cxt, query.get(), metaCache.get());
+ } else {
+ doParse(&cxt, query.get());
+ doCollectMetaKey(&cxt, *(query.get()), metaCache.get());
+ }
+
+ SQuery* pQuery = *(query.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- doBuildCatalogReq(metaCache.get(), catalogReq.get());
+ doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get());
string err;
thread t1([&]() {
@@ -358,13 +382,17 @@ class ParserTestBaseImpl {
metaCache.reset(new SParseMetaCache());
request = false;
- doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
+ doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues);
- doAuthenticate(&cxt, pQuery, metaCache.get());
+ if (isInsertValues) {
+ doParseInsertSql(&cxt, query.get(), metaCache.get());
+ } else {
+ doAuthenticate(&cxt, pQuery, metaCache.get());
- doTranslate(&cxt, pQuery, metaCache.get());
+ doTranslate(&cxt, pQuery, metaCache.get());
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 6e3067d44edc67f10944cdde2ffc72fbd4b57fea..7b06967940e42a8b0cef13a775fa64bfe512719c 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -624,6 +624,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file"
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
+TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
#ifdef TAOS_ERROR_C
};
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 203541f14a49e27d8298cb6f21077bae8cfbc0b9..600c64b8e6ac0a521d3c736c3256c79dfcefbf8e 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -1120,7 +1120,7 @@ class Database:
@classmethod
def setupLastTick(cls):
# start time will be auto generated , start at 10 years ago local time
- local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
+ local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
local_epoch_time = [int(i) for i in local_time.split("-")]
#local_epoch_time will be such as : [2022, 7, 18]
diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py
index fd73f97fcbd0f241ac96a241ff3f912a67ce58d4..6a8a59a02700a4e15320532e0f20b2d4e0b0c336 100644
--- a/tests/pytest/crash_gen/shared/misc.py
+++ b/tests/pytest/crash_gen/shared/misc.py
@@ -46,7 +46,7 @@ class Logging:
@classmethod
def _get_datetime(cls):
- return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
+ return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
@classmethod
def getLogger(cls):