Commit da30668b authored by Cary Xu

Merge branch 'develop' into feature/TD-6117

@@ -234,6 +234,7 @@ pipeline {
        cd ${WKC}/tests/examples/nodejs
        npm install td2.0-connector > /dev/null 2>&1
        node nodejsChecker.js host=localhost
        node test1970.js
        '''
        sh '''
        cd ${WKC}/tests/examples/C#/taosdemo
@@ -256,14 +257,12 @@ pipeline {
      steps {
        pre_test()
        timeout(time: 60, unit: 'MINUTES'){
          sh '''
          cd ${WKC}/tests/pytest
          ./crash_gen.sh -a -p -t 4 -s 2000
          '''
        }
        timeout(time: 60, unit: 'MINUTES'){
          // sh '''
          // cd ${WKC}/tests/pytest
......
@@ -208,7 +208,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means passed official test verification; ○ means passed unofficial test verification; -- means not verified.
Please visit [Connectors](https://www.taosdata.com/cn/documentation/connector) for more detailed information.
@@ -46,7 +46,7 @@ TDengine's JDBC driver implementation stays as consistent as possible with relational database drivers
</tr>
</table>
Note: unlike the JNI approach, the RESTful interface is stateless, so when using JDBC-RESTful you must specify the database name of tables and STables in the SQL statement. (Starting from TDengine 2.2.0.0, the default database for the current SQL statement can also be specified in the RESTful URL.) For example:
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
```
......
@@ -17,7 +17,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
Here ● means passed official test verification; ○ means passed unofficial test verification; -- means not verified.
Note:
@@ -654,7 +654,7 @@ conn.close()
To support development on all kinds of platforms, TDengine provides an API that conforms to REST design standards, i.e. a RESTful API. To minimize the learning cost, and unlike the RESTful API designs of other databases, TDengine operates the database directly through the SQL statement contained in the BODY of an HTTP POST request; nothing but a URL is needed. For the use of the RESTful connector, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html).
Note: one difference from the native connectors is that the RESTful interface is stateless, so the `USE db_name` command has no effect; all references to table names and STable names must carry the database name prefix. (Starting from version 2.2.0.0, specifying a db_name in the RESTful URL is supported; if the SQL statement carries no database prefix, the db_name given in the URL is used.)
### Installation
@@ -695,7 +695,7 @@ http://<fqdn>:<port>/rest/sql/[db_name]
- fqdn: the FQDN or IP address of any host in the cluster
- port: the httpPort item in the configuration file, 6041 by default
- db_name: optional parameter, the default database name for the SQL statement being executed. (Supported starting from version 2.2.0.0.)
For example: http://h1.taos.com:6041/rest/sql/test is a URL pointing to h1.taos.com:6041 that sets the default database name to test.
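As a sketch of the db_name parameter's effect (assuming the test.meters STable that taosdemo creates elsewhere in these docs; the statements themselves are illustrative):
```sql
-- Posted to http://h1.taos.com:6041/rest/sql/test: the URL supplies the
-- default database, so the table reference may omit its prefix
SELECT COUNT(*) FROM meters;
-- Posted to http://h1.taos.com:6041/rest/sql: no default database, so the
-- stateless interface needs a fully qualified name
SELECT COUNT(*) FROM test.meters;
```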
@@ -984,15 +984,18 @@ go build
### Using the Go connector
TDengine provides the Go driver package `taosSql`. `taosSql` implements the Go built-in interface `database/sql/driver`, so users can access TDengine in an application simply by importing the package as follows.
```go
import (
    "database/sql"
    _ "github.com/taosdata/driver-go/v2/taosSql"
)
```
**Tip**: there must be a space between the underscore and the double quote.
The v2 version of `taosSql` has been refactored: the built-in database operation interface `database/sql/driver` is split out into the `taosSql` directory, while other features such as subscription and stmt are placed in the `af` directory.
### Common APIs
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
......
@@ -14,7 +14,7 @@ TDengine's cluster management is extremely simple; apart from adding and removing nodes, which require manual intervention,
**Step 1**: If any physical node used to build the cluster holds previous test data, once ran a 1.x release, or once ran any other version of TDengine, delete it first and clear all data (to keep existing data, contact the TAOS Data delivery team for an upgrade and data migration); for the specific steps, see the blog post [Installation and Uninstallation of the Various TDengine Packages](https://www.taosdata.com/blog/2019/08/09/566.html).
**Note 1:** Because FQDN information is written into files, if the FQDN was not configured or was changed while TDengine was already started, make sure to clean up the previous data (`rm -rf /var/lib/taos/*`) on the premise that the data is useless or has been backed up;
**Note 2:** The client also needs to be configured so that it can correctly resolve the FQDN of each node, whether through a DNS service or by modifying the hosts file.
**Step 2**: It is recommended to close the firewall on all physical nodes, or at least ensure that TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built;
@@ -79,13 +79,13 @@ Query OK, 1 row(s) in set (0.006385s)
taos>
```
In the output above you can see that the End Point of this newly started data node is h1.taos.com:6030, which is the firstEp of the new cluster.
## <a class="anchor" id="node-other"></a>Start subsequent data nodes
To add subsequent data nodes to the existing cluster, follow these steps:
1. Start taosd on each physical node following the method in the [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/) chapter; (note: every physical node needs to set the firstEp parameter in its taos.cfg file to the End Point of the first node of the new cluster, h1.taos.com:6030 in this example)
2. On the first data node, use the CLI program taos to log in to TDengine and execute the command:
@@ -110,7 +110,7 @@ taos>
**Tips:** (a quick verification sketch follows this list)
- Any data node that has already joined the cluster and is online can serve as the firstEp of nodes added later.
- The firstEp parameter only takes effect when a data node joins the cluster for the first time; after joining, the node saves the latest End Point list of the mnodes and no longer relies on this parameter.
- From then on, the firstEp parameter in the configuration file mainly serves client connections; for example, if the taos shell is started without arguments, it connects to the node specified by firstEp by default.
- Two dnodes started without the firstEp parameter configured will each run independently. At that point one of them cannot be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster.**
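As a quick verification sketch, the cluster state can be inspected from the taos shell of any node already in the cluster:
```sql
-- Lists every dnode with its End Point and status; a successfully added
-- data node shows up with status "ready"
SHOW DNODES;
```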
......
@@ -800,7 +800,7 @@ taos -n sync -P 6042 -h <fqdn of server>
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
Starting from version 2.2.0.0, the taos tool provides a network speed diagnostic mode that runs a speed test, with uncompressed transfer, against a running taosd instance or a server instance simulated with `taos -n server`. The adjustable parameters in this mode are:
-n: when set to "speed", diagnoses network speed.
-h: the FQDN or IP address of the server to connect to. If not set, the FQDN setting in the local taos.cfg file is used as the default.
@@ -813,7 +813,7 @@ taos -n sync -P 6042 -h <fqdn of server>
`taos -n fqdn -h <fqdn of server>`
Starting from version 2.2.0.0, the taos tool provides an FQDN resolution diagnostic mode that attempts to resolve a target FQDN and records the time consumed by the resolution. The adjustable parameters in this mode are:
-n: when set to "fqdn", diagnoses FQDN resolution.
-h: the target FQDN address to resolve. If not set, the FQDN setting in the local taos.cfg file is used as the default.
......
@@ -35,7 +35,7 @@ taos> DESCRIBE meters;
- The built-in function now returns the current time of the client
- When inserting a record, if the timestamp is now, the current time of the client submitting the record is used
- Epoch Time: the timestamp can also be a long integer representing the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT). (Correspondingly, if the database's time precision is set to "microseconds", a long-integer timestamp counts microseconds since 1970-01-01 00:00:00.000 (UTC/GMT); the same logic applies to nanosecond precision.)
- Times can be added and subtracted; for example, now-2h pushes the query time back 2 hours (the last 2 hours). The time unit after the number can be b (nanoseconds), u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (natural month) and y (natural year) can also be used; see the sketch below.
TDengine's default timestamp precision is milliseconds, but microsecond and nanosecond precision are supported via the PRECISION parameter of CREATE DATABASE. (Nanosecond precision is supported starting from version 2.1.5.0.)
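A short sketch of time arithmetic and down-sampling units (assuming the test.d0 subtable and test.meters STable created by taosdemo; thresholds are illustrative):
```sql
-- Records from the last 2 hours; now resolves to the client's current time
SELECT * FROM test.d0 WHERE ts > now-2h;
-- Monthly averages over the last 90 days; the unit n (natural month) is
-- valid only inside INTERVAL, not in timestamp arithmetic
SELECT AVG(current) FROM test.meters WHERE ts > now-90d INTERVAL(1n);
```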
@@ -713,21 +713,49 @@ Query OK, 1 row(s) in set (0.001091s)
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| is [not] null | is null or is not null | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
| in | match any value in a set | all types except first column `timestamp` |
| like | match a wildcard string | **`binary`** **`nchar`** |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. The <> operator can also be written as != ; note that it cannot be applied to the timestamp field in the first column of a table.
2. The like operator matches against a wildcard string.
    * In a wildcard string, '%' (percent sign) matches 0 to any number of characters, and '\_' (underscore) matches any single character.
    * To match a literal _ (underscore) contained in a string, write it in the wildcard string as `\_`, i.e. escape it with a backslash. (Supported starting from version 2.2.0.0; see the sketch after this list.)
    * A wildcard string may not exceed 20 bytes. (Starting from version 2.1.6.1 the limit is relaxed to 100 bytes, configurable via the maxWildCardsLength parameter in taos.cfg. Overly long wildcard strings are not recommended, as they may severely degrade the performance of LIKE operations.)
3. To filter ranges of multiple fields at once, use the keyword AND to join the conditions; filters on different columns joined by OR are not yet supported.
    * Starting from version 2.3.0.0, full AND/OR combinations within the same column and/or across columns are supported.
4. For a single field, only one time-filter condition is supported per statement; but for other (regular) columns or tag columns, OR can combine filter conditions, e.g. `((value > 20 AND value < 30) OR (value < 12))`.
    * Starting from version 2.3.0.0, multiple time-filter conditions are allowed, but the filter result on the first timestamp column must still form a single interval.
5. Starting from version 2.0.17.0, filtering supports the BETWEEN AND syntax; e.g. `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25".
6. Starting from version 2.1.4.0, filtering supports the IN operator; e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Note: BOOL values may be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE values are subject to floating-point precision, and a set value matches a row value only if they are equal within precision; for TIMESTAMP, non-primary-key columns are supported.<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
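A brief sketch of these filters (assuming the test.meters STable and test.d0 subtable created by taosdemo; patterns and thresholds are illustrative only):
```sql
-- BETWEEN AND on a numeric column (from 2.0.17.0)
SELECT * FROM test.meters WHERE current BETWEEN 9.5 AND 10.5;
-- IN on a tag column (from 2.1.4.0)
SELECT COUNT(*) FROM test.meters WHERE location IN ('beijing', 'shanghai');
-- OR combining conditions on the same regular column
SELECT * FROM test.d0 WHERE (voltage > 222 AND voltage < 225) OR (voltage < 217);
-- A backslash-escaped underscore matches a literal "_" (from 2.2.0.0)
SELECT * FROM test.meters WHERE location LIKE 'bei\_%';
```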
<a class="anchor" id="nested"></a>
### 嵌套查询
“嵌套查询”又称为“子查询”,也即在一条 SQL 语句中,“内层查询”的计算结果可以作为“外层查询”的计算对象来使用。
从 2.2.0.0 版本开始,TDengine 的查询引擎开始支持在 FROM 子句中使用非关联子查询(“非关联”的意思是,子查询不会用到父查询中的参数)。也即在普通 SELECT 语句的 tb_name_list 位置,用一个独立的 SELECT 语句来代替(这一 SELECT 语句被包含在英文圆括号内),于是完整的嵌套查询 SQL 语句形如:
```mysql
SELECT ... FROM (SELECT ... FROM ...) ...;
```
说明:
1. 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
2. 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
3. 目前不能在“连续查询”功能中使用子查询。
4. 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
5. 目前内层查询、外层查询均不支持 UNION 操作。
6. 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
* 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
7. 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
* 计算函数部分:
1. 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
2. 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
* 外层查询中不支持 IN 算子,但在内层中可以使用。
* 外层查询不支持 GROUP BY。
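As a concrete sketch (assuming the test.meters STable created by taosdemo): the inner query builds a virtual table holding the last voltage of each subtable, and the outer query aggregates over it:
```sql
-- One nesting level: the parenthesized inner SELECT stands in for a table name
SELECT AVG(v) FROM (SELECT LAST(voltage) AS v FROM test.meters GROUP BY tbname);
```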
<a class="anchor" id="union"></a> <a class="anchor" id="union"></a>
### UNION ALL 操作符 ### UNION ALL 操作符
...@@ -1220,27 +1248,36 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ...@@ -1220,27 +1248,36 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```
Function: returns the records of the specified fields of a table/STable at the specified time cross-section.
Returned data type: same as the field type.
Applicable fields: numeric fields.
Applies to: **tables and STables**.
Note: (this function was added in version 2.0.15.0) INTERP requires a specified time cross-section; if no data exists exactly at that cross-section, interpolation is performed according to the FILL parameter. In addition, the condition clause may carry filter conditions such as tags or tbname.
Example:
```sql
taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004';
       interp(ts)        | interp(current) | interp(voltage) | interp(phase) |
==========================================================================================
 2017-07-14 18:40:00.004 |         9.84020 |             216 |       0.32222 |
Query OK, 1 row(s) in set (0.002652s)
```
If no data corresponds to the given timestamp, then without an interpolation strategy no result is returned; if an interpolation strategy is specified, results are returned according to that strategy.
```sql
taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005';
Query OK, 0 row(s) in set (0.004022s)

taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV);
       interp(ts)        | interp(current) | interp(voltage) | interp(phase) |
==========================================================================================
 2017-07-14 18:40:00.005 |         9.88150 |             217 |       0.32500 |
Query OK, 1 row(s) in set (0.003056s)
```
### Computation functions
@@ -1423,17 +1460,19 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- A SELECT query may return at most 1024 columns (function calls in the statement may also occupy column slots); if the limit is exceeded, explicitly specify fewer return columns to avoid execution errors.
- The number of databases, STables, and tables is not limited by the system, only by system resources.
## Other TAOS SQL conventions
**Restrictions on GROUP BY**
TAOS SQL supports GROUP BY on tags and TBNAME, and also on a regular column, provided that only one column is used and it has fewer than 100,000 unique values.
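For instance, a minimal sketch of grouping by a tag column (assuming test.meters from taosdemo):
```sql
-- One group per distinct value of the location tag
SELECT COUNT(*) FROM test.meters GROUP BY location;
```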
**Restrictions on JOIN**
TAOS SQL supports joining the columns of two tables on the primary-key timestamp; arithmetic over the aggregated results of two tables is not yet supported.
Filter conditions on different tables in a JOIN query cannot be combined with OR; a sketch follows.
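A minimal join sketch (assuming the taosdemo subtables d0 and d1, with test as the current database):
```sql
-- Two tables joined on the primary-key timestamp; filter conditions that
-- span both tables must be combined with AND, not OR
SELECT d0.ts, d0.current, d1.current FROM d0, d1 WHERE d0.ts = d1.ts;
```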
**Applicability of IS NOT NULL and non-empty expressions**
IS NOT NULL supports columns of all types. The non-empty expression <> "" applies only to columns of non-numeric types.
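A short sketch of the two forms (assuming test.d0 and test.meters from taosdemo; location is a binary tag):
```sql
-- IS NOT NULL works on a column of any type
SELECT * FROM test.d0 WHERE voltage IS NOT NULL;
-- <> "" applies only to non-numeric (binary/nchar) columns
SELECT COUNT(*) FROM test.meters WHERE location <> "";
```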
@@ -6,7 +6,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenes](/evaluation#scenes)
* [TDengine Performance Metrics and Verification](/evaluation#)
## [Getting Started](/getting-started)
......
# Quickly experience TDengine through Docker
While deploying TDengine via Docker is not recommended for production environments, Docker does a good job of shielding the environmental differences of the underlying operating system and is well suited to development, testing, or a first-time experience of installing and running TDengine. In particular, Docker makes it relatively easy to try TDengine on macOS and Windows without installing a virtual machine or renting an extra Linux server. In addition, starting from version 2.0.14.0, TDengine provides images for the X86-64, X86, arm64, and arm32 platforms, so non-mainstream computers that can run Docker, such as NAS devices, Raspberry Pi, and embedded development boards, can also easily run TDengine by following this document.
The following sections explain, step by step, how to quickly build a single-node TDengine runtime environment via Docker to support development and testing.
## Docker download
Docker itself can be downloaded from the [Docker official site](https://docs.docker.com/get-docker/).
After installation, you can check the Docker version in the command line terminal. If the version number is output properly, the Docker environment has been installed successfully.
```bash
$ docker -v
Docker version 20.10.3, build 48d30b5
```
## Running TDengine in a Docker container
1. Use the following command to pull the TDengine image and run it in the background.
```bash
$ docker run -d --name tdengine tdengine/tdengine
7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
```
- **docker run**: Run a container via Docker.
- **--name tdengine**: Set the container name; the container can later be referenced by this name.
- **-d**: Keep the container running in the background.
- **tdengine/tdengine**: The official published TDengine application image to pull from.
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**: The long string returned is the container ID; the container can also be referenced by this ID.
2. Verify that the container is running correctly.
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**: Lists information about all containers that are in running state.
- **CONTAINER ID**: Container ID.
- **IMAGE**: The image used.
- **COMMAND**: The command to run when starting the container.
- **CREATED**: The time when the container was created.
- **STATUS**: The container status. Up means running.
3. Enter the Docker container and use TDengine.
```bash
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
- **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop.
- **-i**: Enter the interactive mode.
- **-t**: Specify a terminal.
- **c452519b0f9b**: The container ID, which needs to be modified according to the value returned by the docker ps command.
- **/bin/bash**: Load the container and run bash to interact with it.
4. After entering the container, run the taos shell client program.
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
The TDengine terminal successfully connects to the server and prints out a welcome message and version information. If it fails, an error message is printed.
In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql).
## Learn more about TDengine with taosdemo
1. Following the above steps, first exit the TDengine terminal program.
```bash
taos> q
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
2. Run taosdemo from the command line interface.
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
taosdemo is simulating data generated by power equipments monitoring...
host: 127.0.0.1:6030
user: root
password: taosdata
configDir:
resultFile: ./output.txt
thread num of insert data: 10
thread num of create table: 10
top insert interval: 0
number of records per req: 30000
max sql length: 1048576
database count: 1
database[0]:
database[0] name: test
drop: yes
replica: 1
precision: ms
super table count: 1
super table[0]:
stbName: meters
autoCreateTable: no
childTblExists: no
childTblCount: 10000
childTblPrefix: d
dataSource: rand
iface: taosc
insertRows: 10000
interlaceRows: 0
disorderRange: 1000
disorderRatio: 0
maxSqlLen: 1048576
timeStampStep: 1
startTimestamp: 2017-07-14 10:40:00.000
sampleFormat:
sampleFile:
tagsFile:
columnCount: 3
column[0]:FLOAT column[1]:INT column[2]:FLOAT
tagCount: 2
tag[0]:INT tag[1]:BINARY(16)
Press enter key to continue or Ctrl-C to stop
```
After you press Enter, the command automatically creates a STable meters under the database test, with 10,000 tables under it named "d0" through "d9999". Each table holds 10,000 records, and each record has four fields (ts, current, voltage, phase). Timestamps range from "2017-07-14 10:40:00.000" to "2017-07-14 10:40:09.999". Each table carries the tags location and groupId: groupId is set from 1 to 10, and location is set to "beijing" or "shanghai".
Executing this command takes a few minutes and ends up inserting a total of 100 million records.
3. Go to the TDengine terminal and view the data generated by taosdemo.
- **Go to the terminal interface.**
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
- **View the database.**
```bash
taos> show databases;
name | created_time | ntables | vgroups | ···
test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
```
- **View Super Tables.**
```bash
taos> use test;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.003259s)
```
- **View the table and limit the output to 10 entries.**
```bash
taos> select * from test.t0 limit 10;
DB error: Table does not exist (0.002857s)
taos> select * from test.d0 limit 10;
ts | current | voltage | phase |
======================================================================================
2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
Query OK, 10 row(s) in set (0.016791s)
```
- **View the tag values for the d0 table.**
```bash
taos> select groupid, location from test.d0;
groupid | location |
=================================
0 | shanghai |
Query OK, 1 row(s) in set (0.003490s)
```
## Stop the TDengine service that is running in Docker
```bash
$ docker stop tdengine
tdengine
```
- **docker stop**: Stop the specified running container.
- **tdengine**: The name of the container.
## Connecting to TDengine in Docker during application development
There are two approaches to connecting from outside Docker to a TDengine service running inside a container:
1. Port mapping (-p): map a network port opened inside the container to a specified port of the host. Mounting a local directory (-v) synchronizes data between the host and the container, preventing data loss after the container is deleted.
```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- The first command starts a docker container with TDengine running and maps the 6041 port of the container to port 6041 of the host.
- The second command, accessing TDengine through the RESTful interface, connects to port 6041 on the local machine, so the connection is successful.
Note: In this example, for convenience, only port 6041 is mapped, which is the port required for RESTful access. To connect to the TDengine service in a non-RESTful manner, you need to map a total of 11 ports starting at 6030. Likewise, the example mounts only the /etc/taos directory containing the configuration files, not the data storage directory.
2. Enter the Docker container via the exec command and develop there; that is, place the application code in the same Docker container as the TDengine server and connect to the TDengine service locally inside the container.
```bash
$ docker exec -it tdengine /bin/bash
```
@@ -2,7 +2,7 @@
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of 3 components: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS and more will come soon), while the client can be installed and run on either Windows or Linux. Applications based on any OS can connect to the server taosd via a RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source ### <a class="anchor" id="source-install"></a>Install from Source
...@@ -10,7 +10,9 @@ Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo ...@@ -10,7 +10,9 @@ Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo
### Install from Docker Container ### Install from Docker Container
Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html). For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments.
Please refer to the detailed operation in [Quickly experience TDengine through Docker](https://www.taosdata.com/en/documentation/getting-started/docker).
### <a class="anchor" id="package-install"></a>Install from Package ### <a class="anchor" id="package-install"></a>Install from Package
......
@@ -119,7 +119,7 @@ As the data points are a series of data points over time, the data points genera
9. in addition to storage and query operations, various statistical and real-time calculation operations are also required;
10. data volume is huge: a system may generate over 10 billion data points in a day.
In light of the characteristics mentioned above, TDengine designs its storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
### Relational Database Model
@@ -139,7 +139,7 @@ TDengine suggests using collection point ID as the table name (like D1001 in the
### STable: A Collection of Data Points of the Same Type
The one-table-per-point method leads to a rapidly growing number of tables, which are difficult to manage. Moreover, applications often need to perform aggregations across collection points, and those aggregations become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine.
STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, or string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
......
@@ -26,6 +26,7 @@ Replace the database operating in the current connection with “power”, other
- Any table or STable belongs to a database. Before creating a table, a database must be created first.
- Tables in two different databases cannot be JOINed.
- You need to specify a timestamp when creating and inserting records, and when querying historical records (a sketch follows this list).
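A minimal sketch, assuming the d1001 meter table of the power example (columns ts, current, voltage, phase; the values and the timestamp literal are illustrative):
```sql
-- now resolves to the client's current time when the record is inserted
INSERT INTO d1001 VALUES (now, 10.2, 219, 0.32);
-- historical queries likewise filter on the timestamp column
SELECT * FROM d1001 WHERE ts >= '2018-10-03 14:38:05.000';
```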
## <a class="anchor" id="create-stable"></a> Create a STable ## <a class="anchor" id="create-stable"></a> Create a STable
......
@@ -179,7 +179,7 @@ Clean up the running environment and call this API before the application exits.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
Set client options; currently only the time zone setting (`_TSDB_OPTIONTIMEZONE`) and encoding setting (`_TSDB_OPTIONLOCALE`) are supported. The time zone and encoding default to the current operating system settings.
- `char *taos_get_client_info()`
@@ -296,9 +296,7 @@ Asynchronous APIs have relatively high requirements for users, who can selective
The asynchronous APIs of TDengine all use a non-blocking calling mode. Applications can use multithreading to open multiple tables at the same time, and can query or insert into each open table at the same time. It should be pointed out that the **application client must ensure that operations on the same table are completely serialized**; that is, while an insertion or query on a table has not yet completed (no result returned), a second insertion or query operation on that table must not be started.
<a class="anchor" id="stmt"></a>
### Parameter binding API
In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using the question mark `?` to represent the parameters to be bound, as follows:
@@ -823,12 +821,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html
TDengine provides the Go driver taosSql, which implements the Go built-in interface database/sql/driver. Users can access TDengine in an application by simply importing the package as follows; see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details.
Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go .
```Go
import (
    "database/sql"
    _ "github.com/taosdata/driver-go/v2/taosSql"
)
```
@@ -839,6 +837,8 @@ go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
```
`taosSql` v2 is a refactoring of v1: the built-in database operation interface `database/sql/driver` has been separated into the directory `taosSql`, and other advanced functions such as subscription and stmt have been put into the directory `af`.
### Common APIs
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
@@ -937,7 +937,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh
Steps:
1. Create a new installation verification directory, for example `~/tdengine-test`, and copy the nodejsChecker.js source program from GitHub. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).
2. Execute the following command:
......
@@ -16,7 +16,7 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19
**Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or was changed before, and TDengine has already been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`) on the premise that the data is useless or has been backed up;
**Note 2:** The client also needs to be configured to ensure that it can correctly resolve the FQDN configuration of each node, whether through a DNS service or by modifying the hosts file.
**Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built;
......
@@ -20,20 +20,19 @@ fi
# Dynamic directory
if [ "$osType" != "Darwin" ]; then
    data_dir="/var/lib/taos"
    log_dir="/var/log/taos"
else
    data_dir="/usr/local/var/lib/taos"
    log_dir="/usr/local/var/log/taos"
fi
if [ "$osType" != "Darwin" ]; then
    cfg_install_dir="/etc/taos"
else
    cfg_install_dir="/usr/local/etc/taos"
fi
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
...@@ -41,6 +40,10 @@ if [ "$osType" != "Darwin" ]; then ...@@ -41,6 +40,10 @@ if [ "$osType" != "Darwin" ]; then
lib_link_dir="/usr/lib" lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64" lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include" inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi fi
#install main path
@@ -144,12 +147,13 @@ function install_main_path() {
function install_bin() {
    # Remove links
    ${csudo} rm -f ${bin_link_dir}/taos || :
    ${csudo} rm -f ${bin_link_dir}/taosd || :
    ${csudo} rm -f ${bin_link_dir}/taosdemo || :
    ${csudo} rm -f ${bin_link_dir}/taosdump || :
    if [ "$osType" != "Darwin" ]; then
        ${csudo} rm -f ${bin_link_dir}/perfMonitor || :
        ${csudo} rm -f ${bin_link_dir}/set_core || :
        ${csudo} rm -f ${bin_link_dir}/rmtaos || :
    fi
@@ -167,11 +171,12 @@ function install_bin() {
    ${csudo} chmod 0555 ${install_main_dir}/bin/*
    #Make link
    [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
    [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
    [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
    [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
    if [ "$osType" != "Darwin" ]; then
        [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
        [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
    fi
@@ -288,18 +293,14 @@ function install_config() {
}
function install_log() {
    ${csudo} rm -rf ${log_dir} || :
    ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
    ${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
function install_data() {
    ${csudo} mkdir -p ${data_dir}
    ${csudo} ln -s ${data_dir} ${install_main_dir}/data
}
function install_connector() {
@@ -496,10 +497,7 @@ function install_TDengine() {
    install_main_path
    install_data
    install_log
    install_header
    install_lib
......
This file's diff is collapsed.
@@ -448,6 +448,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
  int dcol = 0;
  while (dcol < pCols->numOfCols) {
    bool setCol = 0;
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= schemaNCols(pSchema)) {
      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
@@ -458,13 +459,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
    STColumn *pRowCol = schemaColAt(pSchema, rcol);
    if (pRowCol->colId == pDataCol->colId) {
      void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
      if (!isNull(value, pDataCol->type)) setCol = 1;
      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
      dcol++;
      rcol++;
    } else if (pRowCol->colId < pDataCol->colId) {
      rcol++;
    } else {
      if (forceSetNull || setCol) {
        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      }
      dcol++;
@@ -482,6 +484,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
  int nRowCols = kvRowNCols(row);
  while (dcol < pCols->numOfCols) {
    bool setCol = 0;
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
@@ -493,13 +496,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
    if (colIdx->colId == pDataCol->colId) {
      void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
      if (!isNull(value, pDataCol->type)) setCol = 1;
      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
      ++dcol;
      ++rcol;
    } else if (colIdx->colId < pDataCol->colId) {
      ++rcol;
    } else {
      if (forceSetNull || setCol) {
        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      }
      ++dcol;
@@ -518,7 +522,6 @@ void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, b
  }
}

int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) {
  ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
  ASSERT(target->numOfCols == source->numOfCols);
@@ -534,7 +537,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
  ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
  for (int i = 0; i < rowsToMerge; i++) {
    for (int j = 0; j < source->numOfCols; j++) {
      if (source->cols[j].len > 0 || target->cols[j].len > 0) {
        dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
                         target->maxPoints);
      }
@@ -578,7 +581,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
  if (key1 < key2) {
    for (int i = 0; i < src1->numOfCols; i++) {
      ASSERT(target->cols[i].type == src1->cols[i].type);
      if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
        dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
                         target->maxPoints);
      }
@@ -596,6 +599,8 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
      } else if (!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
        dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
                         target->maxPoints);
      } else if (target->cols[i].len > 0) {
        dataColSetNullAt(&target->cols[i], target->numOfRows);
      }
    }
    target->numOfRows++;
......
@@ -32,6 +32,7 @@ import java.util.List;
import com.taosdata.jdbc.utils.NullType;

public class TSDBResultSetBlockData {
    private static final int BINARY_LENGTH_OFFSET = 2;
    private int numOfRows = 0;
    private int rowIndex = 0;
@@ -404,10 +405,8 @@ public class TSDBResultSetBlockData {
    case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
        ByteBuffer bb = (ByteBuffer) this.colData.get(col);
        bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
        int length = bb.getShort();
        byte[] dest = new byte[length];
        bb.get(dest, 0, length);
        if (NullType.isBinaryNull(dest, length)) {
@@ -419,16 +418,13 @@ public class TSDBResultSetBlockData {
    case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
        ByteBuffer bb = (ByteBuffer) this.colData.get(col);
        bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
        int length = bb.getShort();
        byte[] dest = new byte[length];
        bb.get(dest, 0, length);
        if (NullType.isNcharNull(dest, length)) {
            return null;
        }
        try {
            String charset = TaosGlobalConfig.getCharset();
            return new String(dest, charset);
......
@@ -586,6 +586,130 @@ public class TSDBPreparedStatementTest {
    Assert.assertEquals(numOfRows, rows);
}
@Test
public void bindDataQueryTest() throws SQLException {
Statement stmt = conn.createStatement();
stmt.execute("drop table if exists weather_test");
stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
int numOfRows = 1;
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
s.setTableName("w2");
s.setTagInt(0, 1);
s.setTagString(1, "test");
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s2.add("test" + i % 4);
}
s.setString(1, s2, 10);
s.columnDataAddBatch();
s.columnDataExecuteBatch();
s.columnDataCloseBatch();
String sql = "select * from weather_test where t1 >= ? and t1 <= ?";
TSDBPreparedStatement s1 = (TSDBPreparedStatement) conn.prepareStatement(sql);
s1.setInt(1, 0);
s1.setInt(2, 10);
ResultSet rs = s1.executeQuery();
int rows = 0;
while (rs.next()) {
rows++;
}
Assert.assertEquals(numOfRows, rows);
}
@Test
public void setTagNullTest()throws SQLException {
Statement stmt = conn.createStatement();
stmt.execute("drop table if exists weather_test");
stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(10), t9 nchar(10))");
int numOfRows = 1;
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?,?,?,?,?,?,?,?) values(?, ?)");
s.setTableName("w3");
s.setTagNull(0, TSDBConstants.TSDB_DATA_TYPE_TINYINT);
s.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
s.setTagNull(2, TSDBConstants.TSDB_DATA_TYPE_INT);
s.setTagNull(3, TSDBConstants.TSDB_DATA_TYPE_BIGINT);
s.setTagNull(4, TSDBConstants.TSDB_DATA_TYPE_FLOAT);
s.setTagNull(5, TSDBConstants.TSDB_DATA_TYPE_DOUBLE);
s.setTagNull(6, TSDBConstants.TSDB_DATA_TYPE_BOOL);
s.setTagNull(7, TSDBConstants.TSDB_DATA_TYPE_BINARY);
s.setTagNull(8, TSDBConstants.TSDB_DATA_TYPE_NCHAR);
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s2.add(i);
}
s.setInt(1, s2);
s.columnDataAddBatch();
s.columnDataExecuteBatch();
s.columnDataCloseBatch();
}
private String stringGenerator(int length) {
String source = "abcdefghijklmnopqrstuvwxyz";
StringBuilder sb = new StringBuilder();
Random rand = new Random();
for(int i = 0; i < length; i++) {
sb.append(source.charAt(rand.nextInt(26)));
}
return sb.toString();
}
@Test(expected = SQLException.class)
public void setMaxTableNameTest()throws SQLException {
Statement stmt = conn.createStatement();
stmt.execute("drop table if exists weather_test");
stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 int)");
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?)");
String tbname = stringGenerator(193);
s.setTableName(tbname);
s.setTagInt(0, 1);
int numOfRows = 1;
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++) {
s2.add(i);
}
s.setInt(1, s2);
s.columnDataAddBatch();
s.columnDataExecuteBatch();
s.columnDataCloseBatch();
}
@Test(expected = SQLException.class) @Test(expected = SQLException.class)
public void createTwoSameDbTest() throws SQLException { public void createTwoSameDbTest() throws SQLException {
// when // when
......
...@@ -102,9 +102,7 @@ _libtaos.taos_get_client_info.restype = c_char_p ...@@ -102,9 +102,7 @@ _libtaos.taos_get_client_info.restype = c_char_p
def taos_get_client_info(): def taos_get_client_info():
# type: () -> str # type: () -> str
"""Get client version info. """Get client version info."""
Get the client version info.
"""
return _libtaos.taos_get_client_info().decode() return _libtaos.taos_get_client_info().decode()
...@@ -114,6 +112,7 @@ _libtaos.taos_get_server_info.argtypes = (c_void_p,) ...@@ -114,6 +112,7 @@ _libtaos.taos_get_server_info.argtypes = (c_void_p,)
def taos_get_server_info(connection): def taos_get_server_info(connection):
# type: (c_void_p) -> str # type: (c_void_p) -> str
"""Get server version as string."""
return _libtaos.taos_get_server_info(connection).decode() return _libtaos.taos_get_server_info(connection).decode()
...@@ -134,11 +133,10 @@ _libtaos.taos_connect.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint1 ...@@ -134,11 +133,10 @@ _libtaos.taos_connect.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint1
def taos_connect(host=None, user="root", password="taosdata", db=None, port=0): def taos_connect(host=None, user="root", password="taosdata", db=None, port=0):
# type: (None|str, str, str, None|str, int) -> c_void_p # type: (None|str, str, str, None|str, int) -> c_void_p
"""Create TDengine database connection. """Create TDengine database connection.
Create a TDengine database connection and initialize the connection context. The caller provides:
- host: server hostname/FQDN, the FQDN of the TDengine management node - host: server hostname/FQDN
- user: user name - user: user name
- password: user password - password: user password
- db: database name (optional) - db: database name (optional)
- port: server port - port: server port
...@@ -187,11 +185,10 @@ _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_ ...@@ -187,11 +185,10 @@ _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_
def taos_connect_auth(host=None, user="root", auth="", db=None, port=0): def taos_connect_auth(host=None, user="root", auth="", db=None, port=0):
# type: (None|str, str, str, None|str, int) -> c_void_p # type: (None|str, str, str, None|str, int) -> c_void_p
""" """Connect server with auth token.
Create a TDengine database connection and initialize the connection context. The caller provides:
- host: server hostname/FQDN, the FQDN of the TDengine management node - host: server hostname/FQDN
- user: user name - user: user name
- auth: base64 encoded auth token - auth: base64 encoded auth token
- db: database name (optional) - db: database name (optional)
- port: server port - port: server port
......
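The Python functions above are thin ctypes bindings over the TDengine C client, so the same call sequence can be written directly against the C API. A minimal sketch (host, credentials, and port are placeholders; error handling trimmed to the essentials):

#include <stdio.h>
#include <taos.h>

int main(void) {
    taos_init();
    /* NULL db and port 0 fall back to the client defaults,
     * matching the wrapper's db=None, port=0 */
    TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (conn == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
    }
    printf("client: %s, server: %s\n",
           taos_get_client_info(), taos_get_server_info(conn));
    taos_close(conn);
    taos_cleanup();
    return 0;
}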
This diff has been collapsed.
...@@ -272,26 +272,35 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { ...@@ -272,26 +272,35 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
switch (timePrecision) { switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: { case TSDB_TIME_PRECISION_MILLI: {
mod = ((t) % 1000 + 1000) % 1000;
if (t < 0 && mod != 0) {
t -= 1000;
}
quot = t / 1000; quot = t / 1000;
fractionLen = 5; fractionLen = 5;
format = ".%03" PRId64; format = ".%03" PRId64;
mod = t % 1000;
break; break;
} }
case TSDB_TIME_PRECISION_MICRO: { case TSDB_TIME_PRECISION_MICRO: {
mod = ((t) % 1000000 + 1000000) % 1000000;
if (t < 0 && mod != 0) {
t -= 1000000;
}
quot = t / 1000000; quot = t / 1000000;
fractionLen = 8; fractionLen = 8;
format = ".%06" PRId64; format = ".%06" PRId64;
mod = t % 1000000;
break; break;
} }
case TSDB_TIME_PRECISION_NANO: { case TSDB_TIME_PRECISION_NANO: {
mod = ((t) % 1000000000 + 1000000000) % 1000000000;
if (t < 0 && mod != 0) {
t -= 1000000000;
}
quot = t / 1000000000; quot = t / 1000000000;
fractionLen = 11; fractionLen = 11;
format = ".%09" PRId64; format = ".%09" PRId64;
mod = t % 1000000000;
break; break;
} }
...@@ -319,26 +328,35 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { ...@@ -319,26 +328,35 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
switch (timePrecision) { switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: { case TSDB_TIME_PRECISION_MILLI: {
mod = ((t) % 1000 + 1000) % 1000;
if (t < 0 && mod != 0) {
t -= 1000;
}
quot = t / 1000; quot = t / 1000;
fractionLen = 5; fractionLen = 5;
format = ".%03" PRId64; format = ".%03" PRId64;
mod = t % 1000;
break; break;
} }
case TSDB_TIME_PRECISION_MICRO: { case TSDB_TIME_PRECISION_MICRO: {
mod = ((t) % 1000000 + 1000000) % 1000000;
if (t < 0 && mod != 0) {
t -= 1000000;
}
quot = t / 1000000; quot = t / 1000000;
fractionLen = 8; fractionLen = 8;
format = ".%06" PRId64; format = ".%06" PRId64;
mod = t % 1000000;
break; break;
} }
case TSDB_TIME_PRECISION_NANO: { case TSDB_TIME_PRECISION_NANO: {
mod = ((t) % 1000000000 + 1000000000) % 1000000000;
if (t < 0 && mod != 0) {
t -= 1000000000;
}
quot = t / 1000000000; quot = t / 1000000000;
fractionLen = 11; fractionLen = 11;
format = ".%09" PRId64; format = ".%09" PRId64;
mod = t % 1000000000;
break; break;
} }
......
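Both hunks apply the same adjustment: C's / and % truncate toward zero, so for a negative (pre-1970) timestamp the naive quot/mod pair yields a negative fraction. The patch first computes a non-negative remainder, then shifts t down one unit so the division floors instead of truncating. A standalone sketch of the arithmetic for the millisecond case (the pattern only, not the project's code):

#include <inttypes.h>
#include <stdio.h>

/* Split epoch milliseconds into whole seconds and a non-negative
 * millisecond fraction, flooring for negative timestamps. */
static void split_ms(int64_t t, int64_t *quot, int64_t *mod) {
    *mod = ((t % 1000) + 1000) % 1000;  /* always in [0, 999] */
    if (t < 0 && *mod != 0) {
        t -= 1000;  /* floor the quotient instead of truncating toward zero */
    }
    *quot = t / 1000;
}

int main(void) {
    int64_t quot, mod;
    split_ms(-1, &quot, &mod);  /* 1 ms before the epoch */
    /* prints -1.999, i.e. 1969-12-31T23:59:59.999Z */
    printf("%" PRId64 ".%03" PRId64 "\n", quot, mod);
    return 0;
}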
...@@ -6388,6 +6388,19 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { ...@@ -6388,6 +6388,19 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
return pInfo->binfo.pRes; return pInfo->binfo.pRes;
} }
static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo *pInfo, SQueryRuntimeEnv* pRuntimeEnv, bool* newgroup) {
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
pInfo->existNewGroupBlock = NULL;
*newgroup = true;
}
static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) { static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) {
if (taosFillHasMoreResults(pInfo->pFillInfo)) { if (taosFillHasMoreResults(pInfo->pFillInfo)) {
*newgroup = false; *newgroup = false;
...@@ -6399,16 +6412,7 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRunt ...@@ -6399,16 +6412,7 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRunt
// handle the cached new group data block // handle the cached new group data block
if (pInfo->existNewGroupBlock) { if (pInfo->existNewGroupBlock) {
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup);
int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
pInfo->existNewGroupBlock = NULL;
*newgroup = true;
} }
} }
...@@ -6427,26 +6431,6 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { ...@@ -6427,26 +6431,6 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) {
return pInfo->pRes; return pInfo->pRes;
} }
// if (taosFillHasMoreResults(pInfo->pFillInfo)) {
// *newgroup = false;
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
// return pInfo->pRes;
// }
//
// // handle the cached new group data block
// if (pInfo->existNewGroupBlock) {
// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
//
// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
//
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
// pInfo->existNewGroupBlock = NULL;
// *newgroup = true;
// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
// }
while(1) { while(1) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
...@@ -6493,46 +6477,13 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { ...@@ -6493,46 +6477,13 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) {
return pInfo->pRes; return pInfo->pRes;
} }
// if (taosFillHasMoreResults(pInfo->pFillInfo)) {
// *newgroup = false;
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
// return pInfo->pRes;
// }
//
// // handle the cached new group data block
// if (pInfo->existNewGroupBlock) {
// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
//
// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
//
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
// pInfo->existNewGroupBlock = NULL;
// *newgroup = true;
//
// if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) {
// return pInfo->pRes;
// }
//
//// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
// }
} else if (pInfo->existNewGroupBlock) { // try next group } else if (pInfo->existNewGroupBlock) { // try next group
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; assert(pBlock != NULL);
int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey; doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup);
taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
pInfo->existNewGroupBlock = NULL;
*newgroup = true;
return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL; if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) {
return pInfo->pRes;
}
} else { } else {
return NULL; return NULL;
} }
......
...@@ -1457,13 +1457,11 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ...@@ -1457,13 +1457,11 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
while (true) { while (true) {
key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter); key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter);
bool isRowDel = false;
SMemRow row = tsdbNextIterRow(pCommitIter->pIter); SMemRow row = tsdbNextIterRow(pCommitIter->pIter);
if (row == NULL || memRowKey(row) > maxKey) { if (row == NULL || memRowKey(row) > maxKey) {
key2 = INT64_MAX; key2 = INT64_MAX;
} else { } else {
key2 = memRowKey(row); key2 = memRowKey(row);
isRowDel = memRowDeleted(row);
} }
if (key1 == INT64_MAX && key2 == INT64_MAX) break; if (key1 == INT64_MAX && key2 == INT64_MAX) break;
...@@ -1478,36 +1476,33 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ...@@ -1478,36 +1476,33 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
pTarget->numOfRows++; pTarget->numOfRows++;
(*iter)++; (*iter)++;
} else if (key1 > key2) { } else if (key1 > key2) {
if (!isRowDel) {
if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
ASSERT(pSchema != NULL); ASSERT(pSchema != NULL);
} }
tdAppendMemRowToDataCol(row, pSchema, pTarget, true); tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
}
tSkipListIterNext(pCommitIter->pIter); tSkipListIterNext(pCommitIter->pIter);
} else { } else {
if (update) { if (update != TD_ROW_OVERWRITE_UPDATE) {
if (!isRowDel) { //copy disk data
if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
}
} else {
ASSERT(!isRowDel);
for (int i = 0; i < pDataCols->numOfCols; i++) { for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail //TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints); pTarget->maxPoints);
} }
pTarget->numOfRows++; if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++;
}
if (update != TD_ROW_DISCARD_UPDATE) {
//copy mem data
if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
} }
(*iter)++; (*iter)++;
tSkipListIterNext(pCommitIter->pIter); tSkipListIterNext(pCommitIter->pIter);
......
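The rewritten key-collision branch encodes three update policies: discard-update keeps the disk row and drops the in-memory row, overwrite-update takes the in-memory row wholesale, and the remaining partial-update mode copies the disk columns first and then overlays the in-memory columns. A compilable sketch of just that decision flow (mode names from the diff; the enum values and scaffolding are stand-ins):

#include <stdio.h>

typedef enum {
    TD_ROW_DISCARD_UPDATE,   /* values are stand-ins, not the real constants */
    TD_ROW_OVERWRITE_UPDATE,
    TD_ROW_PARTIAL_UPDATE
} EUpdateMode;

/* Decision flow when a disk row and a mem row share one timestamp key. */
static void merge_colliding_row(EUpdateMode update) {
    if (update != TD_ROW_OVERWRITE_UPDATE) {
        puts("copy disk columns into the target row");
        if (update == TD_ROW_DISCARD_UPDATE)
            puts("advance numOfRows: the disk row wins");
    }
    if (update != TD_ROW_DISCARD_UPDATE) {
        puts(update == TD_ROW_OVERWRITE_UPDATE
                 ? "append mem row, replacing every column"
                 : "overlay only the columns the mem row carries");
    }
}

int main(void) {
    merge_colliding_row(TD_ROW_PARTIAL_UPDATE);
    return 0;
}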
...@@ -488,7 +488,7 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { ...@@ -488,7 +488,7 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
return -1; return -1;
} }
if (listen(sockFd, 10) < 0) { if (listen(sockFd, 1024) < 0) {
uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno));
taosCloseSocket(sockFd); taosCloseSocket(sockFd);
return -1; return -1;
......
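The one-line backlog bump (10 to 1024) only raises the cap on connections queued before accept(); the kernel clamps the value to net.core.somaxconn, so that sysctl may need raising as well. A standalone sketch of the call in context (address and port are placeholders, not the server's actual setup):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(6030);
    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { perror("bind"); return 1; }

    /* backlog 1024 matches the patch; the effective value is
     * min(1024, net.core.somaxconn) */
    if (listen(fd, 1024) < 0) { perror("listen"); return 1; }
    printf("listening\n");
    return 0;
}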
const taos = require('td2.0-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var c1 = conn.cursor(); // Initializing a new cursor
let stime = new Date();
let interval = 1000;
function convertDateToTS(date) {
let tsArr = date.toISOString().split("T")
return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
}
function R(l, r) {
return Math.random() * (r - l) + l; // uniform value in [l, r)
}
function randomBool() {
if (Math.random() < 0.5) {
return true;
}
return false;
}
// Initialize
const dbname = "nodejs_1970_db";
const tbname = "t1";
let dropDB = "drop database if exists " + dbname
console.log(dropDB);
c1.execute(dropDB);
let createDB = "create database " + dbname + " keep 36500"
console.log(createDB);
c1.execute(createDB);
let useTbl = "use " + dbname
console.log(useTbl)
c1.execute(useTbl);
let createTbl = "create table if not exists " + tbname + "(ts timestamp,id int)"
console.log(createTbl);
c1.execute(createTbl);
//1969-12-31 23:59:59.999
//1970-01-01 00:00:00.000
//1970-01-01 07:59:59.999
//1970-01-01 08:00:00.000
//1628928479484 2021-08-14 08:07:59.484
let sql1 = "insert into " + dbname + "." + tbname + " values('1969-12-31 23:59:59.999',1)"
console.log(sql1);
c1.execute(sql1);
let sql2 = "insert into " + dbname + "." + tbname + " values('1970-01-01 00:00:00.000',2)"
console.log(sql2);
c1.execute(sql2);
let sql3 = "insert into " + dbname + "." + tbname + " values('1970-01-01 07:59:59.999',3)"
console.log(sql3);
c1.execute(sql3);
let sql4 = "insert into " + dbname + "." + tbname + " values('1970-01-01 08:00:00.000',4)"
console.log(sql4);
c1.execute(sql4);
let sql5 = "insert into " + dbname + "." + tbname + " values('2021-08-14 08:07:59.484',5)"
console.log(sql5);
c1.execute(sql5);
// Select
let query1 = "select * from " + dbname + "." + tbname
console.log(query1);
c1.execute(query1);
var d = c1.fetchall();
console.log(c1.fields);
for (let i = 0; i < d.length; i++)
console.log(d[i][0].valueOf());
//initialize
let initSql1 = "drop table if exists " + tbname
console.log(initSql1);
c1.execute(initSql1);
console.log(createTbl);
c1.execute(createTbl);
c1.execute(useTbl)
//-28800001 1969-12-31 23:59:59.999
//-28800000 1970-01-01 00:00:00.000
//-1 1970-01-01 07:59:59.999
//0 1970-01-01 08:00:00.000
//1628928479484 2021-08-14 08:07:59.484
let sql11 = "insert into " + dbname + "." + tbname + " values(-28800001,11)";
console.log(sql11);
c1.execute(sql11);
let sql12 = "insert into " + dbname + "." + tbname + " values(-28800000,12)"
console.log(sql12);
c1.execute(sql12);
let sql13 = "insert into " + dbname + "." + tbname + " values(-1,13)"
console.log(sql13);
c1.execute(sql13);
let sql14 = "insert into " + dbname + "." + tbname + " values(0,14)"
console.log(sql14);
c1.execute(sql14);
let sql15 = "insert into " + dbname + "." + tbname + " values(1628928479484,15)"
console.log(sql15);
c1.execute(sql15);
// Select
console.log(query1);
c1.execute(query1);
var d = c1.fetchall();
console.log(c1.fields);
for (let i = 0; i < d.length; i++)
console.log(d[i][0].valueOf());
setTimeout(function () {
conn.close();
}, 2000);
...@@ -17,5 +17,5 @@ go env -w GO111MODULE=on ...@@ -17,5 +17,5 @@ go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct go env -w GOPROXY=https://goproxy.io,direct
bash ./case001/case001.sh $severIp $serverPort bash ./case001/case001.sh $severIp $serverPort
#bash ./case002/case002.sh $severIp $serverPort bash ./case002/case002.sh $severIp $serverPort
#bash ./case003/case003.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort
...@@ -19,7 +19,7 @@ import ( ...@@ -19,7 +19,7 @@ import (
"database/sql" "database/sql"
"flag" "flag"
"fmt" "fmt"
_ "github.com/taosdata/driver-go/taosSql" _ "github.com/taosdata/driver-go/v2/taosSql"
"log" "log"
"strconv" "strconv"
"time" "time"
...@@ -63,6 +63,7 @@ func main() { ...@@ -63,6 +63,7 @@ func main() {
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/"
// open connect to taos server // open connect to taos server
fmt.Printf("url:%s",url)
db, err := sql.Open(taosDriverName, url) db, err := sql.Open(taosDriverName, url)
if err != nil { if err != nil {
log.Fatalf("Open database error: %s\n", err) log.Fatalf("Open database error: %s\n", err)
...@@ -168,17 +169,18 @@ func insert_data(db *sql.DB, demot string) { ...@@ -168,17 +169,18 @@ func insert_data(db *sql.DB, demot string) {
func select_data(db *sql.DB, demot string) { func select_data(db *sql.DB, demot string) {
st := time.Now().Nanosecond() st := time.Now().Nanosecond()
fmt.Println(demot)
rows, err := db.Query("select * from ? ", demot) // go text mode rows, err := db.Query("select * from ? ", demot) // go text mode
fmt.Println("end query",err)
checkErr(err, "select db.Query") checkErr(err, "select db.Query")
fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv") fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
var affectd int var affectd int
//decoder := mahonia.NewDecoder("gbk") // decode characters from the original ANSI-format text file as GBK //decoder := mahonia.NewDecoder("gbk") // decode characters from the original ANSI-format text file as GBK
fmt.Println("start next")
for rows.Next() { for rows.Next() {
var ts string var ts time.Time
var name string var name string
var id int var id int
var len int8 var len int8
...@@ -188,6 +190,7 @@ func select_data(db *sql.DB, demot string) { ...@@ -188,6 +190,7 @@ func select_data(db *sql.DB, demot string) {
var dv float64 var dv float64
err = rows.Scan(&ts, &id, &name, &len, &flag, &notes, &fv, &dv) err = rows.Scan(&ts, &id, &name, &len, &flag, &notes, &fv, &dv)
fmt.Println("rows:",err)
checkErr(err, "select rows.Scan") checkErr(err, "select rows.Scan")
fmt.Printf("%s|\t", ts) fmt.Printf("%s|\t", ts)
......
@echo off
echo ==== start run cases001.go
del go.*
go mod init demotest
go build
demotest.exe -h %1 -p %2
cd ..
package main
import (
"database/sql/driver"
"fmt"
"io"
"os"
"time"
taos "github.com/taosdata/driver-go/v2/af"
)
func Subscribe_check(topic taos.Subscriber, check int) bool {
count := 0
rows, err := topic.Consume()
if err != nil {
fmt.Println(err)
os.Exit(3)
}
// register cleanup only after Consume succeeds, so rows is never nil here
defer func() { rows.Close(); time.Sleep(time.Second) }()
for {
values := make([]driver.Value, 2)
err := rows.Next(values)
if err == io.EOF {
break
} else if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(4)
}
count++
}
// a mismatch between consumed rows and the expected count means failure
return count != check
}
func main() {
ts := 1630461600000
db, err := taos.Open("127.0.0.1", "", "", "", 0)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
defer db.Close()
db.Exec("drop if exists database test")
db.Exec("create if not exists database test")
db.Exec("use test")
db.Exec("drop if exists database test")
db.Exec("create table test (ts timestamp ,level int)")
for i := 0; i < 10; i++ {
sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i)
db.Exec(sqlcmd)
}
fmt.Println("consumption 01.")
topic, err := db.Subscribe(false, "test", "select ts, level from test", time.Second)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if Subscribe_check(topic, 10) {
os.Exit(3)
}
fmt.Println("consumption 02: no new rows inserted")
if Subscribe_check(topic, 0) {
os.Exit(3)
}
fmt.Println("consumption 03: after one new rows inserted")
sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+10, 10)
db.Exec(sqlcmd)
if Subscribe_check(topic, 1) {
os.Exit(3)
}
fmt.Println("consumption 04: keep progress and continue previous subscription")
topic.Unsubscribe(true)
topic, err = db.Subscribe(false, "test", "select ts, level from test", time.Second)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if Subscribe_check(topic, 0) {
os.Exit(3)
}
}
#!/bin/bash
echo "==== start run cases001.go"
set +e
#set -x
script_dir="$(dirname $(readlink -f $0))"
#echo "pwd: $script_dir, para0: $0"
#execName=$0
#execName=`echo ${execName##*/}`
#goName=`echo ${execName%.*}`
###### step 3: start build
cd $script_dir
rm -f go.*
go mod init demotest > /dev/null 2>&1
go mod tidy > /dev/null 2>&1
go build > /dev/null 2>&1
sleep 1s
./demotest -h $1 -p $2
...@@ -390,7 +390,7 @@ python3 ./test.py -f alter/alterColMultiTimes.py ...@@ -390,7 +390,7 @@ python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f query/queryWildcardLength.py python3 ./test.py -f query/queryWildcardLength.py
python3 ./test.py -f query/queryTbnameUpperLower.py python3 ./test.py -f query/queryTbnameUpperLower.py
python3 ./test.py -f query/query.py python3 ./test.py -f query/query.py
python3 ./test.py -f query/queryDiffColsOr.py
#======================p4-end=============== #======================p4-end===============
......
...@@ -10,13 +10,10 @@ ...@@ -10,13 +10,10 @@
################################################################### ###################################################################
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from copy import deepcopy
from util.log import tdLog from util.log import tdLog
from util.cases import tdCases from util.cases import tdCases
from util.sql import tdSql from util.sql import tdSql
from util.common import tdCom from util.common import tdCom
class TDTestCase: class TDTestCase:
def init(self, conn, logSql): def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
...@@ -409,6 +406,62 @@ class TDTestCase: ...@@ -409,6 +406,62 @@ class TDTestCase:
tdSql.checkRows(10) tdSql.checkRows(10)
tdSql.checkEqual(int(res[9][0]), 10) tdSql.checkEqual(int(res[9][0]), 10)
def queryMultiTbWithTag(self, tb_name):
# tags (1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)')
tdSql.execute(
f'CREATE TABLE {tb_name}_sub2 using {tb_name} tags (2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)')
tdSql.execute(
f'CREATE TABLE {tb_name}_sub3 using {tb_name} tags (3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)')
tdSql.execute(
f'insert into {tb_name}_sub2 values ("2021-01-25 12:00:00", 2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)')
tdSql.execute(
f'insert into {tb_name}_sub3 values ("2021-01-27 12:00:00", 3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)')
## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or condition_tag_C or condition_tag_D or like and in interval
query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
res = tdSql.query(query_sql, True)
tdSql.checkRows(3)
tdSql.checkEqual(int(res[0][1]), 3)
tdSql.checkEqual(int(res[0][2]), 1)
tdSql.checkEqual(int(res[0][3]), 10)
tdSql.checkEqual(int(res[1][1]), 3)
tdSql.checkEqual(int(res[1][2]), 3)
tdSql.checkEqual(int(res[1][3]), 3)
tdSql.checkEqual(int(res[2][1]), 3)
tdSql.checkEqual(int(res[2][2]), 2)
tdSql.checkEqual(int(res[2][3]), 6)
# ! to confirm
## select count avg sum from (condition_A or condition_B or condition_tag_C or condition_tag_D and like and in) where condition_A or condition_B or like and in interval
# query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where t1 = 3 and t1 = 2 or c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true) interval(8d)'
# res = tdSql.query(query_sql, True)
# tdSql.checkRows(3)
# tdSql.checkEqual(int(res[0][1]), 3)
# tdSql.checkEqual(int(res[0][2]), 1)
# tdSql.checkEqual(int(res[0][3]), 10)
# tdSql.checkEqual(int(res[1][1]), 3)
# tdSql.checkEqual(int(res[1][2]), 3)
# tdSql.checkEqual(int(res[1][3]), 3)
# tdSql.checkEqual(int(res[2][1]), 3)
# tdSql.checkEqual(int(res[2][2]), 2)
# tdSql.checkEqual(int(res[2][3]), 6)
## select count avg sum from (condition_A and condition_B and like and in and ts and condition_tag_A and condition_tag_B and between) where condition_C or condition_D or condition_tag_C or condition_tag_D or like and in interval
query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >= 1 and c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00" and t1 < 2 and t1 > 0 and c6 between 0 and 7) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
res = tdSql.query(query_sql, True)
tdSql.checkRows(2)
tdSql.checkEqual(int(res[0][1]), 2)
tdSql.checkEqual(int(res[0][2]), 1)
tdSql.checkEqual(int(res[0][3]), 2)
tdSql.checkEqual(int(res[1][1]), 1)
tdSql.checkEqual(int(res[1][2]), 1)
tdSql.checkEqual(int(res[1][3]), 1)
# ! to confirm
#select * from (select * from pyclqtwi where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00") where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) ;
#DB error: invalid operation: invalid expression (0.008747s)
def checkTbColTypeOperator(self): def checkTbColTypeOperator(self):
''' '''
Ordinary table full column type and operator Ordinary table full column type and operator
...@@ -493,32 +546,12 @@ class TDTestCase: ...@@ -493,32 +546,12 @@ class TDTestCase:
tb_name = self.initStb() tb_name = self.initStb()
self.queryMultiTb(tb_name) self.queryMultiTb(tb_name)
def checkMultiTbWithTag(self):
# tb_name1 = tdCom.getLongName(8, "letters") '''
# tb_name2 = tdCom.getLongName(8, "letters") test Multi tb with tag
# tb_name3 = tdCom.getLongName(8, "letters") '''
# tdSql.execute( tb_name = self.initStb()
# f"CREATE TABLE {tb_name1} (ts timestamp, c1 tinyint, c2 smallint, c3 int)") self.queryMultiTbWithTag(tb_name)
# tdSql.execute(
# f"CREATE TABLE {tb_name2} (ts timestamp, c1 tinyint, c2 smallint, c3 int)")
# tdSql.execute(
# f"CREATE TABLE {tb_name3} (ts timestamp, c1 tinyint, c2 smallint, c3 int)")
# insert_sql_list = [f'insert into {tb_name1} values ("2021-01-01 12:00:00", 1, 5, 1)',
# f'insert into {tb_name1} values ("2021-01-03 12:00:00", 2, 4, 1)',
# f'insert into {tb_name1} values ("2021-01-05 12:00:00", 3, 2, 1)',
# f'insert into {tb_name2} values ("2021-01-01 12:00:00", 4, 2, 1)',
# f'insert into {tb_name2} values ("2021-01-02 12:00:00", 5, 1, 1)',
# f'insert into {tb_name2} values ("2021-01-04 12:00:00", 1, 2, 1)',
# f'insert into {tb_name3} values ("2021-01-02 12:00:00", 4, 2, 1)',
# f'insert into {tb_name3} values ("2021-01-06 12:00:00", 5, 1, 1)',
# f'insert into {tb_name3} values ("2021-01-07 12:00:00", 1, 2, 1)',
# ]
# for sql in insert_sql_list:
# tdSql.execute(sql)
# tdSql.query(
# f'select * from {tb_name1} t1, {tb_name2}, {tb_name3} t3 t2 where (t1.ts=t2.ts or t2.ts=t3.ts)')
# tdSql.checkRows(4)
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()
...@@ -534,7 +567,7 @@ class TDTestCase: ...@@ -534,7 +567,7 @@ class TDTestCase:
self.checkStbPreCal() self.checkStbPreCal()
self.checkMultiTb() self.checkMultiTb()
self.checkMultiStb() self.checkMultiStb()
self.checkMultiTbWithTag()
def stop(self): def stop(self):
tdSql.close() tdSql.close()
......
...@@ -21,7 +21,15 @@ import shutil ...@@ -21,7 +21,15 @@ import shutil
import pandas as pd import pandas as pd
from util.log import * from util.log import *
def _parse_datetime(timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
return None  # neither supported timestamp format matched
class TDSql: class TDSql:
def __init__(self): def __init__(self):
...@@ -181,7 +189,7 @@ class TDSql: ...@@ -181,7 +189,7 @@ class TDSql:
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
else: else:
if self.queryResult[row][col] == datetime.datetime.fromisoformat(data): if self.queryResult[row][col] == _parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
return return
......
#include <stdio.h>
#include <string.h>
#include <strings.h> /* bzero() is declared here, not in <string.h> */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <pthread.h>
#define MAXLINE 1024
typedef struct {
pthread_t pid;
int threadId;
int rows;
int tables;
} ThreadObj;
void post(char *ip,int port,char *page,char *msg) {
int sockfd,n;
char recvline[MAXLINE];
struct sockaddr_in servaddr;
char content[4096];
char content_page[50];
sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
char content_host[50];
sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
char content_type[] = "Content-Type: text/plain\r\n";
char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
char content_len[50];
sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg));
sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
printf("socket error\n");
}
bzero(&servaddr,sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(port);
if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
printf("inet_pton error\n");
}
if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
printf("connect error\n");
}
write(sockfd,content,strlen(content));
printf("%s\n", content);
while((n = read(sockfd,recvline,MAXLINE)) > 0) {
recvline[n] = 0;
if(fputs(recvline,stdout) == EOF) {
printf("fputs error\n");
}
}
if(n < 0) {
printf("read error\n");
}
close(sockfd); /* each post() opens its own connection; release it */
}
void singleThread() {
char ip[] = "127.0.0.1";
int port = 6041;
char page[] = "rest/sqlutc";
char page1[] = "rest/sqlutc/db1";
char page2[] = "rest/sqlutc/db2";
char nonexit[] = "rest/sqlutc/xxdb";
post(ip,port,page,"drop database if exists db1");
post(ip,port,page,"create database if not exists db1");
post(ip,port,page,"drop database if exists db2");
post(ip,port,page,"create database if not exists db2");
post(ip,port,page1,"create table t11 (ts timestamp, c1 int)");
post(ip,port,page2,"create table t21 (ts timestamp, c1 int)");
post(ip,port,page1,"insert into t11 values (now, 1)");
post(ip,port,page2,"insert into t21 values (now, 2)");
post(ip,port,nonexit,"create database if not exists db3");
}
void *execute(void *params) {
char ip[] = "127.0.0.1";
int port = 6041;
char page[] = "rest/sqlutc";
char *unique = calloc(1, 1024);
char *sql = calloc(1, 1024);
ThreadObj *pThread = (ThreadObj *)params;
printf("Thread %d started\n", pThread->threadId);
sprintf(unique, "rest/sqlutc/db%d",pThread->threadId);
sprintf(sql, "drop database if exists db%d", pThread->threadId);
post(ip,port,page, sql);
sprintf(sql, "create database if not exists db%d", pThread->threadId);
post(ip,port,page, sql);
for (int i = 0; i < pThread->tables; i++) {
sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
post(ip,port,unique, sql);
}
for (int i = 0; i < pThread->rows; i++) {
sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
post(ip,port,unique, sql);
}
free(unique);
free(sql);
return NULL;
}
void multiThread() {
int numOfThreads = 100;
int numOfTables = 100;
int numOfRows = 1;
ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
for (int i = 0; i < numOfThreads; i++) {
ThreadObj *pthread = threads + i;
pthread_attr_t thattr;
pthread->threadId = i + 1;
pthread->rows = numOfRows;
pthread->tables = numOfTables;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
pthread_create(&pthread->pid, &thattr, execute, pthread);
}
for (int i = 0; i < numOfThreads; i++) {
pthread_join(threads[i].pid, NULL);
}
free(threads);
}
int main() {
singleThread();
multiThread();
exit(0);
}
\ No newline at end of file
#include <stdio.h>
#include <string.h>
#include <strings.h> /* bzero() is declared here, not in <string.h> */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <pthread.h>
#define MAXLINE 1024
typedef struct {
pthread_t pid;
int threadId;
int rows;
int tables;
} ThreadObj;
void post(char *ip,int port,char *page,char *msg) {
int sockfd,n;
char recvline[MAXLINE];
struct sockaddr_in servaddr;
char content[4096];
char content_page[50];
sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
char content_host[50];
sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
char content_type[] = "Content-Type: text/plain\r\n";
char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
char content_len[50];
sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg));
sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
printf("socket error\n");
}
bzero(&servaddr,sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(port);
if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
printf("inet_pton error\n");
}
if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
printf("connect error\n");
}
write(sockfd,content,strlen(content));
printf("%s\n", content);
while((n = read(sockfd,recvline,MAXLINE)) > 0) {
recvline[n] = 0;
if(fputs(recvline,stdout) == EOF) {
printf("fputs error\n");
}
}
if(n < 0) {
printf("read error\n");
}
close(sockfd); /* each post() opens its own connection; release it */
}
void singleThread() {
char ip[] = "127.0.0.1";
int port = 6041;
char page[] = "rest/sqlt";
char page1[] = "rest/sqlt/db1";
char page2[] = "rest/sqlt/db2";
char nonexit[] = "rest/sqlt/xxdb";
post(ip,port,page,"drop database if exists db1");
post(ip,port,page,"create database if not exists db1");
post(ip,port,page,"drop database if exists db2");
post(ip,port,page,"create database if not exists db2");
post(ip,port,page1,"create table t11 (ts timestamp, c1 int)");
post(ip,port,page2,"create table t21 (ts timestamp, c1 int)");
post(ip,port,page1,"insert into t11 values (now, 1)");
post(ip,port,page2,"insert into t21 values (now, 2)");
post(ip,port,nonexit,"create database if not exists db3");
}
void *execute(void *params) {
char ip[] = "127.0.0.1";
int port = 6041;
char page[] = "rest/sqlt";
char *unique = calloc(1, 1024);
char *sql = calloc(1, 1024);
ThreadObj *pThread = (ThreadObj *)params;
printf("Thread %d started\n", pThread->threadId);
sprintf(unique, "rest/sqlt/db%d",pThread->threadId);
sprintf(sql, "drop database if exists db%d", pThread->threadId);
post(ip,port,page, sql);
sprintf(sql, "create database if not exists db%d", pThread->threadId);
post(ip,port,page, sql);
for (int i = 0; i < pThread->tables; i++) {
sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
post(ip,port,unique, sql);
}
for (int i = 0; i < pThread->rows; i++) {
sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
post(ip,port,unique, sql);
}
free(unique);
free(sql);
return NULL;
}
void multiThread() {
int numOfThreads = 100;
int numOfTables = 100;
int numOfRows = 1;
ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
for (int i = 0; i < numOfThreads; i++) {
ThreadObj *pthread = threads + i;
pthread_attr_t thattr;
pthread->threadId = i + 1;
pthread->rows = numOfRows;
pthread->tables = numOfTables;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
pthread_create(&pthread->pid, &thattr, execute, pthread);
}
for (int i = 0; i < numOfThreads; i++) {
pthread_join(threads[i].pid, NULL);
}
free(threads);
}
int main() {
singleThread();
multiThread();
exit(0);
}
\ No newline at end of file
all: all:
gcc -g httpTest.c -o httpTest -lpthread gcc -g httpTest.c -o httpTest -lpthread
gcc -g httpTestSqlt.c -o httpTestSqlt -lpthread
gcc -g httpTestSqlUtc.c -o httpTestSqlUtc -lpthread
clean:
rm -f httpTest
rm -f httpTestSqlt
rm -f httpTestSqlUtc
\ No newline at end of file