diff --git a/Jenkinsfile b/Jenkinsfile
index 680fcce37f913410055a540df3b3340f41be701d..b073c32e1384dc7fa527695ab3be8dfde26be978 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -5,7 +5,7 @@ node {
git url: 'https://github.com/taosdata/TDengine.git'
}
-def skipstage=0
+def skipbuild=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
@@ -33,8 +33,7 @@ def abort_previous(){
milestone(buildNumber)
}
def pre_test(){
-
-
+ sh'hostname'
sh '''
sudo rmtaos || echo "taosd has not installed"
'''
@@ -52,12 +51,18 @@ def pre_test(){
git checkout master
'''
}
- else {
+ else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
- git checkout develop
+ git checkout 2.0
'''
}
+ else{
+ sh '''
+ cd ${WKC}
+ git checkout develop
+ '''
+ }
}
sh'''
cd ${WKC}
@@ -75,7 +80,13 @@ def pre_test(){
git checkout master
'''
}
- else {
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WK}
+ git checkout 2.0
+ '''
+ }
+ else{
sh '''
cd ${WK}
git checkout develop
@@ -95,19 +106,17 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
- pip3 install ${WKC}/src/connector/python
+ pip3 install ${WKC}/src/connector/python/
'''
return 1
}
pipeline {
agent none
-
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
-
stages {
stage('pre_build'){
agent{label 'master'}
@@ -123,19 +132,22 @@ pipeline {
rm -rf ${WORKSPACE}.tes
cp -r ${WORKSPACE} ${WORKSPACE}.tes
cd ${WORKSPACE}.tes
-
+ git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
git checkout master
- git pull origin master
'''
}
- else {
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ git checkout 2.0
+ '''
+ }
+ else{
sh '''
git checkout develop
- git pull origin develop
'''
}
}
@@ -143,28 +155,30 @@ pipeline {
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
-
- script{
- env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
+
+ script{
+ skipbuild='2'
+ skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
+ println skipbuild
}
- println env.skipstage
sh'''
rm -rf ${WORKSPACE}.tes
'''
}
}
-
stage('Parallel test stage') {
//only build pr
when {
+ allOf{
changeRequest()
- expression {
- env.skipstage != 0
+ expression{
+ return skipbuild.trim() == '2'
}
+ }
}
parallel {
stage('python_1_s1') {
- agent{label 'p1'}
+ agent{label " slave1 || slave11 "}
steps {
pre_test()
@@ -179,7 +193,7 @@ pipeline {
}
}
stage('python_2_s5') {
- agent{label 'p2'}
+ agent{label " slave5 || slave15 "}
steps {
pre_test()
@@ -193,7 +207,7 @@ pipeline {
}
}
stage('python_3_s6') {
- agent{label 'p3'}
+ agent{label " slave6 || slave16 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -206,7 +220,7 @@ pipeline {
}
}
stage('test_b1_s2') {
- agent{label 'b1'}
+ agent{label " slave2 || slave12 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -217,9 +231,8 @@ pipeline {
}
}
}
-
stage('test_crash_gen_s3') {
- agent{label "b2"}
+ agent{label " slave3 || slave13 "}
steps {
pre_test()
@@ -252,13 +265,11 @@ pipeline {
./test-all.sh b2fq
date
'''
- }
-
+ }
}
}
-
stage('test_valgrind_s4') {
- agent{label "b3"}
+ agent{label " slave4 || slave14 "}
steps {
pre_test()
@@ -284,7 +295,7 @@ pipeline {
}
}
stage('test_b4_s7') {
- agent{label 'b4'}
+ agent{label " slave7 || slave17 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -303,7 +314,7 @@ pipeline {
}
}
stage('test_b5_s8') {
- agent{label 'b5'}
+ agent{label " slave8 || slave18 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -316,7 +327,7 @@ pipeline {
}
}
stage('test_b6_s9') {
- agent{label 'b6'}
+ agent{label " slave9 || slave19 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -329,7 +340,7 @@ pipeline {
}
}
stage('test_b7_s10') {
- agent{label 'b7'}
+ agent{label " slave10 || slave20 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
@@ -421,6 +432,5 @@ pipeline {
from: "support@taosdata.com"
)
}
- }
-
-}
+ }
+}
\ No newline at end of file
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index db20ca4edb6513f70ebbf17969be1c20dccb6163..ecc9352ba6bb68743407c9a1013719439dedf218 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -1,6 +1,6 @@
# TDengine 集群安装、管理
-多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。
+多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看《TDengine整体架构》一章。而且在安装集群之前,建议先按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。
集群的每个数据节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取(如何配置FQDN,请参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html))。端口是这个数据节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个物理节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。
@@ -12,7 +12,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预
**第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】
-**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
+**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`);
**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。
@@ -23,23 +23,23 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预
**第四步**:检查所有数据节点,以及应用程序所在物理节点的网络设置:
1. 每个物理节点上执行命令`hostname -f`,查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查);
-2. 每个物理节点上执行`ping host`, 其中host是其他物理节点的hostname, 看能否ping通其它物理节点; 如果不能ping通,需要检查网络设置, 或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的;
+2. 每个物理节点上执行`ping host`,其中host是其他物理节点的hostname,看能否ping通其它物理节点;如果不能ping通,需要检查网络设置,或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的;
3. 从应用运行的物理节点,ping taosd运行的数据节点,如果无法ping通,应用是无法连接taosd的,请检查应用所在物理节点的DNS设置或hosts文件;
4. 每个数据节点的End Point就是输出的hostname外加端口号,比如h1.taosdata.com:6030
-**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030, 其与集群配置相关参数如下:
+**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030,其与集群配置相关参数如下:
```
// firstEp 是每个数据节点首次启动后连接的第一个数据节点
firstEp h1.taosdata.com:6030
-// 必须配置为本数据节点的FQDN,如果本机只有一个hostname, 可注释掉本配置
+// 必须配置为本数据节点的FQDN,如果本机只有一个hostname, 可注释掉本项
fqdn h1.taosdata.com
// 配置本数据节点的端口号,缺省是6030
serverPort 6030
-// 使用场景,请参考《Arbitrator的使用》的部分
+// 副本数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分
arbitrator ha.taosdata.com:6042
```
@@ -53,7 +53,7 @@ arbitrator ha.taosdata.com:6042
| 2 | mnodeEqualVnodeNum | 一个mnode等同于vnode消耗的个数 |
| 3 | offlineThreshold | dnode离线阈值,超过该时间将导致Dnode离线 |
| 4 | statusInterval | dnode向mnode报告状态时长 |
-| 5 | arbitrator | 系统中裁决器的end point |
+| 5 | arbitrator | 系统中裁决器的End Point |
| 6 | timezone | 时区 |
| 7 | balance | 是否启动负载均衡 |
| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
@@ -87,7 +87,7 @@ taos>
1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030)
-2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令:
+2. 在第一个数据节点,使用CLI程序taos,登录进TDengine系统,执行命令:
```
CREATE DNODE "h2.taos.com:6030";
@@ -101,7 +101,7 @@ taos>
SHOW DNODES;
```
- 查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查
+ 查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查:
- 查看该数据节点的taosd是否正常工作,如果没有正常运行,需要先检查为什么
- 查看该数据节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),看日志里输出的该数据节点fqdn以及端口号是否为刚添加的End Point。如果不一致,需要将正确的End Point添加进去。
@@ -121,7 +121,7 @@ taos>
### 添加数据节点
-执行CLI程序taos, 使用root账号登录进系统, 执行:
+执行CLI程序taos,使用root账号登录进系统,执行:
```
CREATE DNODE "fqdn:port";
@@ -131,13 +131,13 @@ CREATE DNODE "fqdn:port";
### 删除数据节点
-执行CLI程序taos, 使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,使用root账号登录进TDengine系统,执行:
-```
-DROP DNODE "fqdn:port";
+```mysql
+DROP DNODE "fqdn:port | dnodeID";
```
-其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
+通过"fqdn:port"或"dnodeID"来指定一个具体的节点都是可以的。其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号;dnodeID可以通过SHOW DNODES获得。
**【注意】**
@@ -147,25 +147,41 @@ DROP DNODE "fqdn:port";
- 一个数据节点被drop之后,其他节点都会感知到这个dnodeID的删除操作,任何集群中的节点都不会再接收此dnodeID的请求。
- - dnodeID的是集群自动分配的,不得人工指定。它在生成时递增的,不会重复。
+ - dnodeID是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
-### 查看数据节点
+### 手动迁移数据节点
+
+手动将某个vnode迁移到指定的dnode。
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+```mysql
+ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
```
+
+其中:source-dnodeId是源dnodeId,也就是待迁移的vnode所在的dnodeID;vgId可以通过SHOW VGROUPS获得,列表的第一列;dest-dnodeId是目标dnodeId。
+
+**【注意】**
+
+ - 只有在集群的自动负载均衡选项关闭时(balance设置为0),才允许手动迁移。
+ - 只有处于正常工作状态的vnode才能被迁移:master/slave,当处于offline/unsynced/syncing状态时,是不能迁移的。
+ - 迁移前,务必核实目标dnode的资源足够:CPU、内存、硬盘。
+
+### 查看数据节点
+
+执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+```mysql
SHOW DNODES;
```
-它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。
+它将列出集群中所有的dnode,每个dnode的ID,end_point(fqdn:port),状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。
### 查看虚拟节点组
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
-
-```
+执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+```mysql
SHOW VGROUPS;
```
@@ -173,9 +189,9 @@ SHOW VGROUPS;
TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。
-vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
+vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误"more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
-```
+```mysql
CREATE DATABASE demo replica 3;
```
@@ -183,20 +199,19 @@ CREATE DATABASE demo replica 3;
一个数据节点dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。
-因为vnode的引入,无法简单的给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。
+因为vnode的引入,无法简单地给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。
## Mnode的高可用性
TDengine集群是由mnode (taosd的一个模块,管理节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。
-一个集群有多个数据节点dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
+一个集群有多个数据节点dnode,但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
-```
+```mysql
SHOW MNODES;
```
-来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
-当集群中第一个数据节点启动时,该数据节点一定会运行一个mnode实例,否则该数据节点dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。
+来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。当集群中第一个数据节点启动时,该数据节点一定会运行一个mnode实例,否则该数据节点dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。
为保证mnode服务的高可用性,numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的,如果numOfMnodes大于2,复制参数quorum自动设为2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。
@@ -210,7 +225,7 @@ SHOW MNODES;
- 当一个数据节点从集群中移除时,系统将自动把该数据节点上的数据转移到其他数据节点,无需任何人工干预。
- 如果一个数据节点过热(数据量过大),系统将自动进行负载均衡,将该数据节点的一些vnode自动挪到其他节点。
-当上述三种情况发生时,系统将启动一各个数据节点的负载计算,从而决定如何挪动。
+当上述三种情况发生时,系统将启动各个数据节点的负载计算,从而决定如何挪动。
**【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。**
@@ -225,7 +240,7 @@ SHOW MNODES;
## Arbitrator的使用
-如果副本数为偶数,当一个 vnode group 里一半 vnode 不工作时,是无法从中选出 master 的。同理,一半 mnode 不工作时,是无法选出 mnode 的 master 的,因为存在“split brain”问题。为解决这个问题,TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator,那么节点 B 就能正常工作。
+如果副本数为偶数,当一个 vnode group 里一半或超过一半的 vnode 不工作时,是无法从中选出 master 的。同理,一半或超过一半的 mnode 不工作时,是无法选出 mnode 的 master 的,因为存在“split brain”问题。为解决这个问题,TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator,那么节点 B 就能正常工作。
总之,在目前版本下,TDengine 建议在双副本环境要配置 Arbitrator,以提升系统的可用性。
@@ -235,3 +250,9 @@ Arbitrator 的执行程序名为 tarbitrator。该程序对系统资源几乎没
3. 修改每个 taosd 实例的配置文件,在 taos.cfg 里将参数 arbitrator 设置为 tarbitrator 程序所对应的 End Point。(如果该参数配置了,当副本数为偶数时,系统将自动连接配置的 Arbitrator。如果副本数为奇数,即使配置了 Arbitrator,系统也不会去建立连接。)
4. 在配置文件中配置了的 Arbitrator,会出现在 `SHOW DNODES;` 指令的返回结果中,对应的 role 列的值会是“arb”。
+
+查看集群 Arbitrator 的状态【2.0.14.0 以后支持】
+
+```mysql
+SHOW DNODES;
+```
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 496d16ec63eba6a768bac7318a0d7c0d7b0a1293..4a6eca4bb3bd855ad4a87407db01ce55b331ed9d 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -218,7 +218,8 @@ taosd -C
| 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 |
| 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 |
| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。 | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
-| 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | |
+| 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | |
+| 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 |
**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index c0be5cc68fa8f2fab0446aac69f3fef71d6cf1af..6a53423e9b57f1b051c20ee9277f09acd1e9b335 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -206,7 +206,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
显示当前数据库下的所有数据表信息。
- 说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。
+ 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。
@@ -953,6 +953,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
### 选择函数
+在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。
+
- **MIN**
```mysql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index b7a2320b07b08abaa164b030507adf65ecaae3c1..89e3832007f11dc0ede00e639d75875f142b12f1 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -657,9 +657,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
index = 0;
sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_RP) {
- tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
- code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
- return code;
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
}
*str += index;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index ad91209f1833b14e86603223888df92300557f32..59978d90f0f550d9967c7a411b3f9d2a58d1e04e 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -4293,7 +4293,7 @@ static bool isValidExpr(tSqlExpr* pLeft, tSqlExpr* pRight, int32_t optr) {
if (pRight == NULL) {
return true;
}
-
+
if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && pRight->tokenId >= TK_BOOL && pRight->tokenId <= TK_BINARY) {
return false;
}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 3554b43ff6c2a8800409fdd67be7040d143627d0..0d26ec58f68b02cf7e04eccad19c1efff8f16373 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2404,8 +2404,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
tscColumnCopy(x, pCol);
} else {
- SColumn *p = tscColumnClone(pCol);
- taosArrayPush(pNewQueryInfo->colList, &p);
+ SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
+ tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
}
}
}
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 3c904dc03473015179bff37921c2f4381f68c1bb..f9135605bb7eb2e15fd99d92125c6e93cdfc7f92 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -991,7 +991,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
- cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN;
+ cfg.maxValue = TSDB_MAX_FIELD_LEN;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ae03b4e5cd92056ce0ea995c8edcd21e51e24bb
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java
@@ -0,0 +1,570 @@
+package com.taosdata.jdbc.cases;
+
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+import java.text.Format;
+import java.text.SimpleDateFormat;
+
+public class TimestampPrecisonInNanoRestTest {
+
+ private static final String host = "127.0.0.1";
+ private static final String ns_timestamp_db = "ns_precision_test";
+ private static final long timestamp1 = System.currentTimeMillis();
+ private static final long timestamp2 = timestamp1 * 1000_000 + 123455;
+ private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456;
+ private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ private static final String date1 = format.format(new Date(timestamp1));
+ private static final String date4 = format.format(new Date(timestamp1 + 10L));
+ private static final String date2 = date1 + "123455";
+ private static final String date3 = date4 + "123456";
+
+
+ private static Connection conn;
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+ conn = DriverManager.getConnection(url, properties);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + ns_timestamp_db);
+ stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
+ stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
+ stmt.close();
+ }
+
+ @After
+ public void afterEach() throws SQLException {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + ns_timestamp_db);
+ stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
+ stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
+ stmt.close();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ try {
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void checkCount(long count, ResultSet rs) throws SQLException {
+ if (count == 0) {
+ Assert.fail();
+ }
+ rs.next();
+ long test_count = rs.getLong(1);
+ Assert.assertEquals(count, test_count);
+ }
+
+ private void checkTime(long ts, ResultSet rs) throws SQLException {
+ rs.next();
+ int nanos = rs.getTimestamp(1).getNanos();
+ Assert.assertEquals(ts % 1000_000_000l, nanos);
+ long test_ts = rs.getLong(1);
+ Assert.assertEquals(ts / 1000_000l, test_ts);
+ }
+
+ @Test
+ public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'");
+ checkTime(timestamp3, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
+ checkTime(timestamp1 * 1000_000l + 123123l, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'");
+ checkTime(timestamp1 * 1000_000l + 123123l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'");
+ checkTime(timestamp2, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ long timestamp4 = timestamp1 * 1000_000 + 123123;
+ stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
+ checkTime(timestamp4, rs);
+ rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'");
+ checkTime(timestamp4, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectLastRowFromWeatherForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectLastRowFromWeatherForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectFirstRowFromWeatherForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canSelectFirstRowFromWeatherForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'");
+ checkCount(2l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInDateTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInNumberTypeForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryBetweenAndInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'");
+ checkTime(timestamp3, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualToInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualToInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualInDateTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canQueryNotEqualInNumberTypeForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
+ checkCount(1l, rs);
+ rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'");
+ checkTime(timestamp2, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather");
+ checkCount(3l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
+ rs.next();
+ long sum = rs.getLong(2);
+ Assert.assertEquals(127l, sum);
+ rs.next();
+ sum = rs.getLong(2);
+ Assert.assertEquals(128l, sum);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
+ rs.next();
+ long sum = rs.getLong(2);
+ Assert.assertEquals(127l, sum);
+ rs.next();
+ sum = rs.getLong(2);
+ Assert.assertEquals(128l, sum);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testDataOutOfRangeExceptionForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)");
+ } catch (SQLException e) {
+ Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testDataOutOfRangeExceptionForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)");
+ } catch (SQLException e) {
+ Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
+ }
+ }
+
+ @Test
+ public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)");
+ ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'");
+ checkCount(1l, rs);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js
index 0fc8dc8ef1a057c7e410956a2b68072e65cbb613..3bc0fe0aca060a32daa7a5cebd2dbfb99ac29a7c 100644
--- a/src/connector/nodejs/nodetaos/taosobjects.js
+++ b/src/connector/nodejs/nodetaos/taosobjects.js
@@ -47,7 +47,8 @@ class TaosTimestamp extends Date {
super(Math.floor(date / 1000));
this.precisionExtras = date % 1000;
} else if (precision === 2) {
- super(parseInt(date / 1000000));
+ // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected
+ super(parseInt(BigInt(date) / 1000000n));
// use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405)
this.precisionExtras = parseInt(BigInt(date) % 1000000n);
} else {
diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json
index db37318a164c6207432ebb64defb608381d2cb49..6a2c66100b3d1921b3ce8997e70d33f024e5c3f2 100644
--- a/src/connector/nodejs/package.json
+++ b/src/connector/nodejs/package.json
@@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
- "version": "2.0.9",
+ "version": "2.0.10",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {
diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c
index ee37ffdcbb90f710253d051f1d4895ee8bc26dea..2f77788025e6d5f36460ceb866b64d54736af6a1 100644
--- a/src/dnode/src/dnodeSystem.c
+++ b/src/dnode/src/dnodeSystem.c
@@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) {
}
} else if (strcmp(argv[i], "-C") == 0) {
dump_config = 1;
+ } else if (strcmp(argv[i], "--force-keep-file") == 0) {
+ tsdbForceKeepFile = true;
} else if (strcmp(argv[i], "--compact-mnode-wal") == 0) {
tsCompactMnodeWal = 1;
} else if (strcmp(argv[i], "-V") == 0) {
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index a74a531361e64126ad922660d0fd57fc7a8693d5..1767b25402f0b33bce6068568810b499768dd0ba 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -471,6 +471,7 @@ typedef struct {
bool stableQuery; // super table query or not
bool topBotQuery; // TODO used bitwise flag
+ bool interpQuery; // interp query or not
bool groupbyColumn; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
bool timeWindowInterpo;// if the time window start/end required interpolation
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 57c1271d07e633148d089be8f107003e8062476c..596a6f9df56eaf2931cad51a4aeb9029add3e48f 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -75,6 +75,7 @@ extern char configDir[];
#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN
#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
+
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 20
#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
@@ -245,7 +246,6 @@ typedef struct SArguments_S {
uint32_t disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms, us or ns. accordig to database precision
uint32_t method_of_delete;
- char ** arg_list;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
bool demo_mode; // use default column name and semi-random data
@@ -637,7 +637,6 @@ SArguments g_args = {
0, // disorderRatio
1000, // disorderRange
1, // method_of_delete
- NULL, // arg_list
0, // totalInsertRows;
0, // totalAffectedRows;
true, // demo_mode;
@@ -1011,6 +1010,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->datatype[0] = argv[i];
+ arguments->datatype[1] = NULL;
} else {
// more than one col
int index = 0;
@@ -1415,6 +1415,7 @@ static char *rand_float_str()
return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN);
}
+
static float rand_float()
{
static int cursor;
@@ -6409,6 +6410,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
bool flagSleep = true;
uint64_t sleepTimeTotal = 0;
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
st = taosGetTimestampMs();
@@ -6585,6 +6589,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalAffectedRows += affectedRows;
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
@@ -6606,6 +6615,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
}
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
free_of_interlace:
tmfree(pThreadInfo->buffer);
@@ -6643,6 +6654,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
for (uint64_t tableSeq = pThreadInfo->start_table_from;
tableSeq <= pThreadInfo->end_table_to;
tableSeq ++) {
@@ -6748,6 +6762,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->totalAffectedRows += affectedRows;
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
@@ -6770,6 +6789,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__, pThreadInfo->samplePos);
}
} // tableSeq
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
free_of_progressive:
tmfree(pThreadInfo->buffer);
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 5fe22826b7d0a2270300bacc3d1ae8f59d346a54..570f5c344b624eea1f23fd13f11bfc6e230c61d5 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -253,11 +253,15 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
int32_t connId = htonl(pHBMsg->connId);
SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort);
+ if (pConn == NULL) {
+ pHBMsg->pid = htonl(pHBMsg->pid);
+ pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName);
+ }
if (pConn == NULL) {
// do not close existing links, otherwise
// mError("failed to create connId, close connect");
- // pRsp->killConnection = 1;
+ // pRsp->killConnection = 1;
} else {
pRsp->connId = htonl(pConn->connId);
mnodeSaveQueryStreamList(pConn, pHBMsg);
diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c
index c0699b05b364927492b8c2656bead0e14d46ab5a..9a993dfaafab725847a43097497287fbe5642511 100644
--- a/src/mnode/src/mnodeWrite.c
+++ b/src/mnode/src/mnodeWrite.c
@@ -65,7 +65,14 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_MSG_NOT_PROCESSED;
}
- int32_t code = mnodeInitMsg(pMsg);
+ int32_t code = grantCheck(TSDB_GRANT_TIME);
+ if (code != TSDB_CODE_SUCCESS) {
+ mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
+ tstrerror(code));
+ return code;
+ }
+
+ code = mnodeInitMsg(pMsg);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
tstrerror(code));
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 996d9257560be8587f0e6ccf9d5d5b0ea4bbc0e1..56fab57e26227212ca6d2502fc7e035e2af258d5 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -333,6 +333,8 @@ enum OPERATOR_TYPE_E {
OP_Distinct = 20,
OP_Join = 21,
OP_StateWindow = 22,
+ OP_AllTimeWindow = 23,
+ OP_AllMultiTableTimeInterval = 24,
};
typedef struct SOperatorInfo {
@@ -554,11 +556,13 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index d2802d9fe00be74f750abd39b81cc3585bcef773..ce607f0fe20a2743579e99e71ddf78fc2e1dbcdc 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -39,7 +39,6 @@
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
-#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1)
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr);
@@ -60,6 +59,7 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t
void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr);
void* freeColumnInfo(SColumnInfo* pColumnInfo, int32_t numOfCols);
+int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable);
static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) {
assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
@@ -70,7 +70,7 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage*
int32_t offset) {
assert(rowOffset >= 0 && pQueryAttr != NULL);
- int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
+ int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
return ((char *)page->data) + rowOffset + offset * numOfRows;
}
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index dad05df22a5d85ec2e00911c9060fc19d6e0cb42..8d7f52eb2604b6cead74c5cadbd2afcc331c0490 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3708,27 +3708,59 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
}
} else {
// no data generated yet
- if (pCtx->size == 1) {
+ if (pCtx->size < 1) {
return;
}
// check the timestamp in input buffer
TSKEY skey = GET_TS_DATA(pCtx, 0);
- TSKEY ekey = GET_TS_DATA(pCtx, 1);
-
- // no data generated yet
- if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
- return;
- }
-
- assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
if (type == TSDB_FILL_PREV) {
+ if (skey > pCtx->startTs) {
+ return;
+ }
+
+ if (pCtx->size > 1) {
+ TSKEY ekey = GET_TS_DATA(pCtx, 1);
+ if (ekey > skey && ekey <= pCtx->startTs) {
+ skey = ekey;
+ }
+ }
assignVal(pCtx->pOutput, pCtx->pInput, pCtx->outputBytes, pCtx->inputType);
} else if (type == TSDB_FILL_NEXT) {
- char* val = ((char*)pCtx->pInput) + pCtx->inputBytes;
+ TSKEY ekey = skey;
+ char* val = NULL;
+
+ if (ekey < pCtx->startTs) {
+ if (pCtx->size > 1) {
+ ekey = GET_TS_DATA(pCtx, 1);
+ if (ekey < pCtx->startTs) {
+ return;
+ }
+
+ val = ((char*)pCtx->pInput) + pCtx->inputBytes;
+ } else {
+ return;
+ }
+ } else {
+ val = (char*)pCtx->pInput;
+ }
+
assignVal(pCtx->pOutput, val, pCtx->outputBytes, pCtx->inputType);
} else if (type == TSDB_FILL_LINEAR) {
+ if (pCtx->size <= 1) {
+ return;
+ }
+
+ TSKEY ekey = GET_TS_DATA(pCtx, 1);
+
+ // no data generated yet
+ if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
+ return;
+ }
+
+ assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
+
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
@@ -4047,9 +4079,9 @@ void block_func_merge(SQLFunctionCtx* pCtx) {
STableBlockDist info = {0};
int32_t len = *(int32_t*) pCtx->pInput;
blockDistInfoFromBinary(((char*)pCtx->pInput) + sizeof(int32_t), len, &info);
-
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
mergeTableBlockDist(pResInfo, &info);
+ taosArrayDestroy(info.dataBlockInfos);
pResInfo->numOfRes = 1;
pResInfo->hasResult = DATA_SET_FLAG;
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 22e4f87ef98dd1d8fbca8016e3e7f781eb8e84ff..f93163665188c3e57d1bcdbce73bfae8020858fb 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -448,6 +448,44 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim
pResultRowInfo->capacity = (int32_t)newCapacity;
}
+static bool chkResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData,
+ int16_t bytes, bool masterscan, uint64_t uid) {
+ bool existed = false;
+ SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid);
+
+ SResultRow **p1 =
+ (SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+
+ // in case of repeat scan/reverse scan, no new time window added.
+ if (QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQueryAttr)) {
+ if (!masterscan) { // the *p1 may be NULL in case of sliding+offset exists.
+ return p1 != NULL;
+ }
+
+ if (p1 != NULL) {
+ if (pResultRowInfo->size == 0) {
+ existed = false;
+ assert(pResultRowInfo->curPos == -1);
+ } else if (pResultRowInfo->size == 1) {
+ existed = (pResultRowInfo->pResult[0] == (*p1));
+ } else { // check if current pResultRowInfo contains the existed pResultRow
+ SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid, pResultRowInfo);
+ int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
+ if (index != NULL) {
+ existed = true;
+ } else {
+ existed = false;
+ }
+ }
+ }
+
+ return existed;
+ }
+
+ return p1 != NULL;
+}
+
+
static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, int64_t tid,
char* pData, int16_t bytes, bool masterscan, uint64_t tableGroupId) {
bool existed = false;
@@ -592,6 +630,35 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
return w;
}
+// get the correct time window according to the handled timestamp
+static STimeWindow getCurrentActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) {
+ STimeWindow w = {0};
+
+ if (pResultRowInfo->curPos == -1) { // the first window, from the previous stored value
+ getInitialStartTimeWindow(pQueryAttr, ts, &w);
+
+ if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') {
+ w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1;
+ } else {
+ w.ekey = w.skey + pQueryAttr->interval.interval - 1;
+ }
+ } else {
+ w = getResultRow(pResultRowInfo, pResultRowInfo->curPos)->win;
+ }
+
+ /*
+ * query border check, skey should not be bounded by the query time range, since the value skey will
+ * be used as the time window index value. So we only change ekey of time window accordingly.
+ */
+ if (w.ekey > pQueryAttr->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) {
+ w.ekey = pQueryAttr->window.ekey;
+ }
+
+ return w;
+}
+
+
+
// a new buffer page for each table. Needs to opt this design
static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf *pResultBuf, int32_t tid, uint32_t size) {
if (pWindowRes->pageId != -1) {
@@ -637,6 +704,14 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf
return 0;
}
+static bool chkWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win,
+ bool masterscan, SResultRow **pResult, int64_t groupId, SQLFunctionCtx* pCtx,
+ int32_t numOfOutput, int32_t* rowCellInfoOffset) {
+ assert(win->skey <= win->ekey);
+
+ return chkResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, groupId);
+}
+
static int32_t setResultOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, STimeWindow *win,
bool masterscan, SResultRow **pResult, int64_t tableGroupId, SQLFunctionCtx* pCtx,
int32_t numOfOutput, int32_t* rowCellInfoOffset) {
@@ -707,7 +782,7 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se
}
}
- assert(forwardStep > 0);
+ assert(forwardStep >= 0);
return forwardStep;
}
@@ -764,6 +839,8 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
pResultRowInfo->curPos = i + 1; // current not closed result object
}
}
+
+ //pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey;
}
static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQueryAttr* pQueryAttr, TSKEY lastKey) {
@@ -813,7 +890,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
}
}
- assert(num > 0);
+ assert(num >= 0);
return num;
}
@@ -973,6 +1050,11 @@ static int32_t getNextQualifiedWindow(SQueryAttr* pQueryAttr, STimeWindow *pNext
}
}
+ /* interp query with fill should not skip time window */
+ if (pQueryAttr->pointInterpQuery && pQueryAttr->fillType != TSDB_FILL_NONE) {
+ return startPos;
+ }
+
/*
* This time window does not cover any data, try next time window,
* this case may happen when the time window is too small
@@ -1485,6 +1567,82 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey);
}
+
+static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) {
+ STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info;
+
+ SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv;
+ int32_t numOfOutput = pOperatorInfo->numOfOutput;
+ SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
+ bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
+
+ TSKEY* tsCols = NULL;
+ if (pSDataBlock->pDataBlock != NULL) {
+ SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, 0);
+ tsCols = (int64_t*) pColDataInfo->pData;
+ assert(tsCols[0] == pSDataBlock->info.window.skey &&
+ tsCols[pSDataBlock->info.rows - 1] == pSDataBlock->info.window.ekey);
+ }
+
+ int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1);
+ TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows);
+
+ STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr);
+ bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
+
+ SResultRow* pResult = NULL;
+ int32_t forwardStep = 0;
+ int32_t ret = 0;
+
+ while (1) {
+ // null data, failed to allocate more memory buffer
+ ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult,
+ tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
+ if (ret != TSDB_CODE_SUCCESS) {
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ TSKEY ekey = reviseWindowEkey(pQueryAttr, &win);
+ forwardStep = getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true);
+
+ // window start(end) key interpolation
+ doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+
+ int32_t prevEndPos = (forwardStep - 1) * step + startPos;
+ startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
+ if (startPos < 0) {
+ if (win.skey <= pQueryAttr->window.ekey) {
+ int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
+ pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
+ if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ startPos = pSDataBlock->info.rows - 1;
+
+ // window start(end) key interpolation
+ doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ }
+
+ break;
+ }
+ setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
+ }
+
+ if (pQueryAttr->timeWindowInterpo) {
+ int32_t rowIndex = ascQuery? (pSDataBlock->info.rows-1):0;
+ saveDataBlockLastRow(pRuntimeEnv, &pSDataBlock->info, pSDataBlock->pDataBlock, rowIndex);
+ }
+
+ updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey);
+}
+
+
+
static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
STableQueryInfo* item = pRuntimeEnv->current;
@@ -1981,6 +2139,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
break;
}
+ case OP_AllMultiTableTimeInterval: {
+ pRuntimeEnv->proot =
+ createAllMultiTableTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ break;
+ }
case OP_TimeWindow: {
pRuntimeEnv->proot =
createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
@@ -1990,6 +2154,15 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
break;
}
+ case OP_AllTimeWindow: {
+ pRuntimeEnv->proot =
+ createAllTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+ int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+ if (opType != OP_DummyInput && opType != OP_Join) {
+ setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+ }
+ break;
+ }
case OP_Groupby: {
pRuntimeEnv->proot =
createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
@@ -2533,7 +2706,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
int32_t MIN_ROWS_PER_PAGE = 4;
- *rowsize = (int32_t)(pQueryAttr->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+ *rowsize = (int32_t)(pQueryAttr->resultRowSize * getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
int32_t overhead = sizeof(tFilePage);
// one page contains at least two rows
@@ -2907,6 +3080,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
// check if this data block is required to load
if ((*status) != BLK_DATA_ALL_NEEDED) {
+ bool needFilter = true;
+
// the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet,
// the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer
if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
@@ -2916,10 +3091,16 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
- if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
- pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
- pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ if (pQueryAttr->pointInterpQuery) {
+ needFilter = chkWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+ pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+ pTableScanInfo->rowCellInfoOffset);
+ } else {
+ if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
+ pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+ pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
}
} else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate
doSetTableGroupOutputBuf(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx,
@@ -2927,7 +3108,11 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
pRuntimeEnv->current->groupIndex);
}
- (*status) = doFilterByBlockTimeWindow(pTableScanInfo, pBlock);
+ if (needFilter) {
+ (*status) = doFilterByBlockTimeWindow(pTableScanInfo, pBlock);
+ } else {
+ (*status) = BLK_DATA_ALL_NEEDED;
+ }
}
SDataBlockInfo* pBlockInfo = &pBlock->info;
@@ -3437,7 +3622,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
// re-estabilish output buffer pointer.
int32_t functionId = pBInfo->pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
- pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[0].pOutput;
+ pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
}
}
}
@@ -4538,6 +4723,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
pQueryAttr->tsdb = tsdb;
+
if (tsdb != NULL) {
int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery);
if (code != TSDB_CODE_SUCCESS) {
@@ -4946,7 +5132,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
pTableScanInfo->pCtx = pAggInfo->binfo.pCtx;
pTableScanInfo->pResultRowInfo = &pAggInfo->binfo.resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pAggInfo->binfo.rowCellInfoOffset;
- } else if (pDownstream->operatorType == OP_TimeWindow) {
+ } else if (pDownstream->operatorType == OP_TimeWindow || pDownstream->operatorType == OP_AllTimeWindow) {
STableIntervalOperatorInfo *pIntervalInfo = pDownstream->info;
pTableScanInfo->pCtx = pIntervalInfo->pCtx;
@@ -4960,7 +5146,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
pTableScanInfo->pResultRowInfo = &pGroupbyInfo->binfo.resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pGroupbyInfo->binfo.rowCellInfoOffset;
- } else if (pDownstream->operatorType == OP_MultiTableTimeInterval) {
+ } else if (pDownstream->operatorType == OP_MultiTableTimeInterval || pDownstream->operatorType == OP_AllMultiTableTimeInterval) {
STableIntervalOperatorInfo *pInfo = pDownstream->info;
pTableScanInfo->pCtx = pInfo->pCtx;
@@ -5104,7 +5290,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
pInfo->resultRowFactor =
- (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
+ (int32_t)(getRowNumForMultioutput(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
@@ -5579,6 +5765,66 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
}
+static SSDataBlock* doAllIntervalAgg(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ STableIntervalOperatorInfo* pIntervalInfo = pOperator->info;
+
+ SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
+
+ if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
+ pOperator->status = OP_EXEC_DONE;
+ }
+
+ return pIntervalInfo->pRes;
+ }
+
+ SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+ int32_t order = pQueryAttr->order.order;
+ STimeWindow win = pQueryAttr->window;
+
+ SOperatorInfo* upstream = pOperator->upstream[0];
+
+ while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ if (pBlock == NULL) {
+ break;
+ }
+
+ setTagValue(pOperator, pRuntimeEnv->current->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput);
+
+ // the pDataBlock are always the same one, no need to call this again
+ setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order);
+ hashAllIntervalAgg(pOperator, &pIntervalInfo->resultRowInfo, pBlock, 0);
+ }
+
+ // restore the value
+ pQueryAttr->order.order = order;
+ pQueryAttr->window = win;
+
+ pOperator->status = OP_RES_TO_RETURN;
+ closeAllResultRows(&pIntervalInfo->resultRowInfo);
+ setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+ finalizeQueryResult(pOperator, pIntervalInfo->pCtx, &pIntervalInfo->resultRowInfo, pIntervalInfo->rowCellInfoOffset);
+
+ initGroupResInfo(&pRuntimeEnv->groupResInfo, &pIntervalInfo->resultRowInfo);
+ toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
+
+ if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
+ pOperator->status = OP_EXEC_DONE;
+ }
+
+ return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
+}
+
static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -5634,6 +5880,63 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
return pIntervalInfo->pRes;
}
+static SSDataBlock* doAllSTableIntervalAgg(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ STableIntervalOperatorInfo* pIntervalInfo = pOperator->info;
+ SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
+ if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
+ pOperator->status = OP_EXEC_DONE;
+ }
+
+ return pIntervalInfo->pRes;
+ }
+
+ SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+ int32_t order = pQueryAttr->order.order;
+
+ SOperatorInfo* upstream = pOperator->upstream[0];
+
+ while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ if (pBlock == NULL) {
+ break;
+ }
+
+ // the pDataBlock are always the same one, no need to call this again
+ STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
+
+ setTagValue(pOperator, pTableQueryInfo->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput);
+ setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order);
+ setIntervalQueryRange(pRuntimeEnv, pBlock->info.window.skey);
+
+ hashAllIntervalAgg(pOperator, &pTableQueryInfo->resInfo, pBlock, pTableQueryInfo->groupIndex);
+ }
+
+ pOperator->status = OP_RES_TO_RETURN;
+ pQueryAttr->order.order = order; // TODO : restore the order
+ doCloseAllTimeWindow(pRuntimeEnv);
+ setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+
+ copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
+ if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
+ pOperator->status = OP_EXEC_DONE;
+ }
+
+ return pIntervalInfo->pRes;
+}
+
+
+
static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
@@ -6016,7 +6319,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
- int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+ int32_t numOfRows = (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
@@ -6255,6 +6558,32 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
appendUpstream(pOperator, upstream);
return pOperator;
}
+
+
+SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+ STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo));
+
+ pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset);
+ pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+ initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT);
+
+ SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+
+ pOperator->name = "AllTimeIntervalAggOperator";
+ pOperator->operatorType = OP_AllTimeWindow;
+ pOperator->blockingOptr = true;
+ pOperator->status = OP_IN_EXECUTING;
+ pOperator->pExpr = pExpr;
+ pOperator->numOfOutput = numOfOutput;
+ pOperator->info = pInfo;
+ pOperator->pRuntimeEnv = pRuntimeEnv;
+ pOperator->exec = doAllIntervalAgg;
+ pOperator->cleanup = destroyBasicOperatorInfo;
+
+ appendUpstream(pOperator, upstream);
+ return pOperator;
+}
+
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
pInfo->colIndex = -1;
@@ -6277,7 +6606,6 @@ SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpe
appendUpstream(pOperator, upstream);
return pOperator;
-
}
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo));
@@ -6329,6 +6657,32 @@ SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRunti
return pOperator;
}
+SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+ STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo));
+
+ pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset);
+ pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+ initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT);
+
+ SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+ pOperator->name = "AllMultiTableTimeIntervalOperator";
+ pOperator->operatorType = OP_AllMultiTableTimeInterval;
+ pOperator->blockingOptr = true;
+ pOperator->status = OP_IN_EXECUTING;
+ pOperator->pExpr = pExpr;
+ pOperator->numOfOutput = numOfOutput;
+ pOperator->info = pInfo;
+ pOperator->pRuntimeEnv = pRuntimeEnv;
+
+ pOperator->exec = doAllSTableIntervalAgg;
+ pOperator->cleanup = destroyBasicOperatorInfo;
+
+ appendUpstream(pOperator, upstream);
+
+ return pOperator;
+}
+
+
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo));
pInfo->colIndex = -1; // group by column index
@@ -6339,7 +6693,7 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize *
- (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
+ (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index 7dd73c9fe48be39ee3a7a879348076ac3fbe9f44..1a86bbae36697224585522b5be836c61394c7cc4 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else {
assert(pFillInfo->currentKey == ts);
initBeforeAfterDataBuf(pFillInfo, prev);
+ if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) {
+ initBeforeAfterDataBuf(pFillInfo, next);
+ ++pFillInfo->index;
+ copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
+ --pFillInfo->index;
+ }
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
assignVal(output, src, pCol->col.bytes, pCol->col.type);
memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
+ } else if (pFillInfo->type == TSDB_FILL_NEXT) {
+ if (*next) {
+ assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type);
+ } else {
+ setNull(output, pCol->col.type, pCol->col.bytes);
+ }
} else {
assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
}
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index e01f41276fb73e9293ba5e3b379c85600053ed93..b8a5ee7699b34fed82ad67a592a6ca9148cc92cb 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -567,10 +567,18 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
} else if (pQueryAttr->interval.interval > 0) {
if (pQueryAttr->stableQuery) {
- op = OP_MultiTableTimeInterval;
+ if (pQueryAttr->pointInterpQuery) {
+ op = OP_AllMultiTableTimeInterval;
+ } else {
+ op = OP_MultiTableTimeInterval;
+ }
taosArrayPush(plan, &op);
- } else {
- op = OP_TimeWindow;
+ } else {
+ if (pQueryAttr->pointInterpQuery) {
+ op = OP_AllTimeWindow;
+ } else {
+ op = OP_TimeWindow;
+ }
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
@@ -578,7 +586,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
}
- if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
+ if (pQueryAttr->fillType != TSDB_FILL_NONE) {
op = OP_Fill;
taosArrayPush(plan, &op);
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index a3d2e424d23e5ee566bc54117d3fc421d5b42d78..4caf351799adbf000265566fb22617067efb725d 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -30,6 +30,18 @@ typedef struct SCompSupporter {
int32_t order;
} SCompSupporter;
+int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) {
+ if (pQueryAttr && (!stable)) {
+ for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
+ if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) {
+ return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64;
+ }
+ }
+ }
+
+ return 1;
+}
+
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) {
int32_t size = 0;
diff --git a/src/query/tests/rangeMergeTest.cpp b/src/query/tests/rangeMergeTest.cpp
index e65508a300c7823dfc0127ba4a0881a5c7591f3e..f7fc558ccfa71ffc186725ea820e81eb90045d7b 100644
--- a/src/query/tests/rangeMergeTest.cpp
+++ b/src/query/tests/rangeMergeTest.cpp
@@ -330,7 +330,7 @@ void intDataTest() {
filterAddRange(h, ra + i, TSDB_RELATION_AND);
}
filterGetRangeNum(h, &num);
- ASSERT_EQ(num, 0);
+ ASSERT_EQ(num, 1);
filterFreeRangeCtx(h);
diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c
index 0449ecac8b228662455930b8caf7ff2b5a2da7b2..25495182498f7c1a82f9f9459290e44f082f5eb2 100644
--- a/src/rpc/src/rpcTcp.c
+++ b/src/rpc/src/rpcTcp.c
@@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
+#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
+ if (fd == (SOCKET)-1) return NULL;
+#else
if (fd <= 0) return NULL;
+#endif
struct sockaddr_in sin;
uint16_t localPort = 0;
diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h
index 9a8de01f71331d091e75fedac61cc333dca165cc..51801c843c279f10e9e0895a0f2dee2839a3f6a2 100644
--- a/src/tsdb/inc/tsdbMeta.h
+++ b/src/tsdb/inc/tsdbMeta.h
@@ -24,8 +24,7 @@ typedef struct STable {
tstr* name; // NOTE: there a flexible string here
uint64_t suid;
struct STable* pSuper; // super table pointer
- uint8_t numOfSchemas;
- STSchema* schema[TSDB_MAX_TABLE_SCHEMAS];
+ SArray* schema;
STSchema* tagSchema;
SKVRow tagVal;
SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index
@@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
if (lock) TSDB_RLOCK_TABLE(pDTable);
if (_version < 0) { // get the latest version of schema
- pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
+ pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema);
} else { // get the schema with version
- void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
- tsdbCompareSchemaVersion, TD_EQ);
+ void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index e53d2826c76acb057020b05bfeba4e22cf128c51..68450301d8f0c8536327e593d87030920f27ff49 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -37,6 +37,8 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo);
+// For backward compatibility
+bool tsdbForceKeepFile = false;
// ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0;
@@ -1048,6 +1050,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) {
return -1;
}
+ if (tsdbForceKeepFile) {
+ struct stat tfstat;
+
+ // Get real file size
+ if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ tsdbCloseMFile(pfs->cstatus->pmf);
+ tfsClosedir(tdir);
+ regfree(®ex);
+ return -1;
+ }
+
+ if (pfs->cstatus->pmf->info.size != tfstat.st_size) {
+ int64_t tfsize = pfs->cstatus->pmf->info.size;
+ pfs->cstatus->pmf->info.size = tfstat.st_size;
+ tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
+ TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size);
+ }
+ }
+
tsdbCloseMFile(pfs->cstatus->pmf);
}
} else if (code == REG_NOMATCH) {
@@ -1212,6 +1234,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
return -1;
}
+ if (tsdbForceKeepFile) {
+ struct stat tfstat;
+
+ // Get real file size
+ if (fstat(pDFile->fd, &tfstat) < 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ taosArrayDestroy(fArray);
+ return -1;
+ }
+
+ if (pDFile->info.size != tfstat.st_size) {
+ int64_t tfsize = pDFile->info.size;
+ pDFile->info.size = tfstat.st_size;
+ tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
+ TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size);
+ }
+ }
+
tsdbCloseDFile(pDFile);
index++;
}
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 21150c66e21cf2488981bd9d475f333073e7aa22..96e86a6d99ce05624d72a557f112fa1aa0919e1f 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -43,6 +43,8 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
+static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
+static void tsdbFreeTableSchema(STable *pTable);
// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
@@ -722,17 +724,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
STsdbMeta *pMeta = pRepo->tsdbMeta;
STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
- ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
+ ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema)));
TSDB_WLOCK_TABLE(pCTable);
- if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
- pCTable->schema[pCTable->numOfSchemas++] = pSchema;
- } else {
- ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
- tdFreeSchema(pCTable->schema[0]);
- memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
- pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
- }
+ tsdbAddSchema(pCTable, pSchema);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
@@ -828,9 +823,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
TABLE_TID(pTable) = -1;
TABLE_SUID(pTable) = -1;
pTable->pSuper = NULL;
- pTable->numOfSchemas = 1;
- pTable->schema[0] = tdDupSchema(pCfg->schema);
- if (pTable->schema[0] == NULL) {
+ if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
@@ -841,7 +834,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
pTable->tagVal = NULL;
STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN);
- pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey);
+ pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
+ SL_ALLOW_DUP_KEY, getTagIndexKey);
if (pTable->pIndex == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
@@ -870,9 +864,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
} else {
TABLE_SUID(pTable) = -1;
- pTable->numOfSchemas = 1;
- pTable->schema[0] = tdDupSchema(pCfg->schema);
- if (pTable->schema[0] == NULL) {
+ if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
@@ -906,9 +898,7 @@ static void tsdbFreeTable(STable *pTable) {
TABLE_UID(pTable));
tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
- for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
- tdFreeSchema(pTable->schema[i]);
- }
+ tsdbFreeTableSchema(pTable);
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tdFreeSchema(pTable->tagSchema);
@@ -1260,9 +1250,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
tlen += tdEncodeKVRow(buf, pTable->tagVal);
} else {
- tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas);
- for (int i = 0; i < pTable->numOfSchemas; i++) {
- tlen += tdEncodeSchema(buf, pTable->schema[i]);
+ tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema));
+ for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) {
+ STSchema *pSchema = taosArrayGetP(pTable->schema, i);
+ tlen += tdEncodeSchema(buf, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@@ -1293,9 +1284,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
buf = tdDecodeKVRow(buf, &(pTable->tagVal));
} else {
- buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas));
- for (int i = 0; i < pTable->numOfSchemas; i++) {
- buf = tdDecodeSchema(buf, &(pTable->schema[i]));
+ uint8_t nSchemas;
+ buf = taosDecodeFixedU8(buf, &nSchemas);
+ for (int i = 0; i < nSchemas; i++) {
+ STSchema *pSchema;
+ buf = tdDecodeSchema(buf, &pSchema);
+ tsdbAddSchema(pTable, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@@ -1457,3 +1451,38 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
return 0;
}
+
+static int tsdbAddSchema(STable *pTable, STSchema *pSchema) {
+ ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
+
+ if (pTable->schema == NULL) {
+ pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(STSchema *));
+ if (pTable->schema == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+ }
+
+ ASSERT(taosArrayGetSize(pTable->schema) == 0 ||
+ schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema)));
+
+ if (taosArrayPush(pTable->schema, &pSchema) == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void tsdbFreeTableSchema(STable *pTable) {
+ ASSERT(pTable != NULL);
+
+ if (pTable->schema) {
+ for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) {
+ STSchema *pSchema = taosArrayGetP(pTable->schema, i);
+ tdFreeSchema(pSchema);
+ }
+
+ taosArrayDestroy(pTable->schema);
+ }
+}
\ No newline at end of file
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 716f82d1545d5aa3adb257dc3ed8ea3047acb3ca..e1d40aa7d046c4fce79d76bcbea36ee3f635163a 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -2693,7 +2693,7 @@ static void destroyHelper(void* param) {
free(param);
}
-static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
+static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
if (pQueryHandle->checkFiles) {
// check if the query range overlaps with the file data block
bool exists = true;
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index d03ce6e0f1f34478951a84b2ab18020f5cbec92b..f146ec0b8b675527b41dfb2267946193e5e5fe89 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -81,6 +81,7 @@ typedef struct {
extern SGlobalCfg tsGlobalConfig[];
extern int32_t tsGlobalConfigNum;
extern char * tsCfgStatusStr[];
+extern bool tsdbForceKeepFile;
void taosReadGlobalLogCfg();
bool taosReadGlobalCfg();
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 7577451f8830b4f981656b4237dcb3e188ab16b4..a3c01d2be79074203744d0028d7e8dd143de9c24 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -280,25 +280,26 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
-
- char pattern[128] = {0};
+
+ assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
+ char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
- assert(varDataLen(pRight) < 128);
size_t sz = varDataLen(pLeft);
- char *buf = malloc(sz + 1);
- memcpy(buf, varDataVal(pLeft), sz);
+ char *buf = malloc(sz + 1);
+ memcpy(buf, varDataVal(pLeft), sz);
buf[sz] = 0;
int32_t ret = patternMatch(pattern, buf, sz, &pInfo);
free(buf);
+ free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
int32_t taosArrayCompareString(const void* a, const void* b) {
const char* x = *(const char**)a;
const char* y = *(const char**)b;
-
+
return compareLenPrefixedStr(x, y);
}
@@ -307,19 +308,19 @@ int32_t taosArrayCompareString(const void* a, const void* b) {
// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1;
//}
int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
- return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
+ return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
}
int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
- wchar_t pattern[128] = {0};
- assert(TSDB_PATTERN_STRING_MAX_LEN < 128);
+ assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
+ wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
- assert(varDataLen(pRight) < 128);
-
+
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
+ free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh
index 5b2c860122c1719d9cb3d1555580a786e0481da3..d4853c082599151ed5764674d9be5965f7e6a0f4 100755
--- a/tests/perftest-scripts/perftest-query.sh
+++ b/tests/perftest-scripts/perftest-query.sh
@@ -101,7 +101,14 @@ function runQueryPerfTest {
python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
+ echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
+
+ echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+ python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
+
+ echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+ python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
}
diff --git a/tests/pytest/alter/alterColMultiTimes.py b/tests/pytest/alter/alterColMultiTimes.py
new file mode 100644
index 0000000000000000000000000000000000000000..173ca8158deeb1e98bd3fc5ff3b942b20b8bc26e
--- /dev/null
+++ b/tests/pytest/alter/alterColMultiTimes.py
@@ -0,0 +1,67 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def genColList(self):
+ '''
+ generate column list
+ '''
+ col_list = list()
+ for i in range(1, 18):
+ col_list.append(f'c{i}')
+ return col_list
+
+ def genIncreaseValue(self, input_value):
+ '''
+ add ', 1' to end of value every loop
+ '''
+ value_list = list(input_value)
+ value_list.insert(-1, ", 1")
+ return ''.join(value_list)
+
+ def insertAlter(self):
+ '''
+ after each alter and insert, when execute 'select * from {tbname};' taosd will coredump
+ '''
+ tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7))
+ input_value = '(now, 1)'
+ tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
+ tdSql.execute(f'insert into {tbname} values {input_value};')
+ for col in self.genColList():
+ input_value = self.genIncreaseValue(input_value)
+ tdSql.execute(f'alter table {tbname} add column {col} int;')
+ tdSql.execute(f'insert into {tbname} values {input_value};')
+ tdSql.query(f'select * from {tbname};')
+ tdSql.checkRows(18)
+
+ def run(self):
+ tdSql.prepare()
+ self.insertAlter()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp
index 376567b7e80cbb4544d48b0e28c5d6404b6db468..ec44a85d5b29c0471db64b0362126804ae73adec 100644
--- a/tests/pytest/crash_gen/valgrind_taos.supp
+++ b/tests/pytest/crash_gen/valgrind_taos.supp
@@ -17742,4 +17742,370 @@
fun:taosGetFqdn
fun:taosCheckGlobalCfg
fun:taos_init_imp
-}
\ No newline at end of file
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/bin/python3.8
+ fun:PyObject_GetItem
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:PyCode_NewWithPosOnlyArgs
+ fun:PyCode_New
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_New
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_New
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallFunctionObjArgs
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_GetAttr
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_Pack
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
+ fun:PyModule_ExecDef
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ ...
+ obj:/usr/local/lib/python3.8/dist-packages/pandas/*
+ ...
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_New
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_New
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:_PyObject_MakeTpCall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:PyObject_GetAttr
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ obj:/usr/bin/python3.8
+ fun:PyTuple_New
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:_PyFunction_Vectorcall
+}
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index c929d8da22471b9b220c6eaa5066d25deac55e00..137069e6b634195002f20dc0709db72fef5670a0 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -284,7 +284,7 @@ python3 ./test.py -f alter/alterTabAddTagWithNULL.py
python3 ./test.py -f alter/alterTimestampColDataProcess.py
# client
-python3 ./test.py -f client/client.py
+#python3 ./test.py -f client/client.py
python3 ./test.py -f client/version.py
python3 ./test.py -f client/alterDatabase.py
python3 ./test.py -f client/noConnectionErrorTest.py
@@ -386,6 +386,7 @@ python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py
+python3 ./test.py -f alter/alterColMultiTimes.py
#======================p4-end===============
diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py
index d15085f75113892b63c0ac7a94299b15d4f29d2d..1fc46fe7d643d40c2768faf505102b9f579dd2d6 100644
--- a/tests/pytest/query/queryWildcardLength.py
+++ b/tests/pytest/query/queryWildcardLength.py
@@ -157,19 +157,6 @@ class TDTestCase:
tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")')
tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");')
- # TODO sc1 leave a bug ---> TD-5918
- # sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
- # f'select * from {table_name} where bi1 like "{lp_name}"',
- # f'select * from {table_name} where bi1 like "{ul_name}"',
- # f'select * from {table_name} where nc1 like "{hp_name}"',
- # f'select * from {table_name} where nc1 like "{lp_name}"',
- # f'select * from {table_name} where nc1 like "{ul_name}"',
- # f'select * from {table_name} where si1 like "{hp_name}"',
- # f'select * from {table_name} where si1 like "{lp_name}"',
- # f'select * from {table_name} where si1 like "{ul_name}"',
- # f'select * from {table_name} where sc1 like "{hp_name}"',
- # f'select * from {table_name} where sc1 like "{lp_name}"',
- # f'select * from {table_name} where sc1 like "{ul_name}"']
sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
f'select * from {table_name} where bi1 like "{lp_name}"',
f'select * from {table_name} where bi1 like "{ul_name}"',
@@ -178,7 +165,11 @@ class TDTestCase:
f'select * from {table_name} where nc1 like "{ul_name}"',
f'select * from {table_name} where si1 like "{hp_name}"',
f'select * from {table_name} where si1 like "{lp_name}"',
- f'select * from {table_name} where si1 like "{ul_name}"']
+ f'select * from {table_name} where si1 like "{ul_name}"',
+ f'select * from {table_name} where sc1 like "{hp_name}"',
+ f'select * from {table_name} where sc1 like "{lp_name}"',
+ f'select * from {table_name} where sc1 like "{ul_name}"']
+
for sql in sql_list:
tdSql.query(sql)
if len(table_name) >= 1:
@@ -211,7 +202,6 @@ class TDTestCase:
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
-
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
index f069bb8f7030dbd8d4eec8c9c741d246f261671b..643886f434d7694f55dce193e5cc2566a4347d3e 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
@@ -47,7 +47,6 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
-
# insert: create one or mutiple tables per sql and insert multiple rows per sql
# insert data from a special timestamp
# check stable stb0
@@ -90,7 +89,6 @@ class TDTestCase:
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " %
binPath)
-
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
index 393ced14fddcc1b1eb7374ce5fb730aea5975f29..da02f45fa1141a028cfc305bae9babb1856ccb40 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
@@ -103,7 +103,6 @@ class TDTestCase:
os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
subTimes0 = self.subTimes("all_subscribe_res0.txt")
- print("pass")
self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202)
diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py
index 6b5681dfbc3773c51161d4c89a775ea653f81f7b..4a5abd49d86ff8af3881965c05a72746b262a1c8 100644
--- a/tests/pytest/tools/taosdemoPerformance.py
+++ b/tests/pytest/tools/taosdemoPerformance.py
@@ -19,11 +19,16 @@ import json
import sys
class taosdemoPerformace:
- def __init__(self, commitID, dbName, branch, type):
+ def __init__(self, commitID, dbName, branch, type, numOfTables, numOfRows, numOfInt, numOfDouble, numOfBinary):
self.commitID = commitID
self.dbName = dbName
self.branch = branch
self.type = type
+ self.numOfTables = numOfTables
+ self.numOfRows = numOfRows
+ self.numOfInt = numOfInt
+ self.numOfDouble = numOfDouble
+ self.numOfBinary = numOfBinary
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@@ -51,14 +56,14 @@ class taosdemoPerformace:
stb = {
"name": "meters",
"child_table_exists": "no",
- "childtable_count": 10000,
+ "childtable_count": self.numOfTables,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"batch_create_tbl_num": 10,
"insert_mode": "taosc",
- "insert_rows": 100000,
- "interlace_rows": 100,
+ "insert_rows": self.numOfRows,
+ "interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
@@ -68,7 +73,9 @@ class taosdemoPerformace:
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [
- {"type": "INT", "count": 4}
+ {"type": "INT", "count": self.numOfInt},
+ {"type": "DOUBLE", "count": self.numOfDouble},
+ {"type": "BINARY", "len": 128, "count": self.numOfBinary}
],
"tags": [
{"type": "INT", "count": 1},
@@ -76,6 +83,7 @@ class taosdemoPerformace:
]
}
+
stables = []
stables.append(stb)
@@ -163,21 +171,21 @@ class taosdemoPerformace:
cursor.execute("create database if not exists %s" % self.dbName)
cursor.execute("use %s" % self.dbName)
- cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20))")
- print("==================== taosdemo performance ====================")
+ cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)")
print("create tables time: %f" % float(self.createTableTime))
print("insert records time: %f" % float(self.insertRecordsTime))
print("records per second: %f" % float(self.recordsPerSecond))
print("avg delay: %f" % float(self.avgDelay))
print("max delay: %f" % float(self.maxDelay))
print("min delay: %f" % float(self.minDelay))
- cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s')" %
+ cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s', %d, %d, %d, %d, %d)" %
(float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond),
- self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type))
+ self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch,
+ self.type, self.numOfTables, self.numOfRows, self.numOfInt, self.numOfDouble, self.numOfBinary))
cursor.close()
cursor1 = self.conn.cursor()
- cursor1.execute("drop database if exists %s" % self.insertDB)
+ # cursor1.execute("drop database if exists %s" % self.insertDB)
cursor1.close()
if __name__ == '__main__':
@@ -209,8 +217,43 @@ if __name__ == '__main__':
default='glibc',
type=str,
help='build type (default: glibc)')
+ parser.add_argument(
+ '-i',
+ '--num-of-int',
+ action='store',
+ default=4,
+ type=int,
+ help='num of int columns (default: 4)')
+ parser.add_argument(
+ '-D',
+ '--num-of-double',
+ action='store',
+ default=0,
+ type=int,
+ help='num of double columns (default: 0)')
+ parser.add_argument(
+ '-B',
+ '--num-of-binary',
+ action='store',
+ default=0,
+ type=int,
+ help='num of binary columns (default: 0)')
+ parser.add_argument(
+ '-t',
+ '--num-of-tables',
+ action='store',
+ default=10000,
+ type=int,
+ help='num of tables (default: 10000)')
+ parser.add_argument(
+ '-r',
+ '--num-of-rows',
+ action='store',
+ default=100000,
+ type=int,
+ help='num of rows (default: 100000)')
args = parser.parse_args()
- perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type)
+ perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type, args.num_of_tables, args.num_of_rows, args.num_of_int, args.num_of_double, args.num_of_binary)
perftest.insertData()
perftest.createTablesAndStoreData()
diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim
index d109dd50f7c0ba0684295dbf093ba1b280e04fce..3413a0b59652550701b7220e3c8cc1fc90785b91 100644
--- a/tests/script/general/parser/fill.sim
+++ b/tests/script/general/parser/fill.sim
@@ -1050,6 +1050,27 @@ sql_error select min(c3) from m_fl_mt0 interval(10w) fill(value, 20)
sql_error select max(c3) from m_fl_mt0 interval(1n) fill(prev)
sql_error select min(c3) from m_fl_mt0 interval(1y) fill(value, 20)
+sql create table nexttb1 (ts timestamp, f1 int);
+sql insert into nexttb1 values ('2021-08-08 1:1:1', NULL);
+sql insert into nexttb1 values ('2021-08-08 1:1:5', 3);
+
+sql select last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next);
+if $rows != 9 then
+ return -1
+endi
+if $data00 != @21-08-08 01:01:01.000@ then
+ return -1
+endi
+if $data01 != @21-08-08 01:01:01.000@ then
+ return -1
+endi
+if $data02 != 3 then
+ return -1
+endi
+
+
+
+
print =============== clear
#sql drop database $db
#sql show databases
@@ -1057,4 +1078,4 @@ print =============== clear
# return -1
#endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index 5edadad3a6686ad8fafee6d24e741bb63622c20e..0c93fe919a8f8d934017135ecf1d3cff4515a3e1 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -1148,3 +1148,21 @@ endi
sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
+
+sql create table smeters (ts timestamp, current float, voltage int);
+sql insert into smeters values ('2021-08-08 10:10:10', 10, 1);
+sql insert into smeters values ('2021-08-08 10:10:12', 10, 2);
+
+sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a);
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @21-08-08 10:10:10.000@ then
+ return -1
+endi
+if $data10 != @21-08-08 10:10:12.000@ then
+ return -1
+endi
+
+
+
diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim
index 3fb91e36c66985d90776b33b89607fa9a272d500..55c5701985e8968a88891e4b11415136f48ed346 100644
--- a/tests/script/general/parser/interp.sim
+++ b/tests/script/general/parser/interp.sim
@@ -55,6 +55,9 @@ while $i < $halfNum
endw
print ====== tables created
+sql create table ap1 (ts timestamp, pav float);
+sql INSERT INTO ap1 VALUES ('2021-07-25 02:19:54.100',1) ('2021-07-25 02:19:54.200',2) ('2021-07-25 02:19:54.300',3) ('2021-07-25 02:19:56.500',4) ('2021-07-25 02:19:57.500',5) ('2021-07-25 02:19:57.600',6) ('2021-07-25 02:19:57.900',7) ('2021-07-25 02:19:58.100',8) ('2021-07-25 02:19:58.300',9) ('2021-07-25 02:19:59.100',10) ('2021-07-25 02:19:59.300',11) ('2021-07-25 02:19:59.500',12) ('2021-07-25 02:19:59.700',13) ('2021-07-25 02:19:59.900',14) ('2021-07-25 02:20:05.000', 20) ('2021-07-25 02:25:00.000', 10000);
+
run general/parser/interp_test.sim
print ================== restart server to commit data into disk
@@ -65,4 +68,4 @@ print ================== server restart completed
run general/parser/interp_test.sim
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim
index 81a77995fb828db00b65d085e0839e3b652385e0..845afb0173685bf609897646eb188d689be6df10 100644
--- a/tests/script/general/parser/interp_test.sim
+++ b/tests/script/general/parser/interp_test.sim
@@ -927,4 +927,1323 @@ endi
if $data44 != @18-11-25 19:06:00.000@ then
return -1
-endi
\ No newline at end of file
+endi
+
+
+
+
+
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(linear);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(value, 1);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 1.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 1.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 1.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 1.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 1.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(NULL);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != NULL then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != NULL then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != NULL then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != NULL then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(prev);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(next);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(linear);
+if $rows != 0 then
+ return -1
+endi
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(prev);
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(next);
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(linear);
+if $rows != 3 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(prev);
+if $rows != 3 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(next);
+if $rows != 3 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(linear);
+if $rows != 10 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != NULL then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != NULL then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != NULL then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != NULL then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(prev);
+if $rows != 10 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 14.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 14.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 14.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(next);
+if $rows != 10 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != NULL then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != NULL then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != NULL then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != NULL then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(linear);
+if $rows != 12 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.11765 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 15.29412 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(prev);
+if $rows != 12 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 14.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 14.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 14.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(next);
+if $rows != 12 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 20.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 20.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 20.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 20.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:20:02' and ts<='2021-07-25 02:20:05' interval(1s) fill(value, 1);
+if $rows != 4 then
+ return -1
+endi
+if $data00 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data11 != 1.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:20:04.000@ then
+ return -1
+endi
+if $data21 != 1.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:20:05.000@ then
+ return -1
+endi
+if $data31 != 20.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:20:02' and ts<='2021-07-25 02:20:05' interval(1s) fill(null);
+if $rows != 4 then
+ return -1
+endi
+if $data00 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @21-07-25 02:20:04.000@ then
+ return -1
+endi
+if $data21 != NULL then
+ return -1
+endi
+if $data30 != @21-07-25 02:20:05.000@ then
+ return -1
+endi
+if $data31 != 20.00000 then
+ return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(linear);
+if $rows != 32 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.11765 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 15.29412 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(prev);
+if $rows != 32 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 14.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 14.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 14.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(next);
+if $rows != 32 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 20.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 20.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 20.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 20.00000 then
+ return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(linear);
+if $rows != 307 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.11765 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 15.29412 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(prev);
+if $rows != 307 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 14.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 14.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 14.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(next);
+if $rows != 307 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 20.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 20.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 20.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 20.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(linear);
+if $rows != 3907 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.31818 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.77273 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.50000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.50000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.87500 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.11765 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 15.29412 then
+ return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(prev);
+if $rows != 3907 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 3.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 3.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 4.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 7.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 9.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 14.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 14.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 14.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 14.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(next);
+if $rows != 3907 then
+ return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+ return -1
+endi
+if $data01 != 1.00000 then
+ return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+ return -1
+endi
+if $data11 != 4.00000 then
+ return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+ return -1
+endi
+if $data21 != 4.00000 then
+ return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+ return -1
+endi
+if $data31 != 5.00000 then
+ return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+ return -1
+endi
+if $data41 != 8.00000 then
+ return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+ return -1
+endi
+if $data51 != 10.00000 then
+ return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+ return -1
+endi
+if $data61 != 20.00000 then
+ return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+ return -1
+endi
+if $data71 != 20.00000 then
+ return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+ return -1
+endi
+if $data81 != 20.00000 then
+ return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+ return -1
+endi
+if $data91 != 20.00000 then
+ return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:07' interval(1s);
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-07-25 02:20:05.000@ then
+ return -1
+endi
+if $data01 != 20.00000 then
+ return -1
+endi
+