diff --git a/.gitmodules b/.gitmodules index 7c84eac8a4ee7529005855bc836387561c49ae2d..049b39abfb2cf5f31abe10f194e7a09c4dc932f0 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "src/connector/grafanaplugin"] path = src/connector/grafanaplugin url = https://github.com/taosdata/grafanaplugin +[submodule "src/connector/hivemq-tdengine-extension"] + path = src/connector/hivemq-tdengine-extension + url = https://github.com/huskar-t/hivemq-tdengine-extension.git diff --git a/.travis.yml b/.travis.yml index 5ce9873d1534f8aec1b8e44640cce145c1254330..eb69370418a9c83c7b8bfe5daa1d6ead19150243 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,6 +32,8 @@ matrix: - python3-setuptools - valgrind - psmisc + - unixodbc + - unixodbc-dev before_script: - export TZ=Asia/Harbin diff --git a/Jenkinsfile b/Jenkinsfile index 4410d81be6974fc246f50044cbc142076212677b..8bf7e435fd5e96aa49e7f2e07af0c4b2decc365e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,11 +4,12 @@ pipeline { WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } + stages { stage('Parallel test stage') { parallel { stage('pytest') { - agent{label 'master'} + agent{label '184'} steps { sh ''' date @@ -33,13 +34,13 @@ pipeline { } } stage('test_b1') { - agent{label '184'} + agent{label 'master'} steps { sh ''' - date cd ${WKC} git checkout develop git pull + git submodule update cd ${WK} git checkout develop @@ -62,10 +63,10 @@ pipeline { agent{label "185"} steps { sh ''' - cd ${WKC} git checkout develop git pull + git submodule update cd ${WK} git checkout develop @@ -78,7 +79,21 @@ pipeline { cmake .. 
> /dev/null make > /dev/null cd ${WKC}/tests/pytest - ./crash_gen.sh -a -p -t 4 -s 2000 + ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh -a -p -t 4 -s 2000 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh --valgrind -p -t 10 -s 100 -b 4 + ./handle_crash_gen_val_log.sh + ''' + } + sh ''' date cd ${WKC}/tests ./test-all.sh b2 @@ -89,12 +104,13 @@ pipeline { stage('test_valgrind') { agent{label "186"} + steps { sh ''' - date cd ${WKC} git checkout develop git pull + git submodule update cd ${WK} git checkout develop @@ -116,10 +132,122 @@ pipeline { date''' } } + stage('connector'){ + agent{label "release"} + steps{ + sh''' + cd ${WORKSPACE} + git checkout develop + ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/gotest + bash batchtest.sh + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker + python3 PythonChecker.py + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ + mvn clean package assembly:single >/dev/null + java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# + dotnet run + ''' + } + + } + } } } } - + post { + success { + emailext ( + subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ 构建信息 +
+
    +
    +
  • 构建名称>>分支:${PROJECT_NAME}
  • +
  • 构建结果: Successful
  • +
  • 构建编号:${BUILD_NUMBER}
  • +
  • 触发用户:${CAUSE}
  • +
  • 变更概要:${CHANGES}
  • +
  • 构建地址:${BUILD_URL}
  • +
  • 构建日志:${BUILD_URL}console
  • +
  • 变更集:${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + failure { + emailext ( + subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ 构建信息 +
+
    +
    +
  • 构建名称>>分支:${PROJECT_NAME}
  • +
  • 构建结果: Successful
  • +
  • 构建编号:${BUILD_NUMBER}
  • +
  • 触发用户:${CAUSE}
  • +
  • 变更概要:${CHANGES}
  • +
  • 构建地址:${BUILD_URL}
  • +
  • 构建日志:${BUILD_URL}console
  • +
  • 变更集:${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + } } \ No newline at end of file diff --git a/alert/README.md b/alert/README.md index 23179669349ebf94f3774c6101c06c2e375ef059..547f3a0381a74714b1f6c8c74b861678b3805619 100644 --- a/alert/README.md +++ b/alert/README.md @@ -61,7 +61,7 @@ The use of each configuration item is: * **port**: This is the `http` service port which enables other application to manage rules by `restful API`. * **database**: rules are stored in a `sqlite` database, this is the path of the database file (if the file does not exist, the alert application creates it automatically). -* **tdengine**: connection string of `TDEngine` server, note in most cases the database information should be put in a rule, thus it should NOT be included here. +* **tdengine**: connection string of `TDEngine` server, note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string. * **log > level**: log level, could be `production` or `debug`. * **log > path**: log output file path. * **receivers > alertManager**: the alert application pushes alerts to `AlertManager` at this URL. 
diff --git a/alert/README_cn.md b/alert/README_cn.md index ec6e4566c8eb9fa2142c37b2b3e1b1a04f783a9a..938b23a58406f5d6f279191a47dc957c446911ce 100644 --- a/alert/README_cn.md +++ b/alert/README_cn.md @@ -58,7 +58,7 @@ $ go build * **port**:报警监测程序支持使用 `restful API` 对规则进行管理,这个参数用于配置 `http` 服务的侦听端口。 * **database**:报警监测程序将规则保存到了一个 `sqlite` 数据库中,这个参数用于指定数据库文件的路径(不需要提前创建这个文件,如果它不存在,程序会自动创建它)。 -* **tdengine**:`TDEngine` 的连接信息,一般来说,数据库信息应该在报警规则中指定,所以这里 **不** 应包含这一部分信息。 +* **tdengine**:`TDEngine` 的连接字符串,一般来说,数据库名应该在报警规则的 `sql` 语句中指定,所以这个字符串中 **不** 应包含数据库名。 * **log > level**:日志的记录级别,可选 `production` 或 `debug`。 * **log > path**:日志文件的路径。 * **receivers > alertManager**:报警监测程序会将报警推送到 `AlertManager`,在这里指定 `AlertManager` 的接收地址。 diff --git a/cmake/install.inc b/cmake/install.inc index c7fbd6df794f4a414b7b28e31b0c73ba3a4da906..dfca758b9362c96bec0ce45aa385d54a4e75a9e5 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -31,7 +31,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.8-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index ce2b960eeabc2de65d9abb14dd41d25f95c5d26b..a248f76f48ede5f1f483943c08fce6604756b6d4 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.5.1") + SET(TD_VER_NUMBER "2.0.6.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/documentation/tdenginedocs-cn/advanced-features/index.html b/documentation/tdenginedocs-cn/advanced-features/index.html index 9a1b908cd5bae690f747c27e46416e12e110480e..b4953b4dd482341ce937f56b806b682ff7145409 100644 --- a/documentation/tdenginedocs-cn/advanced-features/index.html +++ b/documentation/tdenginedocs-cn/advanced-features/index.html @@ -51,8 +51,8 @@ INTERVAL(1M)
  • mseconds:查询数据库更新的时间间隔,单位为毫秒。一般设置为1000毫秒。返回值为指向TDengine_SUB 结构的指针,如果返回为空,表示失败。

  • TAOS_ROW taos_consume(TAOS_SUB *tsub)

    该函数用来获取订阅的结果,用户应用程序将其置于一个无限循环语句。如果数据库有新记录到达,该API将返回该最新的记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错。参数说明:

  • void taos_unsubscribe(TAOS_SUB *tsub)

    取消订阅。应用程序退出时,务必调用该函数以避免资源泄露。

  • -
  • int taos_num_subfields(TAOS_SUB *tsub)

    获取返回的一行记录中数据包含多少列。

  • -
  • TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)

    获取每列数据的属性(数据类型、名字、长度),与taos_num_subfileds配合使用,可解析返回的每行数据。

  • +
  • int taos_num_fields(TAOS_SUB *tsub)

    获取返回的一行记录中数据包含多少列。

  • +
  • TAOS_FIELD *taos_fetch_fields(TAOS_SUB *tsub)

    获取每列数据的属性(数据类型、名字、长度),与taos_num_subfileds配合使用,可解析返回的每行数据。

  • 示例代码:请看安装包中的的示范程序

    缓存 (Cache)

    TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。

    diff --git a/documentation/tdenginedocs-cn/connector/index.html b/documentation/tdenginedocs-cn/connector/index.html index 3167c1521f099f8acd7ae237cc37bd5867ee209a..34ea19813fbd7a9f70074ff109308b37ba7b647a 100644 --- a/documentation/tdenginedocs-cn/connector/index.html +++ b/documentation/tdenginedocs-cn/connector/index.html @@ -64,9 +64,9 @@

    该API用来获取最新消息,应用程序一般会将其置于一个无限循环语句中。其中参数tsub是taos_subscribe的返回值。如果数据库有新的记录,该API将返回,返回参数是一行记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错,需要检查系统是否还在正常运行。

  • void taos_unsubscribe(TAOS_SUB *tsub)

    该API用于取消订阅,参数tsub是taos_subscribe的返回值。应用程序退出时,需要调用该API,否则有资源泄露。

  • -
  • int taos_num_subfields(TAOS_SUB *tsub)

    +
  • int taos_num_fields(TAOS_SUB *tsub)

    该API用来获取返回的一排数据中数据的列数

  • -
  • TAOS_FIELD *taos_fetch_subfields(TAOS_RES *res)

    +
  • TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)

    该API用来获取每列数据的属性(数据类型、名字、字节数),与taos_num_subfileds配合使用,可用来解析返回的一排数据。

  • Java Connector

    @@ -259,4 +259,4 @@ conn.close() _ "taosSql" )

    taosSql驱动包内采用cgo模式,调用了TDengine的C/C++同步接口,与TDengine进行交互,因此,在数据库操作执行完成之前,客户端应用将处于阻塞状态。单个数据库连接,在同一时刻只能有一个线程调用API。客户应用可以建立多个连接,进行多线程的数据写入或查询处理。

    -

    更多使用的细节,请参考下载目录中的示例源码。

    回去 \ No newline at end of file +

    更多使用的细节,请参考下载目录中的示例源码。

    回去 diff --git a/documentation/tdenginedocs-en/connector/index.html b/documentation/tdenginedocs-en/connector/index.html index 0f9e6b4717c1c32716046a38bef18268cc2e0408..ea1f75ae00300a84560b93156853152ac8f77398 100644 --- a/documentation/tdenginedocs-en/connector/index.html +++ b/documentation/tdenginedocs-en/connector/index.html @@ -72,9 +72,9 @@ The API is used to start a subscription session by given a handle. The parameter The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter tsub is the handle returned by taos_subscribe. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives. If NULL pointer is returned, it means an error occurs.

  • void taos_unsubscribe(TAOS_SUB *tsub) Stop a subscription session by the handle returned by taos_subscribe.

  • -
  • int taos_num_subfields(TAOS_SUB *tsub) +

  • int taos_num_fields(TAOS_SUB *tsub) The API used to get the number of fields in a row.

  • -
  • TAOS_FIELD *taos_fetch_subfields(TAOS_RES *res) +

  • TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) The API used to get the description of each column.

  • Java Connector

    @@ -351,4 +351,4 @@ promise2.then(function(result) { })

    Example

    An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found here (The preferred method for using the connector)

    -

    An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found here

    Back \ No newline at end of file +

    An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found here

    Back diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md index ba45bc4796e6f0fb544f7383f74528ee486d94e5..d4705ccb05c092d8da38072368a167466bd78968 100644 --- a/documentation20/webdocs/markdowndocs/architecture-ch.md +++ b/documentation20/webdocs/markdowndocs/architecture-ch.md @@ -228,7 +228,7 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久 为充分利用时序数据特点,TDengine将一个vnode保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数days决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。 -对于采集的数据,一般有保留时长,这个时长由系统配置参数keep决定。超过这个设置天数的数据文件,将被系统将自动删除,释放存储空间。 +对于采集的数据,一般有保留时长,这个时长由系统配置参数keep决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。 给定days与keep两个参数,一个vnode总的数据文件数为:keep/days。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。 目前的版本,参数keep可以修改,但对于参数days,一但设置后,不可修改。 diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md index 0e7a26eb3a8eff7d0c09cbd430bae3c4688293e1..10c28c284c870d1a522a178beb5303901f079a5a 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/webdocs/markdowndocs/cluster-ch.md @@ -213,6 +213,6 @@ SHOW MNODES; ## Arbitrator的使用 -如果副本数为偶数,当一个vnode group里一半或超过一半的vnode不工作时,是无法从中选出master的。同理,一半或超过一半的mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。 +如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。 -TDengine安装包里带有一个执行程序tarbitrator, 
找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。 +TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数数,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator, 系统也不会去建立连接。 diff --git a/documentation20/webdocs/markdowndocs/taosd-ch.md b/documentation20/webdocs/markdowndocs/taosd-ch.md index e90bc2233ffb3efc62a39ca5239bd590136b4125..08be0c163e7076b58f03ff8ea3165e902a80fe64 100644 --- a/documentation20/webdocs/markdowndocs/taosd-ch.md +++ b/documentation20/webdocs/markdowndocs/taosd-ch.md @@ -61,7 +61,7 @@ vnode与其子模块是通过API直接调用,而不是通过消息队列传递 mnode是整个系统的大脑,负责整个系统的资源调度,负责meta data的管理与存储。 -一个运行的系统里,只有一个mnode,但它有多个副本(由系统配置参数numOfMpeers控制)。这些副本分布在不同的dnode里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个Master,其他副本是slave。所有数据更新类的操作,都只能在master上进行,而查询类的可以在slave节点上进行。代码实现上,同步模块与vnode共享,但mnode被分配一个特殊的vgroup ID: 1,而且quorum大于1。整个集群系统是由多个dnode组成的,运行的mnode的副本数不可能超过dnode的个数,但不会超过配置的副本数。如果某个mnode副本宕机一段时间,只要超过半数的mnode副本仍在运行,运行的mnode会自动根据整个系统的资源情况,在其他dnode里再启动一个mnode, 以保证运行的副本数。 +一个运行的系统里,只有一个mnode,但它有多个副本(由系统配置参数numOfMnodes控制)。这些副本分布在不同的dnode里,目的是保证系统的高可靠运行。副本之间的数据复制是采用同步而非异步的方式,以确保数据的一致性,确保数据不会丢失。这些副本会自动选举一个Master,其他副本是slave。所有数据更新类的操作,都只能在master上进行,而查询类的可以在slave节点上进行。代码实现上,同步模块与vnode共享,但mnode被分配一个特殊的vgroup ID: 1,而且quorum大于1。整个集群系统是由多个dnode组成的,运行的mnode的副本数不可能超过dnode的个数,但不会超过配置的副本数。如果某个mnode副本宕机一段时间,只要超过半数的mnode副本仍在运行,运行的mnode会自动根据整个系统的资源情况,在其他dnode里再启动一个mnode, 以保证运行的副本数。 各个dnode通过信息交换,保存有mnode各个副本的End Point列表,并向其中的master节点定时(间隔由系统配置参数statusInterval控制)发送status消息,消息体里包含该dnode的CPU、内存、剩余存储空间、vnode个数,以及各个vnode的状态(存储空间、原始数据大小、记录条数、角色等)。这样mnode就了解整个系统的资源情况,如果用户创建新的表,就可以决定需要在哪个dnode创建;如果增加或删除dnode, 
或者监测到某dnode数据过热、或离线太长,就可以决定需要挪动那些vnode,以实现负载均衡。 diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 450c6a8f551fe7dd8dff5e53f5dad639aec4656e..a7bb22f345a3fd3c35ecd8e47cad04b85b33b613 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -58,7 +58,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector -cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector +cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||: cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ chmod 755 ${pkg_dir}/DEBIAN/* diff --git a/packaging/release.sh b/packaging/release.sh index 7542a5b4cafb69d5cee16bddfc9a5651eb717b92..68f947ccab3ef18a1b351b91a58db64a8f465c8e 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -156,9 +156,15 @@ build_time=$(date +"%F %R") # get commint id from git gitinfo=$(git rev-parse --verify HEAD) -enterprise_dir="${top_dir}/../enterprise" -cd ${enterprise_dir} -gitinfoOfInternal=$(git rev-parse --verify HEAD) + +if [[ "$verMode" == "cluster" ]]; then + enterprise_dir="${top_dir}/../enterprise" + cd ${enterprise_dir} + gitinfoOfInternal=$(git rev-parse --verify HEAD) +else + gitinfoOfInternal=NULL +fi + cd ${curr_dir} # 2. cmake executable file @@ -193,23 +199,35 @@ cd ${curr_dir} # 3. 
Call the corresponding script for packaging if [ "$osType" != "Darwin" ]; then if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then - echo "====do deb package for the ubuntu system====" - output_dir="${top_dir}/debs" - if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} + ret='0' + command -v dpkg >/dev/null 2>&1 || { ret='1'; } + if [ "$ret" -eq 0 ]; then + echo "====do deb package for the ubuntu system====" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/deb + ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} + else + echo "==========dpkg command not exist, so not release deb package!!!" fi - ${csudo} mkdir -p ${output_dir} - cd ${script_dir}/deb - ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - - echo "====do rpm package for the centos system====" - output_dir="${top_dir}/rpms" - if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} + + ret='0' + command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } + if [ "$ret" -eq 0 ]; then + echo "====do rpm package for the centos system====" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/rpm + ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} + else + echo "==========rpmbuild command not exist, so not release rpm package!!!" 
fi - ${csudo} mkdir -p ${output_dir} - cd ${script_dir}/rpm - ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} fi echo "====do tar.gz package for all systems====" diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 4e40263dc4ebaf6b566d20890ecd97f64e160340..e49baeffc7394e5f82bccd36877842c879b642b6 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -65,7 +65,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector -cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector +cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples #Scripts executed before installation diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index eff70d8035af0291f6dc7040ec13632fec4fa3be..831012851ad70d05080bfae161c0b925d5215ae9 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -278,11 +278,11 @@ function install_service_on_sysvinit() { # Install taosd service if ((${os_type}==1)); then - ${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d - ${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd + ${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd elif ((${os_type}==2)); then - ${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d - ${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} 
&& ${csudo} chmod a+x ${service_config_dir}/taosd + ${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d + ${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd fi #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index e17c678f263cb6b7a0ccbc32250265b9bc5cbd0e..69fa53c087fa06801119ac0ef472d1cb7baff483 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -110,7 +110,7 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index b4416a68bb30751d5e9b02f5e83186d750d5a935..7e8bef0dffffa3028c82ed00ab5695970222770d 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -123,7 +123,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/R ${install_dir}/examples sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go + sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go fi # Copy driver mkdir -p ${install_dir}/driver @@ -135,7 +135,7 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi cp -r 
${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 75b45b544e0a4abbf709cc4c5b3a3b55dc315f0f..a6d868ed1d033c826b910a423b1a4609fc5d76a3 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -124,7 +124,7 @@ cp ${lib_files} ${install_dir}/driver connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 3d625900c9d912ff835092c7c5675d618b42b06d..0ffcb63e3f93c300ce41d627ccf43b7f089d6c2e 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -146,7 +146,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/R ${install_dir}/examples sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go + sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go fi # Copy driver mkdir -p ${install_dir}/driver @@ -156,7 +156,7 @@ cp ${lib_files} ${install_dir}/driver connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r 
${connector_dir}/go ${install_dir}/connector diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 0feb64c795159b67920c8a39b53b0125dfb565bf..d91daaa5c44488e34dea7ec2ddec0863699446f2 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -10,6 +10,7 @@ data_dir="/var/lib/taos" log_dir="/var/log/taos" data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" +install_main_dir="/usr/local/taos" # static directory cfg_dir="/usr/local/taos/cfg" @@ -134,6 +135,29 @@ function install_config() { else break fi + done + + # user email + #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' + #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' + #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" + echo + echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: " + read emailAddr + while true; do + if [ ! -z "$emailAddr" ]; then + # check the format of the emailAddr + #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then + # Write the email address to temp file + email_file="${install_main_dir}/email" + ${csudo} bash -c "echo $emailAddr > ${email_file}" + break + #else + # read -p "Please enter the correct email address: " emailAddr + #fi + else + break + fi done } diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index b0d8d8983bab473b8711d19805cea0fef2f10ee7..233b7a15b4a9383ee65dac8c9c0dc107fb66dd0a 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.5.1' +version: '2.0.6.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.5.1 + - usr/lib/libtaos.so.2.0.6.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 898b7cb032846cb85e4cc8767ed6090b35f41e1a..f619edd221c005a8d8e707afa5271072b032f74a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -20,4 +20,6 @@ ADD_SUBDIRECTORY(tsdb) ADD_SUBDIRECTORY(wal) ADD_SUBDIRECTORY(cq) ADD_SUBDIRECTORY(dnode) +ADD_SUBDIRECTORY(connector/odbc) ADD_SUBDIRECTORY(connector/jdbc) + diff --git a/src/balance/src/balance.c b/src/balance/src/balance.c index 0fa4b3f34615aad3fb749d44cb27b3feab10f49c..0e9bb85b25defd169fea8711d3e0b40304500de4 100644 --- a/src/balance/src/balance.c +++ b/src/balance/src/balance.c @@ -216,8 +216,8 @@ static bool balanceCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) { SVnodeGid *pVnode = pVgroup->vnodeGid + i; if (pVnode == pRmVnode) continue; - mTrace("vgId:%d, change vgroup status, dnode:%d status:%d", pVgroup->vgId, pVnode->pDnode->dnodeId, - pVnode->pDnode->status); + mTrace("vgId:%d, check vgroup status, dnode:%d status:%d, vnode role:%s", pVgroup->vgId, pVnode->pDnode->dnodeId, + pVnode->pDnode->status, syncRole[pVnode->role]); if (pVnode->pDnode->status == TAOS_DN_STATUS_DROPPING) continue; if (pVnode->pDnode->status == TAOS_DN_STATUS_OFFLINE) continue; diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 7876069ccee841485fae9069d1116c13f385ce7b..bc01de110345e4c90cf5c15d3d7f6b010cb7308d 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -39,7 +39,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql); int32_t tscHandleInsertRetry(SSqlObj* pSql); void tscBuildResFromSubqueries(SSqlObj *pSql); -void **doSetResultRowData(SSqlObj *pSql, bool finalResult); +TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult); #ifdef __cplusplus } diff --git a/src/client/inc/tscUtil.h 
b/src/client/inc/tscUtil.h index 76a9bbac10655e6487614c6b9de230cabf19e0c5..d86e1aa0fb38f0c7d0da8035352ce51fc318b6b3 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -82,6 +82,7 @@ typedef struct SJoinSupporter { char* pIdTagList; // result of first stage tags int32_t totalLen; int32_t num; + SArray* pVgroupTables; } SJoinSupporter; typedef struct SVgroupTableInfo { @@ -149,14 +150,13 @@ int tscAllocPayload(SSqlCmd* pCmd, int size); TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes); -SFieldSupInfo* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField); -SFieldSupInfo* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field); +SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField); +SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field); -SFieldSupInfo* tscFieldInfoGetSupp(SFieldInfo* pFieldInfo, int32_t index); +SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index); TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index); void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo); -void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src); void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); @@ -216,7 +216,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex); void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache); STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta, - SVgroupsInfo* vgroupList, SArray* pTagCols); + SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables); STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo); int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); @@ -225,16 +225,19 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo); void tscClearSubqueryInfo(SSqlCmd* pCmd); void 
tscFreeVgroupTableInfo(SArray* pVgroupTables); +SArray* tscCloneVgroupTableInfo(SArray* pVgroupTables); +void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index); int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex); int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo); int tscGetMeterMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); - -void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex); void tscDoQuery(SSqlObj* pSql); +SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); +void* tscVgroupInfoClear(SVgroupsInfo *pInfo); +void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src); /** * The create object function must be successful expect for the out of memory issue. * diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 8621f9d28bc90b22bb54bcdb8585c3db7a1bf429..78b0bcce9c5df3cbb0ea9b0528a529dfcdfe59b2 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -30,6 +30,7 @@ extern "C" { #include "tsqlfunction.h" #include "tutil.h" #include "tcache.h" +#include "tref.h" #include "qExecutor.h" #include "qSqlparser.h" @@ -90,10 +91,10 @@ typedef struct STableComInfo { } STableComInfo; typedef struct SCMCorVgroupInfo { - int32_t version; - int8_t inUse; - int8_t numOfEps; - SEpAddr epAddr[TSDB_MAX_REPLICA]; + int32_t version; + int8_t inUse; + int8_t numOfEps; + SEpAddr1 epAddr[TSDB_MAX_REPLICA]; } SCMCorVgroupInfo; typedef struct STableMeta { @@ -142,16 +143,17 @@ typedef struct SColumnIndex { int16_t columnIndex; } SColumnIndex; -typedef struct SFieldSupInfo { +typedef struct SInternalField { + TAOS_FIELD field; bool visible; SExprInfo *pArithExprInfo; SSqlExpr *pSqlExpr; -} SFieldSupInfo; +} SInternalField; typedef struct SFieldInfo { - int16_t numOfOutput; // number of column in result - SArray *pFields; // SArray - SArray *pSupportInfo; // SArray + int16_t numOfOutput; 
// number of column in result + TAOS_FIELD* final; + SArray *internalField; // SArray } SFieldInfo; typedef struct SColumn { @@ -308,7 +310,7 @@ typedef struct { int32_t numOfGroups; SResRec * pGroupRec; char * data; - void ** tsrow; + TAOS_ROW tsrow; int32_t* length; // length for each field for current row char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex * pColumnIndex; @@ -443,14 +445,14 @@ void tscPartiallyFreeSqlObj(SSqlObj *pSql); */ void tscFreeSqlObj(SSqlObj *pSql); void tscFreeRegisteredSqlObj(void *pSql); +void tscFreeTableMetaHelper(void *pTableMeta); -void tscCloseTscObj(STscObj *pObj); +void tscCloseTscObj(void *pObj); // todo move to taos? or create a new file: taos_internal.h TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos); + void *param, TAOS **taos); TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res); - void waitForQueryRsp(void *param, TAOS_RES *tres, int code); void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen); @@ -468,7 +470,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { - SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex); + SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex); assert(pInfo->pSqlExpr != NULL); int32_t type = pInfo->pSqlExpr->resType; @@ -481,11 +483,11 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { pData = pInfo->pSqlExpr->param[1].pz; pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen; - 
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : pData; + pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData; } else { assert(bytes == tDataTypeDesc[type].nSize); - pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : &pInfo->pSqlExpr->param[1].i64Key; + pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64Key; pRes->length[columnIndex] = bytes; } } else { @@ -493,7 +495,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField int32_t realLen = varDataLen(pData); assert(realLen <= bytes - VARSTR_HEADER_SIZE); - pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : ((tstr *)pData)->data; + pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : (unsigned char*)((tstr *)pData)->data; if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor *(pData + realLen + VARSTR_HEADER_SIZE) = 0; } @@ -502,7 +504,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField } else { assert(bytes == tDataTypeDesc[type].nSize); - pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : pData; + pRes->tsrow[columnIndex] = isNull(pData, type) ? 
NULL : (unsigned char*)pData; pRes->length[columnIndex] = bytes; } } @@ -515,6 +517,7 @@ extern void * tscQhandle; extern int tscKeepConn[]; extern int tsInsertHeadSize; extern int tscNumOfThreads; +extern int tscRefId; extern SRpcCorEpSet tscMgmtEpSet; diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 72e8a4121ab72895e4c473378398a5059bdf8b5c..608a10e26142cb3a7aa2a40062f37fd27d0bd9f8 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -327,7 +327,7 @@ void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) { } for (int i = 0; i < pCmd->numOfCols; ++i){ - SFieldSupInfo* pSup = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i); + SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); if (pSup->pSqlExpr != NULL) { // pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i) + pSup->pSqlExpr->resBytes * pRes->row; } else { @@ -348,7 +348,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); for (int i = 0; i < pCmd->numOfCols; ++i) { - SFieldSupInfo* pSup = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i); + SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); if (pSup->pSqlExpr != NULL) { tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); @@ -405,11 +405,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlRes *pRes = &pSql->res; pRes->code = code; + const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; if (code != TSDB_CODE_SUCCESS) { - tscError("%p get tableMeta failed, code:%s", pSql, tstrerror(code)); + tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); goto _error; } else { - const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? 
"vgroup-list":"table-meta"; tscDebug("%p get %s successfully", pSql, msg); } @@ -427,8 +427,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } else { assert(code == TSDB_CODE_SUCCESS); } - - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL); + // param already freed by other routine and pSql in tscCache when ctrl + c + if (atomic_load_ptr(&pSql->param) == NULL) { + return; + } + assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSql; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 1b4f92d3fc9fc78951e23eaec4c438d7522bf7f1..12d3b7dfd38e09b30aed6b8e66e56e7eead61034 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -2461,12 +2461,22 @@ static void percentile_function(SQLFunctionCtx *pCtx) { // the first stage, only acquire the min/max value if (pInfo->stage == 0) { if (pCtx->preAggVals.isSet) { - if (GET_DOUBLE_VAL(&pInfo->minval) > pCtx->preAggVals.statis.min) { - SET_DOUBLE_VAL(&pInfo->minval, (double)pCtx->preAggVals.statis.min); + double tmin = 0.0, tmax = 0.0; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + tmin = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.min); + tmax = (double)GET_INT64_VAL(&pCtx->preAggVals.statis.max); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + tmin = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.min); + tmax = GET_DOUBLE_VAL(&pCtx->preAggVals.statis.max); + } else { + assert(true); + } + if (GET_DOUBLE_VAL(&pInfo->minval) > tmin) { + SET_DOUBLE_VAL(&pInfo->minval, tmin); } - if (GET_DOUBLE_VAL(&pInfo->maxval) < pCtx->preAggVals.statis.max) { - SET_DOUBLE_VAL(&pInfo->maxval, (double)pCtx->preAggVals.statis.max); + if (GET_DOUBLE_VAL(&pInfo->maxval) < tmax) { + SET_DOUBLE_VAL(&pInfo->maxval, tmax); } 
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull); @@ -4025,11 +4035,11 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) { // primary ts must be existed, so no need to check its existance if (pCtx->order == TSDB_ORDER_ASC) { - tsBufAppend(pTSbuf, 0, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, input, pCtx->size * TSDB_KEYSIZE); } else { for (int32_t i = pCtx->size - 1; i >= 0; --i) { char *d = GET_INPUT_CHAR_INDEX(pCtx, i); - tsBufAppend(pTSbuf, 0, &pCtx->tag, d, TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, d, (int32_t)TSDB_KEYSIZE); } } @@ -4048,7 +4058,7 @@ static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) { STSBuf *pTSbuf = pInfo->pTSBuf; - tsBufAppend(pTSbuf, 0, &pCtx->tag, pData, TSDB_KEYSIZE); + tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64Key, &pCtx->tag, pData, TSDB_KEYSIZE); SET_VAL(pCtx, pCtx->size, 1); pResInfo->hasResult = DATA_SET_FLAG; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 642b9005812c6756d4f0ef09caee1386925360fc..c13003009c530b2656f8d7f6ac90205a21bf6092 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -240,7 +240,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE}; tstrncpy(f.name, "Field", sizeof(f.name)); - SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false); @@ -297,7 +297,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { return tscSetValueToResObj(pSql, rowLen); } static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* 
fields, int *lengths, int idx, char *result) { - const char *val = row[idx]; + const char *val = (const char*)row[idx]; if (val == NULL) { sprintf(result, "%s", TSDB_DATA_NULL_STR); return -1; @@ -486,7 +486,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const tstrncpy(f.name, "Database", sizeof(f.name)); } - SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, f.bytes - VARSTR_HEADER_SIZE, false); @@ -923,19 +923,17 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa pQueryInfo->order.order = TSDB_ORDER_ASC; tscFieldInfoClear(&pQueryInfo->fieldsInfo); - pQueryInfo->fieldsInfo.pFields = taosArrayInit(1, sizeof(TAOS_FIELD)); - pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(1, sizeof(SFieldSupInfo)); + pQueryInfo->fieldsInfo.internalField = taosArrayInit(1, sizeof(SInternalField)); TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength); - TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0); - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, 0); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0); pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0); - memcpy(pRes->data, val, pField->bytes); + memcpy(pRes->data, val, pInfo->field.bytes); } int tscProcessLocalCmd(SSqlObj *pSql) { diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index d800d62c8ad1da8c2a485411021770d6b3a368eb..18d72e2d1e23b63abde7ff7159eae2dad993b548 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -510,7 +510,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { 
taosTFree(pLocalReducer->pResultBuf); if (pLocalReducer->pResInfo != NULL) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + size_t num = tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < num; ++i) { taosTFree(pLocalReducer->pResInfo[i].interResultBuf); } @@ -697,7 +698,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr pg *= 2; } - size_t numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups; + size_t numOfSubs = pSql->subState.numOfSub; + assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups); for (int32_t i = 0; i < numOfSubs; ++i) { (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel); (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index b3bb4566be10cdeab19bcd1436f59a9bae2f6e9f..1739e4348ca2d1e90e4cdc292a14c7dcc5dde2da 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -139,7 +139,7 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { return TSDB_CODE_TSC_INVALID_VALUE; } } - + return TSDB_CODE_SUCCESS; } @@ -213,7 +213,7 @@ static char* normalStmtBuildSql(STscStmt* stmt) { case TSDB_DATA_TYPE_NULL: taosStringBuilderAppendNull(&sb); break; - + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_SMALLINT: @@ -266,6 +266,388 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { return TSDB_CODE_SUCCESS; } + if (1) { + // allow user bind param data with different type + short size = 0; + union { + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + unsigned char buf[32*1024]; + } u; + switch (param->type) { + case TSDB_DATA_TYPE_BOOL: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: { + u.v1 = *(int8_t*)bind->buffer; + if (u.v1==0 || u.v1==1) break; + } break; + case TSDB_DATA_TYPE_TINYINT: { + u.v1 = *(int8_t*)bind->buffer; + if (u.v1==0 || u.v1==1) break; + } break; + 
case TSDB_DATA_TYPE_SMALLINT: { + u.v1 = (int8_t)*(int16_t*)bind->buffer; + if (u.v1==0 || u.v1==1) break; + } break; + case TSDB_DATA_TYPE_INT: { + u.v1 = (int8_t)*(int32_t*)bind->buffer; + if (u.v1==0 || u.v1==1) break; + } break; + case TSDB_DATA_TYPE_BIGINT: { + u.v1 = (int8_t)*(int64_t*)bind->buffer; + if (u.v1==0 || u.v1==1) break; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + // "0", "1" convertible + if (strncmp((const char*)bind->buffer, "0", *bind->length)==0) { + u.v1 = 0; + break; + } + if (strncmp((const char*)bind->buffer, "1", *bind->length)==0) { + u.v1 = 1; + break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.v1, sizeof(u.v1)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_TINYINT: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + int8_t v = *(int8_t*)bind->buffer; + u.v1 = v; + if (v >= SCHAR_MIN && v <= SCHAR_MAX) break; + } break; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t v = *(int16_t*)bind->buffer; + u.v1 = (int8_t)v; + if (v >= SCHAR_MIN && v <= SCHAR_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_INT: { + int32_t v = *(int32_t*)bind->buffer; + u.v1 = (int8_t)v; + if (v >= SCHAR_MIN && v <= SCHAR_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BIGINT: { + int64_t v = *(int64_t*)bind->buffer; + u.v1 = (int8_t)v; + if (v >= SCHAR_MIN && v <= SCHAR_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + int64_t v; + int n,r; + r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.v1 = (int8_t)v; + if (v >= SCHAR_MIN && v <= SCHAR_MAX) break; + } + return 
TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.v1, sizeof(u.v1)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_SMALLINT: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: { + int v = *(int16_t*)bind->buffer; + u.v2 = (int16_t)v; + } break; + case TSDB_DATA_TYPE_INT: { + int32_t v = *(int32_t*)bind->buffer; + u.v2 = (int16_t)v; + if (v >= SHRT_MIN && v <= SHRT_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BIGINT: { + int64_t v = *(int64_t*)bind->buffer; + u.v2 = (int16_t)v; + if (v >= SHRT_MIN && v <= SHRT_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + int64_t v; + int n,r; + r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.v2 = (int16_t)v; + if (v >= SHRT_MIN && v <= SHRT_MAX) break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.v2, sizeof(u.v2)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_INT: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: { + u.v4 = *(int32_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BIGINT: { + int64_t v = *(int64_t*)bind->buffer; + u.v4 = (int32_t)v; + if (v >= INT_MIN && v <= INT_MAX) break; + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + int64_t v; + int n,r; + r = sscanf((const char*)bind->buffer, "%" 
PRId64 "%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.v4 = (int32_t)v; + if (v >= INT_MIN && v <= INT_MAX) break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.v2, sizeof(u.v2)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_FLOAT: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + u.f4 = *(int8_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_SMALLINT: { + u.f4 = *(int16_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_INT: { + u.f4 = (float)*(int32_t*)bind->buffer; + // shall we check equality? + } break; + case TSDB_DATA_TYPE_BIGINT: { + u.f4 = (float)*(int64_t*)bind->buffer; + // shall we check equality? + } break; + case TSDB_DATA_TYPE_FLOAT: { + u.f4 = *(float*)bind->buffer; + } break; + case TSDB_DATA_TYPE_DOUBLE: { + u.f4 = *(float*)bind->buffer; + // shall we check equality? 
+ } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + float v; + int n,r; + r = sscanf((const char*)bind->buffer, "%f%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.f4 = v; + break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.f4, sizeof(u.f4)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_BIGINT: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + u.v8 = *(int8_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_SMALLINT: { + u.v8 = *(int16_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_INT: { + u.v8 = *(int32_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BIGINT: { + u.v8 = *(int64_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + int64_t v; + int n,r; + r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.v8 = v; + break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + memcpy(data + param->offset, &u.v8, sizeof(u.v8)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_DOUBLE: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + u.f8 = *(int8_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_SMALLINT: { + u.f8 = *(int16_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_INT: { + u.f8 = *(int32_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BIGINT: { + u.f8 = (double)*(int64_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_FLOAT: { + u.f8 = *(float*)bind->buffer; + } break; + case TSDB_DATA_TYPE_DOUBLE: { + u.f8 = *(double*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BINARY: + case 
TSDB_DATA_TYPE_NCHAR: { + double v; + int n,r; + r = sscanf((const char*)bind->buffer, "%lf%n", &v, &n); + if (r==1 && n==strlen((const char*)bind->buffer)) { + u.f8 = v; + break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_TIMESTAMP: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } break; + memcpy(data + param->offset, &u.f8, sizeof(u.f8)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_TIMESTAMP: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_TIMESTAMP: { + u.v8 = *(int64_t*)bind->buffer; + } break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + // is this the correct way to call taosParseTime? + int32_t len = (int32_t)*bind->length; + if (taosParseTime(bind->buffer, &u.v8, len, 3, tsDaylight) == TSDB_CODE_SUCCESS) { + break; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_DOUBLE: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } break; + memcpy(data + param->offset, &u.v8, sizeof(u.v8)); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_BINARY: { + switch (bind->buffer_type) { + case TSDB_DATA_TYPE_BINARY: { + if ((*bind->length) > (uintptr_t)param->bytes) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + size = (short)*bind->length; + STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_NCHAR: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + } break; + case TSDB_DATA_TYPE_NCHAR: { + switch (bind->buffer_type) { + case 
TSDB_DATA_TYPE_NCHAR: { + size_t output = 0; + if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { + return TSDB_CODE_TSC_INVALID_VALUE; + } + varDataSetLen(data + param->offset, output); + return TSDB_CODE_SUCCESS; + } break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BINARY: + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + } break; + default: { + return TSDB_CODE_TSC_INVALID_VALUE; + } break; + } + } + if (bind->buffer_type != param->type) { return TSDB_CODE_TSC_INVALID_VALUE; } @@ -299,12 +681,12 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { size = (short)*bind->length; STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size); return TSDB_CODE_SUCCESS; - + case TSDB_DATA_TYPE_NCHAR: { size_t output = 0; if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { return TSDB_CODE_TSC_INVALID_VALUE; - } + } varDataSetLen(data + param->offset, output); return TSDB_CODE_SUCCESS; } @@ -358,7 +740,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { } // actual work of all data blocks is done, update block size and numOfRows. - // note we don't do this block by block during the binding process, because + // note we don't do this block by block during the binding process, because // we cannot recover if something goes wrong. 
pCmd->batchSize = binded * 2 + 1; @@ -405,7 +787,7 @@ static int insertStmtReset(STscStmt* pStmt) { } } pCmd->batchSize = 0; - + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); pTableMetaInfo->vgroupIndex = 0; return TSDB_CODE_SUCCESS; @@ -447,7 +829,7 @@ static int insertStmtExecute(STscStmt* stmt) { pRes->numOfRows = 0; pRes->numOfTotal = 0; pRes->numOfClauseTotal = 0; - + pRes->qhandle = 0; pSql->cmd.insertType = 0; @@ -508,35 +890,35 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { SSqlObj* pSql = pStmt->pSql; size_t sqlLen = strlen(sql); - + SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; pSql->param = (void*) pSql; pSql->fp = waitForQueryRsp; pSql->cmd.insertType = TSDB_QUERY_TYPE_STMT_INSERT; - + if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { tscError("%p failed to malloc payload buffer", pSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - + pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); - + if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); free(pCmd->payload); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - + pRes->qhandle = 0; pRes->numOfRows = 1; - + strtolower(pSql->sqlstr, sql); tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); - if (tscIsInsertData(pSql->sqlstr)) { + if (tscIsInsertData(pSql->sqlstr)) { pStmt->isInsert = true; - + pSql->cmd.numOfParams = 0; pSql->cmd.batchSize = 0; @@ -548,7 +930,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { tsem_wait(&pSql->rspSem); return pSql->res.code; } - + return code; } @@ -637,3 +1019,80 @@ TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) { pStmt->pSql = NULL; return result; } + +int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + + if (insert) *insert = 
pStmt->isInsert; + + return TSDB_CODE_SUCCESS; +} + +int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + + if (pStmt->isInsert) { + SSqlObj* pSql = pStmt->pSql; + SSqlCmd *pCmd = &pSql->cmd; + *nums = pCmd->numOfParams; + return TSDB_CODE_SUCCESS; + } else { + SNormalStmt* normal = &pStmt->normal; + *nums = normal->numParams; + return TSDB_CODE_SUCCESS; + } +} + +int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return TSDB_CODE_TSC_DISCONNECTED; + } + + if (pStmt->isInsert) { + SSqlObj* pSql = pStmt->pSql; + SSqlCmd *pCmd = &pSql->cmd; + STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, 0); + + assert(pCmd->numOfParams == pBlock->numOfParams); + if (idx < 0 || idx >= pBlock->numOfParams) return -1; + + SParamInfo* param = pBlock->params + idx; + if (type) *type = param->type; + if (bytes) *bytes = param->bytes; + + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_TSC_APP_ERROR; + } +} + +const char *taos_data_type(int type) { + switch (type) { + case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL"; + case TSDB_DATA_TYPE_BOOL: return "TSDB_DATA_TYPE_BOOL"; + case TSDB_DATA_TYPE_TINYINT: return "TSDB_DATA_TYPE_TINYINT"; + case TSDB_DATA_TYPE_SMALLINT: return "TSDB_DATA_TYPE_SMALLINT"; + case TSDB_DATA_TYPE_INT: return "TSDB_DATA_TYPE_INT"; + case TSDB_DATA_TYPE_BIGINT: return "TSDB_DATA_TYPE_BIGINT"; + case TSDB_DATA_TYPE_FLOAT: return "TSDB_DATA_TYPE_FLOAT"; + case TSDB_DATA_TYPE_DOUBLE: return "TSDB_DATA_TYPE_DOUBLE"; + case TSDB_DATA_TYPE_BINARY: return "TSDB_DATA_TYPE_BINARY"; + case TSDB_DATA_TYPE_TIMESTAMP: return "TSDB_DATA_TYPE_TIMESTAMP"; + case TSDB_DATA_TYPE_NCHAR: return 
"TSDB_DATA_TYPE_NCHAR"; + default: return "UNKNOWN"; + } +} + diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index bae0f91dcc40e78c3d2f707ff93ee6ebd5322339..eb6843b0e4fd844c9d13da7f55dca47008f62a79 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -20,8 +20,10 @@ #include "tutil.h" #include "taosmsg.h" +#include "taos.h" + void tscSaveSlowQueryFp(void *handle, void *tmrId); -void *tscSlowQueryConn = NULL; +TAOS *tscSlowQueryConn = NULL; bool tscSlowQueryConnInitialized = false; void tscInitConnCb(void *param, TAOS_RES *result, int code) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e1744288bb42ff371b2225288bb4c2081a33c3a9..faf502d330789999cbf0b0cbe669e7b8d987bc81 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -125,7 +125,7 @@ static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSql static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); -static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols); +static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); /* * Used during parsing query sql. 
Since the query sql usually small in length, error position @@ -193,7 +193,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlRes* pRes = &pSql->res; int32_t code = TSDB_CODE_SUCCESS; - if (!pInfo->valid) { + if (!pInfo->valid || terrno == TSDB_CODE_TSC_SQL_SYNTAX_ERROR) { + terrno = TSDB_CODE_SUCCESS; // clear the error number return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg); } @@ -886,22 +887,13 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { int32_t nLen = 0; for (int32_t i = 0; i < pFieldList->nField; ++i) { - if (pFieldList->p[i].bytes == 0) { + TAOS_FIELD* pField = &pFieldList->p[i]; + + if (pField->bytes == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } - nLen += pFieldList->p[i].bytes; - } - // max row length must be less than TSDB_MAX_BYTES_PER_ROW - if (nLen > TSDB_MAX_BYTES_PER_ROW) { - invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - return false; - } - - // field name must be unique - for (int32_t i = 0; i < pFieldList->nField; ++i) { - TAOS_FIELD* pField = &pFieldList->p[i]; if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; @@ -918,10 +910,19 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { return false; } + // field name must be unique if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } + + nLen += pField->bytes; + } + + // max row length must be less than TSDB_MAX_BYTES_PER_ROW + if (nLen > TSDB_MAX_BYTES_PER_ROW) { + invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return false; } return true; @@ -1259,7 +1260,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t tExprNode* pNode = NULL; SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, 
pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo, colList, NULL); if (ret != TSDB_CODE_SUCCESS) { taosArrayDestroy(colList); tExprTreeDestroy(&pNode, NULL); @@ -1315,17 +1316,17 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, aliasName, NULL); int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, slot); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); if (pInfo->pSqlExpr == NULL) { SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo)); // arithmetic expression always return result in the format of double float - pArithExprInfo->bytes = sizeof(double); + pArithExprInfo->bytes = sizeof(double); pArithExprInfo->interBytes = sizeof(double); - pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; - int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid); if (ret != TSDB_CODE_SUCCESS) { tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause"); @@ -1382,7 +1383,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) { int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL); - SFieldSupInfo* pSupInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, numOfCols); + SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false; pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; @@ -1479,7 
+1480,7 @@ int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnLi } TAOS_FIELD f = tscCreateField(type, fieldName, bytes); - SFieldSupInfo* pInfo = tscFieldInfoInsert(&pQueryInfo->fieldsInfo, outputIndex, &f); + SInternalField* pInfo = tscFieldInfoInsert(&pQueryInfo->fieldsInfo, outputIndex, &f); pInfo->pSqlExpr = pSqlExpr; return TSDB_CODE_SUCCESS; @@ -3394,10 +3395,26 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQuer tSQLExprItem item = {.pNode = pExpr, .aliasName = NULL}; - // sql function in selection clause, append sql function info in pSqlCmd structure sequentially + // sql function list in selection clause. + // Append the sqlExpr into exprList of pQueryInfo structure sequentially if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } + + // It is invalid in case of more than one sqlExpr, such as first(ts, k) - last(ts, k) + int32_t inc = (int32_t) tscSqlExprNumOfExprs(pQueryInfo) - outputIndex; + if (inc > 1) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + // Not supported data type in arithmetic expression + for(int32_t i = 0; i < inc; ++i) { + SSqlExpr* p1 = tscSqlExprGet(pQueryInfo, i + outputIndex); + int16_t t = p1->resType; + if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) { + return TSDB_CODE_TSC_INVALID_SQL; + } + } } return TSDB_CODE_SUCCESS; @@ -4138,7 +4155,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE tExprNode* p = NULL; SArray* colList = taosArrayInit(10, sizeof(SColIndex)); - ret = exprTreeFromSqlExpr(pCmd, &p, p1, NULL, pQueryInfo, colList); + ret = exprTreeFromSqlExpr(pCmd, &p, p1, pQueryInfo, colList, NULL); SBufferWriter bw = tbufInitWriter(NULL, false); TRY(0) { @@ -4891,7 +4908,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // validate the length of binary if 
((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) && - (pVarList->a[1].pVar.nLen + VARSTR_HEADER_SIZE) > pTagsSchema->bytes) { + varDataTLen(pAlterSQL->tagData.data) > pTagsSchema->bytes) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg14); } @@ -5417,26 +5434,6 @@ int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) { return TSDB_CODE_SUCCESS; } -//void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex) { -// // the first column not timestamp column, add it -// SSqlExpr* pExpr = NULL; -// if (tscSqlExprNumOfExprs(pQueryInfo) > 0) { -// pExpr = tscSqlExprGet(pQueryInfo, 0); -// } -// -// if (pExpr == NULL || pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX || pExpr->functionId != functionId) { -// SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; -// -// pExpr = tscSqlExprInsert(pQueryInfo, 0, functionId, &index, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, TSDB_KEYSIZE, false); -// pExpr->colInfo.flag = TSDB_COL_NORMAL; -// -// // NOTE: tag column does not add to source column list -// SColumnList ids = getColumnList(1, tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX); -// -// insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, "ts", pExpr); -// } -//} - void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex) { SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentObj->cmd, subClauseIndex); @@ -5493,7 +5490,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) { tscAddSpecialColumnForSelect(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL); - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, (int32_t)size); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, (int32_t)size); doLimitOutputNormalColOfGroupby(pInfo->pSqlExpr); pInfo->visible = false; } @@ -6538,19 
+6535,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return TSDB_CODE_SUCCESS; // Does not build query message here } -int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols) { +int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid) { tExprNode* pLeft = NULL; tExprNode* pRight= NULL; if (pSqlExpr->pLeft != NULL) { - int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pExprInfo, pQueryInfo, pCols); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pQueryInfo, pCols, uid); if (ret != TSDB_CODE_SUCCESS) { return ret; } } if (pSqlExpr->pRight != NULL) { - int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pExprInfo, pQueryInfo, pCols); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -6577,14 +6574,19 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS strncpy((*pExpr)->pSchema->name, pSqlExpr->operand.z, pSqlExpr->operand.n); // set the input column data byte and type. 
- size_t size = taosArrayGetSize(pExprInfo); + size_t size = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < size; ++i) { - SSqlExpr* p1 = taosArrayGetP(pExprInfo, i); + SSqlExpr* p1 = taosArrayGetP(pQueryInfo->exprList, i); if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) { - (*pExpr)->pSchema->type = (uint8_t)p1->resType; + (*pExpr)->pSchema->type = (uint8_t)p1->resType; (*pExpr)->pSchema->bytes = p1->resBytes; + + if (uid != NULL) { + *uid = p1->uid; + } + break; } } diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 1e841c68fdad040e53b41ca749dd686c4cbb5e2e..ac740555af649c8c20dc1fe51cc7ea48592e064c 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -145,10 +145,11 @@ static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo corVgroupInfo->inUse = 0; corVgroupInfo->numOfEps = vgroupInfo->numOfEps; for (int32_t i = 0; i < corVgroupInfo->numOfEps; i++) { - strncpy(corVgroupInfo->epAddr[i].fqdn, vgroupInfo->epAddr[i].fqdn, TSDB_FQDN_LEN); + corVgroupInfo->epAddr[i].fqdn = strdup(vgroupInfo->epAddr[i].fqdn); corVgroupInfo->epAddr[i].port = vgroupInfo->epAddr[i].port; } } + STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size) { assert(pTableMetaMsg != NULL); @@ -162,11 +163,21 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size .numOfColumns = pTableMetaMsg->numOfColumns, }; - pTableMeta->id.tid = pTableMetaMsg->sid; + pTableMeta->id.tid = pTableMetaMsg->tid; pTableMeta->id.uid = pTableMetaMsg->uid; - pTableMeta->vgroupInfo = pTableMetaMsg->vgroup; - tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo); + SCMVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo; + pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps; + pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId; + + for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) { + SEpAddrMsg* pEpMsg = &pTableMetaMsg->vgroup.epAddr[i]; + 
+ pVgroupInfo->epAddr[i].fqdn = strndup(pEpMsg->fqdn, tListLen(pEpMsg->fqdn)); + pVgroupInfo->epAddr[i].port = pEpMsg->port; + } + + tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo); pTableMeta->sversion = pTableMetaMsg->sversion; pTableMeta->tversion = pTableMetaMsg->tversion; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d24b98fe0830ef5c8d623ec4ac92a03b8ad9be10..aead5aa4cf41f203be5f77e5e571871fbb34649c 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -124,9 +124,11 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { pVgroupInfo->inUse = pEpSet->inUse; pVgroupInfo->numOfEps = pEpSet->numOfEps; for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) { - tstrncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN); + taosTFree(pVgroupInfo->epAddr[i].fqdn); + pVgroupInfo->epAddr[i].fqdn = strndup(pEpSet->fqdn[i], tListLen(pEpSet->fqdn[i])); pVgroupInfo->epAddr[i].port = pEpSet->port[i]; } + tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse); taosCorEndWrite(&pVgroupInfo->version); } @@ -188,18 +190,19 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { void tscProcessActivityTimer(void *handle, void *tmrId) { STscObj *pObj = (STscObj *)handle; - if (pObj == NULL || pObj->signature != pObj) { + + int ret = taosAcquireRef(tscRefId, pObj); + if (ret < 0) { + tscTrace("%p failed to acquire TSC obj, reason:%s", pObj, tstrerror(ret)); return; } SSqlObj* pHB = pObj->pHb; - if (pObj->pTimer != tmrId || pHB == NULL) { - return; - } void** p = taosCacheAcquireByKey(tscObjCache, &pHB, sizeof(TSDB_CACHE_PTR_TYPE)); if (p == NULL) { tscWarn("%p HB object has been released already", pHB); + taosReleaseRef(tscRefId, pObj); return; } @@ -211,6 +214,8 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { if (code != TSDB_CODE_SUCCESS) { tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code)); } + + taosReleaseRef(tscRefId, pObj); } int 
tscSendMsgToServer(SSqlObj *pSql) { @@ -492,14 +497,25 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int32_t vgIndex = pTableMetaInfo->vgroupIndex; - - SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList; - assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + if (pTableMetaInfo->pVgroupTables == NULL) { + SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; + assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + + pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex); + } else { + int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + assert(vgIndex >= 0 && vgIndex < numOfVgroups); + + SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); - pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); + pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex); + } } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId); + tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId); } pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg); @@ -688,12 +704,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->limit = htobe64(pQueryInfo->limit.limit); pQueryMsg->offset = htobe64(pQueryInfo->limit.offset); pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList)); - pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval); - pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding); + pQueryMsg->interval.interval = 
htobe64(pQueryInfo->interval.interval); + pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding); pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset); pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit; - pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit; - pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit; + pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit; + pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit; pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); @@ -876,7 +892,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t numOfBlocks = 0; if (pQueryInfo->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, pTableMetaInfo->vgroupIndex); + int32_t vnodeId = htonl(pQueryMsg->head.vgId); + STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, vnodeId); assert(QUERY_IS_JOIN_QUERY(pQueryInfo->type) && pBlockInfo != NULL); // this query should not be sent // todo refactor @@ -1394,7 +1411,7 @@ static int tscSetResultPointer(SQueryInfo *pQueryInfo, SSqlRes *pRes) { for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i); - pRes->tsrow[i] = ((char*) pRes->data + offset * pRes->numOfRows); + pRes->tsrow[i] = (unsigned char*)((char*) pRes->data + offset * pRes->numOfRows); } return 0; @@ -1676,7 +1693,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscProcessTableMetaRsp(SSqlObj *pSql) { STableMetaMsg *pMetaMsg = (STableMetaMsg *)pSql->res.pRsp; - pMetaMsg->sid = htonl(pMetaMsg->sid); + pMetaMsg->tid = htonl(pMetaMsg->tid); pMetaMsg->sversion = htons(pMetaMsg->sversion); pMetaMsg->tversion = htons(pMetaMsg->tversion); pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId); @@ -1686,9 
+1703,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns); if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) && - (pMetaMsg->sid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) { + (pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) { tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId, - pMetaMsg->sid, pMetaMsg->tableId); + pMetaMsg->tid, pMetaMsg->tableId); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -1867,22 +1884,30 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { SSqlCmd* pCmd = &parent->cmd; for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) { STableMetaInfo *pInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i); - SVgroupsInfo * pVgroupInfo = (SVgroupsInfo *)pMsg; - pVgroupInfo->numOfVgroups = htonl(pVgroupInfo->numOfVgroups); - size_t size = sizeof(SCMVgroupInfo) * pVgroupInfo->numOfVgroups + sizeof(SVgroupsInfo); - pInfo->vgroupList = calloc(1, size); + SVgroupsMsg * pVgroupMsg = (SVgroupsMsg *) pMsg; + pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups); + + size_t size = sizeof(SCMVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg); + + size_t vgroupsz = sizeof(SCMVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); + pInfo->vgroupList = calloc(1, vgroupsz); assert(pInfo->vgroupList != NULL); - memcpy(pInfo->vgroupList, pVgroupInfo, size); + pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { //just init, no need to lock SCMVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; - pVgroups->vgId = htonl(pVgroups->vgId); - assert(pVgroups->numOfEps >= 1); + + SCMVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; + pVgroups->vgId = htonl(vmsg->vgId); + pVgroups->numOfEps = vmsg->numOfEps; + + assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1); for (int32_t k = 0; k < 
pVgroups->numOfEps; ++k) { - pVgroups->epAddr[k].port = htons(pVgroups->epAddr[k].port); + pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port); + pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn)); } } @@ -1918,7 +1943,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { pMetaMsg->numOfColumns = ntohs(pMetaMsg->numOfColumns); pSchema = pMetaMsg->schema; - pMetaMsg->sid = ntohs(pMetaMsg->sid); + pMetaMsg->tid = ntohs(pMetaMsg->tid); for (int i = 0; i < pMetaMsg->numOfColumns; ++i) { pSchema->bytes = htons(pSchema->bytes); pSchema++; @@ -1952,7 +1977,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { tscColumnListInsert(pQueryInfo->colList, &index); TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes); - SFieldSupInfo* pInfo = tscFieldInfoAppend(pFieldInfo, &f); + SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, pTableSchema[i].type, pTableSchema[i].bytes, pTableSchema[i].bytes, false); @@ -2289,7 +2314,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) { for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i); STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta); - tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList); + tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables); } if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index be91255b9c12af5fc36f6ae24dbc3c5baeac21a4..89dfa24e8fe2cef32673fb2016fa538f218d1bcf 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -51,8 +51,8 @@ static bool validPassword(const char* passwd) { return validImpl(passwd, TSDB_PASSWORD_LEN - 1); } -SSqlObj *taosConnectImpl(const char *ip, const char 
*user, const char *pass, const char *auth, const char *db, - uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { +static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db, + uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, TAOS **taos) { taos_init(); if (!validUserName(user)) { @@ -161,6 +161,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con registerSqlObj(pSql); tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg); + taosAddRef(tscRefId, pObj); return pSql; } @@ -243,16 +244,19 @@ static void asyncConnCallback(void *param, TAOS_RES *tres, int code) { } TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos) { - SSqlObj* pSql = taosConnectImpl(ip, user, pass, NULL, db, port, asyncConnCallback, param, taos); + void *param, TAOS **taos) { + STscObj *pObj = NULL; + SSqlObj *pSql = taosConnectImpl(ip, user, pass, NULL, db, port, asyncConnCallback, param, (void **)&pObj); if (pSql == NULL) { return NULL; } - + + if (taos) *taos = pObj; + pSql->fetchFp = fp; pSql->res.code = tscProcessSql(pSql); tscDebug("%p DB async connection is opening", taos); - return taos; + return pObj; } void taos_close(TAOS *taos) { @@ -293,7 +297,8 @@ void taos_close(TAOS *taos) { } tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn); - tscCloseTscObj(pObj); + + taosRemoveRef(tscRefId, pObj); } void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { @@ -370,7 +375,7 @@ int taos_num_fields(TAOS_RES *res) { size_t numOfCols = tscNumOfFields(pQueryInfo); for(int32_t i = 0; i < numOfCols; ++i) { - SFieldSupInfo* pInfo = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i); + SInternalField* pInfo = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); if (pInfo->visible) { num++; } @@ -406,8 +411,24 @@ TAOS_FIELD 
*taos_fetch_fields(TAOS_RES *res) { if (numOfCols == 0) { return NULL; } - - return pQueryInfo->fieldsInfo.pFields->pData; + + SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo; + + if (pFieldInfo->final == NULL) { + TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD)); + + int32_t j = 0; + for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(pFieldInfo, i); + if (pField->visible) { + f[j++] = pField->field; + } + } + + pFieldInfo->final = f; + } + + return pFieldInfo->final; } int taos_retrieve(TAOS_RES *res) { @@ -588,7 +609,8 @@ static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) { return true; } - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) { return true; } @@ -702,6 +724,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) { SSqlCmd* pCmd = &pSql->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) { return; } @@ -750,6 +773,7 @@ void taos_stop_query(TAOS_RES *res) { pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); + if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { assert(pSql->pRpcCtx == NULL); tscKillSTableQuery(pSql); @@ -878,7 +902,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { if (pSql->sqlstr == NULL) { pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer", pSql); - tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); taosTFree(pSql); return pRes->code; } @@ -903,7 +927,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { } if (code != TSDB_CODE_SUCCESS) { - tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, 
taos_errstr(taos), pObj); + tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(pSql), pObj); } taos_free_result(pSql); @@ -1047,7 +1071,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { tscDoQuery(pSql); - tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); if ((code = pRes->code) != TSDB_CODE_SUCCESS) { tscFreeSqlObj(pSql); } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 61614b56fb0167122fb0e31ec879ef6a6cbbac5c..0f67911bbea992503979e5de019e4e10d3bf3c14 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -168,8 +168,8 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0); taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true); - taosTFree(pTableMetaInfo->vgroupList); - + pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); + tscSetRetryTimer(pStream, pStream->pSql, retryDelay); return; } @@ -275,7 +275,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf tscFreeSqlResult(pSql); taosTFree(pSql->pSubs); pSql->subState.numOfSub = 0; - taosTFree(pTableMetaInfo->vgroupList); + pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); tscSetNextLaunchTimer(pStream, pSql); } } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 49759bc4d30fa9a9a89591c33c3dc6666894d1b2..794b7a068b4ede0e8a5ea5fd1f22a664a8d1ca3c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -23,7 +23,6 @@ #include "tscSubquery.h" #include "tschemautil.h" #include "tsclient.h" -#include "tscSubquery.h" typedef struct SInsertSupporter { SSqlObj* pSql; @@ -59,6 +58,8 @@ static int64_t 
doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ pSubQueryInfo1->tsBuf = output1; pSubQueryInfo2->tsBuf = output2; + TSKEY st = taosGetTimestampUs(); + // no result generated, return directly if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) { tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); @@ -95,7 +96,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ tscInfo("%" PRId64 ", tags:%"PRId64" \t %" PRId64 ", tags:%"PRId64, elem1.ts, elem1.tag.i64Key, elem2.ts, elem2.tag.i64Key); #endif - int32_t res = tVariantCompare(&elem1.tag, &elem2.tag); + int32_t res = tVariantCompare(elem1.tag, elem2.tag); if (res == -1 || (res == 0 && tsCompare(order, elem1.ts, elem2.ts))) { if (!tsBufNextPos(pSupporter1->pTSBuf)) { break; @@ -122,8 +123,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ win->ekey = elem1.ts; } - tsBufAppend(output1, elem1.vnode, &elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); - tsBufAppend(output2, elem2.vnode, &elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); + tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); + tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); + } else { pLimit->offset -= 1; } @@ -158,9 +160,10 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ tsBufDestroy(pSupporter1->pTSBuf); tsBufDestroy(pSupporter2->pTSBuf); - tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks " - "intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql, numOfInput1, numOfInput2, output1->numOfTotal, - win->skey, win->ekey); + TSKEY et = taosGetTimestampUs(); + tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " + "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", 
numOfVnode:%d, elasped time:%"PRId64" us", pSql, numOfInput1, numOfInput2, output1->numOfTotal, + output1->numOfVnodes, win->skey, win->ekey, tsBufGetNumOfVnodes(output1), et - st); return output1->numOfTotal; } @@ -216,6 +219,11 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { pSupporter->f = NULL; } + if (pSupporter->pVgroupTables != NULL) { + taosArrayDestroy(pSupporter->pVgroupTables); + pSupporter->pVgroupTables = NULL; + } + taosTFree(pSupporter->pIdTagList); tscTagCondRelease(&pSupporter->tagCond); free(pSupporter); @@ -305,7 +313,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { // set the second stage sub query for join process TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE); - memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval)); tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond); @@ -324,7 +331,9 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { tscFieldInfoUpdateOffset(pNewQueryInfo); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); - + pTableMetaInfo->pVgroupTables = pSupporter->pVgroupTables; + pSupporter->pVgroupTables = NULL; + /* * When handling the projection query, the offset value will be modified for table-table join, which is changed * during the timestamp intersection. 
@@ -356,10 +365,39 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value - pExpr->param[0].i64Key = colId; + pExpr->param[0] = (tVariant) {.i64Key = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; pExpr->numOfParams = 1; } + int32_t num = 0; + int32_t *list = NULL; + tsBufGetVnodeIdList(pNewQueryInfo->tsBuf, &num, &list); + + if (pTableMetaInfo->pVgroupTables != NULL) { + for(int32_t k = 0; k < taosArrayGetSize(pTableMetaInfo->pVgroupTables);) { + SVgroupTableInfo* p = taosArrayGet(pTableMetaInfo->pVgroupTables, k); + + bool found = false; + for(int32_t f = 0; f < num; ++f) { + if (p->vgInfo.vgId == list[f]) { + found = true; + break; + } + } + + if (!found) { + tscRemoveVgroupTableGroup(pTableMetaInfo->pVgroupTables, k); + } else { + k++; + } + } + + assert(taosArrayGetSize(pTableMetaInfo->pVgroupTables) > 0); + TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY); + } + + taosTFree(list); + size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s", pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, taosArrayGetSize(pNewQueryInfo->exprList), @@ -418,6 +456,8 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) { static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) { assert(pQueryInfo->window.skey <= win->skey && pQueryInfo->window.ekey >= win->ekey); pQueryInfo->window = *win; + + } int32_t tscCompareTidTags(const void* p1, const void* p2) { @@ -449,7 +489,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr SVgroupTableInfo info = {{0}}; for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { - 
info.vgInfo = pvg->vgroups[m]; + tscSCMVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); break; } } @@ -474,10 +514,11 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* SSqlCmd* pCmd = &pSql->cmd; tscClearSubqueryInfo(pCmd); tscFreeSqlResult(pSql); - + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); + assert(pQueryInfo->numOfTables == 1); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - tscInitQueryInfo(pQueryInfo); TSDB_QUERY_CLEAR_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); @@ -524,13 +565,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* tscProcessSql(pSql); } -static bool checkForDuplicateTagVal(SQueryInfo* pQueryInfo, SJoinSupporter* p1, SSqlObj* pPSqlObj) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);// todo: tags mismatch, tags not completed - SColumn *pCol = taosArrayGetP(pTableMetaInfo->tagColList, 0); - SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex]; - +static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSqlObj* pPSqlObj) { for(int32_t i = 1; i < p1->num; ++i) { STidTags* prev = (STidTags*) varDataVal(p1->pIdTagList + (i - 1) * p1->tagSize); STidTags* p = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize); @@ -564,7 +599,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar *s1 = taosArrayInit(p1->num, p1->tagSize - sizeof(int16_t)); *s2 = taosArrayInit(p2->num, p2->tagSize - sizeof(int16_t)); - if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) { + if (!(checkForDuplicateTagVal(pColSchema, p1, pParentSql) && checkForDuplicateTagVal(pColSchema, p2, pParentSql))) { return TSDB_CODE_QRY_DUP_JOIN_KEY; } @@ -708,6 +743,12 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow STableMetaInfo* 
pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0); tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2); + SSqlObj* psub1 = pParentSql->pSubs[0]; + ((SJoinSupporter*)psub1->param)->pVgroupTables = tscCloneVgroupTableInfo(pTableMetaInfo1->pVgroupTables); + + SSqlObj* psub2 = pParentSql->pSubs[1]; + ((SJoinSupporter*)psub2->param)->pVgroupTables = tscCloneVgroupTableInfo(pTableMetaInfo2->pVgroupTables); + pParentSql->subState.numOfSub = 2; pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub; @@ -766,9 +807,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pSupporter->pTSBuf = pBuf; } else { assert(pQueryInfo->numOfTables == 1); // for subquery, only one - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - tsBufMerge(pSupporter->pTSBuf, pBuf, pTableMetaInfo->vgroupIndex); + tsBufMerge(pSupporter->pTSBuf, pBuf); tsBufDestroy(pBuf); } @@ -835,6 +874,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow // launch the query the retrieve actual results from vnode along with the filtered timestamp SQueryInfo* pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex); updateQueryTimeRange(pPQueryInfo, &win); + + //update the vgroup that involved in real data query tscLaunchRealSubqueries(pParentSql); } @@ -868,20 +909,27 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR assert(pQueryInfo->numOfTables == 1); // for projection query, need to try next vnode if current vnode is exhausted - if ((++pTableMetaInfo->vgroupIndex) < pTableMetaInfo->vgroupList->numOfVgroups) { - pState->numOfRemain = 1; - pState->numOfSub = 1; + int32_t numOfVgroups = 0; // TODO refactor + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } else { + numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + } + if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { + 
tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSql, pTableMetaInfo->vgroupIndex); pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; - tscProcessSql(pSql); + tscProcessSql(pSql); return; + } else { + tscDebug("%p no result in current subquery anymore", pSql); } } - if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) { - tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pParentSql->subState.numOfRemain, pState->numOfSub); + if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) { + tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub); return; } @@ -895,60 +943,60 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR // update the records for each subquery in parent sql object. for (int32_t i = 0; i < pState->numOfSub; ++i) { if (pParentSql->pSubs[i] == NULL) { + tscDebug("%p %p sub:%d not retrieve data", pParentSql, NULL, i); continue; } SSqlRes* pRes1 = &pParentSql->pSubs[i]->res; - pRes1->numOfClauseTotal += pRes1->numOfRows; - } - // data has retrieved to client, build the join results - tscBuildResFromSubqueries(pParentSql); -} - -static SJoinSupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) { - int32_t notInvolved = 0; - SJoinSupporter* pSupporter = NULL; - SSubqueryState* pState = &pSql->subState; - - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - if (pSql->pSubs[i] == NULL) { - notInvolved++; + if (pRes1->row > 0 && pRes1->numOfRows > 0) { + tscDebug("%p sub:%p index:%d numOfRows:%"PRId64" total:%"PRId64 " (not retrieve)", pParentSql, pParentSql->pSubs[i], i, + pRes1->numOfRows, pRes1->numOfTotal); + assert(pRes1->row < pRes1->numOfRows); } else { - pSupporter = (SJoinSupporter*)pSql->pSubs[i]->param; + pRes1->numOfClauseTotal += pRes1->numOfRows; + tscDebug("%p sub:%p index:%d numOfRows:%"PRId64" total:%"PRId64, pParentSql, pParentSql->pSubs[i], i, + 
pRes1->numOfRows, pRes1->numOfTotal); } } - - pState->numOfRemain = numOfFetch; - return pSupporter; + + // data has retrieved to client, build the join results + tscBuildResFromSubqueries(pParentSql); } void tscFetchDatablockFromSubquery(SSqlObj* pSql) { assert(pSql->subState.numOfSub >= 1); int32_t numOfFetch = 0; - bool hasData = true; + bool hasData = true; + bool reachLimit = false; + + // if the subquery is NULL, it does not involved in the final result generation for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - // if the subquery is NULL, it does not involved in the final result generation SSqlObj* pSub = pSql->pSubs[i]; if (pSub == NULL) { continue; } - + SSqlRes *pRes = &pSub->res; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); if (!tscHasReachLimitation(pQueryInfo, pRes)) { if (pRes->row >= pRes->numOfRows) { + // no data left in current result buffer hasData = false; + // The current query is completed for the active vnode, try next vnode if exists + // If it is completed, no need to fetch anymore. if (!pRes->completed) { numOfFetch++; } } } else { // has reach the limitation, no data anymore if (pRes->row >= pRes->numOfRows) { - hasData = false; + reachLimit = true; + hasData = false; break; } } @@ -958,29 +1006,102 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { if (hasData) { tscBuildResFromSubqueries(pSql); return; - } else if (numOfFetch <= 0) { + } + + // If at least one subquery is completed in current vnode, try the next vnode in case of multi-vnode + // super table projection query. + if (reachLimit) { pSql->res.completed = true; freeJoinSubqueryObj(pSql); - + if (pSql->res.code == TSDB_CODE_SUCCESS) { (*pSql->fp)(pSql->param, pSql, 0); } else { tscQueueAsyncRes(pSql); } - + + return; + } + + if (numOfFetch <= 0) { + bool tryNextVnode = false; + + SSqlObj* pp = pSql->pSubs[0]; + SQueryInfo* pi = tscGetQueryInfoDetail(&pp->cmd, 0); + + // get the number of subquery that need to retrieve the next vnode. 
+ if (tscNonOrderedProjectionQueryOnSTable(pi, 0)) { + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) { + pSql->subState.numOfRemain++; + } + } + } + + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0); + + if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pSub->res.row >= pSub->res.numOfRows && + pSub->res.completed) { + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + assert(pQueryInfo->numOfTables == 1); + + // for projection query, need to try next vnode if current vnode is exhausted + int32_t numOfVgroups = 0; // TODO refactor + if (pTableMetaInfo->pVgroupTables != NULL) { + numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } else { + numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; + } + + if ((++pTableMetaInfo->vgroupIndex) < numOfVgroups) { + tscDebug("%p no result in current vnode anymore, try next vnode, vgIndex:%d", pSub, + pTableMetaInfo->vgroupIndex); + pSub->cmd.command = TSDB_SQL_SELECT; + pSub->fp = tscJoinQueryCallback; + + tscProcessSql(pSub); + tryNextVnode = true; + } else { + tscDebug("%p no result in current subquery anymore", pSub); + } + } + } + + if (tryNextVnode) { + return; + } + + pSql->res.completed = true; + freeJoinSubqueryObj(pSql); + + if (pSql->res.code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, 0); + } else { + tscQueueAsyncRes(pSql); + } + return; } // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled + // retrieve data from current vnode. 
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch); - SJoinSupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch); - + SJoinSupporter* pSupporter = NULL; + pSql->subState.numOfRemain = numOfFetch; + for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSql1 = pSql->pSubs[i]; if (pSql1 == NULL) { continue; } - + SSqlRes* pRes1 = &pSql1->res; SSqlCmd* pCmd1 = &pSql1->cmd; @@ -1122,7 +1243,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { * data instead of returning to its invoker */ if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { - pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub; // reset the record value +// pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub; // reset the record value pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data pSql->cmd.command = TSDB_SQL_FETCH; @@ -1142,7 +1263,6 @@ static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code); static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj); -// TODO int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) { SSqlCmd * pCmd = &pSql->cmd; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); @@ -1298,14 +1418,6 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0); int32_t code = TSDB_CODE_SUCCESS; - - // todo add test -// SSubqueryState *pState = calloc(1, sizeof(SSubqueryState)); -// if (pState == NULL) { -// code = TSDB_CODE_TSC_OUT_OF_MEMORY; -// goto _error; -// } - pSql->subState.numOfSub = pQueryInfo->numOfTables; bool hasEmptySub = false; @@ -1395,7 +1507,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSubqueryState *pState = &pSql->subState; - pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; 
+ pState->numOfSub = 0; + if (pTableMetaInfo->pVgroupTables == NULL) { + pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; + } else { + pState->numOfSub = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + } + assert(pState->numOfSub > 0); int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize); @@ -1645,9 +1763,9 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p // data in from current vnode is stored in cache and disk uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num); - tscDebug("%p sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql, - pTableMetaInfo->vgroupList->vgroups[0].epAddr[0].fqdn, pTableMetaInfo->vgroupList->vgroups[0].vgId, - numOfRowsFromSubquery, idx); + SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList; + tscDebug("%p sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql, + vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx); tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity); @@ -2022,11 +2140,11 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { static char* getResultBlockPosition(SSqlCmd* pCmd, SSqlRes* pRes, int32_t columnIndex, int16_t* bytes) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); - SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pSupportInfo, columnIndex); + SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, columnIndex); assert(pInfo->pSqlExpr != NULL); *bytes = pInfo->pSqlExpr->resBytes; - char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows; + char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + pRes->row * (*bytes); return pData; } @@ -2038,11 +2156,13 @@ static 
void doBuildResFromSubqueries(SSqlObj* pSql) { int32_t numOfRes = INT32_MAX; for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - if (pSql->pSubs[i] == NULL) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { continue; } - numOfRes = (int32_t)(MIN(numOfRes, pSql->pSubs[i]->res.numOfRows)); + int32_t remain = (int32_t)(pSub->res.numOfRows - pSub->res.row); + numOfRes = (int32_t)(MIN(numOfRes, remain)); } if (numOfRes == 0) { @@ -2068,14 +2188,23 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for(int32_t i = 0; i < numOfExprs; ++i) { SColumnIndex* pIndex = &pRes->pColumnIndex[i]; - SSqlRes *pRes1 = &pSql->pSubs[pIndex->tableIndex]->res; - SSqlCmd *pCmd1 = &pSql->pSubs[pIndex->tableIndex]->cmd; + SSqlRes* pRes1 = &pSql->pSubs[pIndex->tableIndex]->res; + SSqlCmd* pCmd1 = &pSql->pSubs[pIndex->tableIndex]->cmd; char* pData = getResultBlockPosition(pCmd1, pRes1, pIndex->columnIndex, &bytes); memcpy(data, pData, bytes * numOfRes); data += bytes * numOfRes; - pRes1->row = numOfRes; + } + + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + if (pSub == NULL) { + continue; + } + + pSub->res.row += numOfRes; + assert(pSub->res.row <= pSub->res.numOfRows); } pRes->numOfRows = numOfRes; @@ -2094,6 +2223,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + pRes->numOfCols = (int32_t)numOfExprs; + pRes->tsrow = calloc(numOfExprs, POINTER_BYTES); pRes->buffer = calloc(numOfExprs, POINTER_BYTES); pRes->length = calloc(numOfExprs, sizeof(int32_t)); @@ -2130,7 +2261,7 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF int32_t length = taosUcs4ToMbs(pRes->tsrow[columnIndex], pRes->length[columnIndex], pRes->buffer[columnIndex]); if ( length >= 0 ) { - pRes->tsrow[columnIndex] = 
pRes->buffer[columnIndex]; + pRes->tsrow[columnIndex] = (unsigned char*)pRes->buffer[columnIndex]; pRes->length[columnIndex] = length; } else { tscError("%p charset:%s to %s. val:%s convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)pRes->tsrow[columnIndex]); @@ -2158,7 +2289,7 @@ static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) return pSupport->data[index] + pSupport->offset * pExpr->resBytes; } -void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { +TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; @@ -2172,7 +2303,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { size_t size = tscNumOfFields(pQueryInfo); for (int i = 0; i < size; ++i) { - SFieldSupInfo* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pSupportInfo, i); + SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); if (pSup->pSqlExpr != NULL) { tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); } @@ -2182,7 +2313,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { continue; } - TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pFields, i); + TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) { transferNcharData(pSql, i, pField); } @@ -2211,7 +2342,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) { tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup, TSDB_ORDER_ASC, getArithemicInputSrc); - pRes->tsrow[i] = pRes->buffer[i]; + pRes->tsrow[i] = (unsigned char*)pRes->buffer[i]; } } diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 4c5dbb079fafa9eb40e99fd3f4d259ef28cf7a63..bff5062f16d4d6d04638c3de2f6738e159b77a1f 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -36,6 +36,7 @@ void * tscTmr; void * tscQhandle; void * 
tscCheckDiskUsageTmr; int tsInsertHeadSize; +int tscRefId; int tscNumOfThreads; @@ -77,6 +78,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon return 0; } + void taos_init_imp(void) { char temp[128]; @@ -124,8 +126,9 @@ void taos_init_imp(void) { double factor = (tscEmbedded == 0)? 2.0:4.0; tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor); - - if (tscNumOfThreads < 2) tscNumOfThreads = 2; + if (tscNumOfThreads < 2) { + tscNumOfThreads = 2; + } tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { @@ -140,10 +143,12 @@ void taos_init_imp(void) { int64_t refreshTime = 10; // 10 seconds by default if (tscMetaCache == NULL) { - tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta"); + tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, tscFreeTableMetaHelper, "tableMeta"); tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj"); } + tscRefId = taosOpenRef(200, tscCloseTscObj); + tscDebug("client is initialized successfully"); } @@ -163,6 +168,7 @@ void taos_cleanup() { tscQhandle = NULL; } + taosCloseRef(tscRefId); taosCleanupKeywordsTable(); taosCloseLog(); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 0235f037bd039a3e7a3152a6e320e62695124a76..85e7122b9d2ac9fa8f457d665f8e8a79c66b4f98 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -114,9 +114,9 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) { } // for select query super table, the super table vgroup list can not be null in any cases. 
- if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - assert(pTableMetaInfo->vgroupList != NULL); - } + // if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + // assert(pTableMetaInfo->vgroupList != NULL); + // } if ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE) { return false; @@ -404,7 +404,25 @@ void tscFreeRegisteredSqlObj(void *pSql) { tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", *p, pTscObj, ref); if (ref == 0) { tscDebug("%p all sqlObj freed, free tscObj:%p", *p, pTscObj); - tscCloseTscObj(pTscObj); + taosRemoveRef(tscRefId, pTscObj); + } +} + +void tscFreeTableMetaHelper(void *pTableMeta) { + STableMeta* p = (STableMeta*) pTableMeta; + + int32_t numOfEps = p->vgroupInfo.numOfEps; + assert(numOfEps >= 0 && numOfEps <= TSDB_MAX_REPLICA); + + for(int32_t i = 0; i < numOfEps; ++i) { + taosTFree(p->vgroupInfo.epAddr[i].fqdn); + } + + int32_t numOfEps1 = p->corVgroupInfo.numOfEps; + assert(numOfEps1 >= 0 && numOfEps1 <= TSDB_MAX_REPLICA); + + for(int32_t i = 0; i < numOfEps1; ++i) { + taosTFree(p->corVgroupInfo.epAddr[i].fqdn); } } @@ -686,16 +704,14 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { SSqlCmd* pCmd = &pSql->cmd; - // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, 0); - int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); - void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); size_t total = taosArrayGetSize(pTableDataBlockList); for (int32_t i = 0; i < total; ++i) { - pOneTableBlock = taosArrayGetP(pTableDataBlockList, i); + // the maximum expanded size in byte when a row-wise data is 
converted to SDataRow format + STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i); + int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); STableDataBlocks* dataBuf = NULL; int32_t ret = @@ -770,8 +786,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { } // TODO: all subqueries should be freed correctly before close this connection. -void tscCloseTscObj(STscObj* pObj) { - assert(pObj != NULL); +void tscCloseTscObj(void *param) { + STscObj *pObj = param; pObj->signature = NULL; taosTmrStopA(&(pObj->pTimer)); @@ -829,35 +845,30 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) { return f; } -SFieldSupInfo* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { +SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { assert(pFieldInfo != NULL); - taosArrayPush(pFieldInfo->pFields, pField); pFieldInfo->numOfOutput++; - struct SFieldSupInfo info = { + struct SInternalField info = { .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, }; - - return taosArrayPush(pFieldInfo->pSupportInfo, &info); -} -SFieldSupInfo* tscFieldInfoGetSupp(SFieldInfo* pFieldInfo, int32_t index) { - return TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, index); + info.field = *pField; + return taosArrayPush(pFieldInfo->internalField, &info); } -SFieldSupInfo* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { - taosArrayInsert(pFieldInfo->pFields, index, field); +SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { pFieldInfo->numOfOutput++; - - struct SFieldSupInfo info = { + struct SInternalField info = { .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, }; - - return taosArrayInsert(pFieldInfo->pSupportInfo, index, &info); + + info.field = *field; + return taosArrayInsert(pFieldInfo->internalField, index, &info); } void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { @@ -891,29 +902,18 
@@ void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) { } } -void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src) { - dst->numOfOutput = src->numOfOutput; - - if (dst->pFields == NULL) { - dst->pFields = taosArrayClone(src->pFields); - } else { - taosArrayCopy(dst->pFields, src->pFields); - } - - if (dst->pSupportInfo == NULL) { - dst->pSupportInfo = taosArrayClone(src->pSupportInfo); - } else { - taosArrayCopy(dst->pSupportInfo, src->pSupportInfo); - } +SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index) { + assert(index < pFieldInfo->numOfOutput); + return TARRAY_GET_ELEM(pFieldInfo->internalField, index); } TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { assert(index < pFieldInfo->numOfOutput); - return TARRAY_GET_ELEM(pFieldInfo->pFields, index); + return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field; } int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, index); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index); assert(pInfo != NULL && pInfo->pSqlExpr != NULL); return pInfo->pSqlExpr->offset; @@ -960,10 +960,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { return; } - taosArrayDestroy(pFieldInfo->pFields); - for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) { - SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, i); + SInternalField* pInfo = taosArrayGet(pFieldInfo->internalField, i); if (pInfo->pArithExprInfo != NULL) { tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL); @@ -971,7 +969,9 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { } } - taosArrayDestroy(pFieldInfo->pSupportInfo); + taosArrayDestroy(pFieldInfo->internalField); + taosTFree(pFieldInfo->final); + memset(pFieldInfo, 0, sizeof(SFieldInfo)); } @@ -1121,6 +1121,8 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, 
bool deepco } *p1 = *pExpr; + memset(p1->param, 0, sizeof(tVariant) * tListLen(p1->param)); + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { tVariantAssign(&p1->param[j], &pExpr->param[j]); } @@ -1615,11 +1617,8 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i } void tscInitQueryInfo(SQueryInfo* pQueryInfo) { - assert(pQueryInfo->fieldsInfo.pFields == NULL); - pQueryInfo->fieldsInfo.pFields = taosArrayInit(4, sizeof(TAOS_FIELD)); - - assert(pQueryInfo->fieldsInfo.pSupportInfo == NULL); - pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(4, sizeof(SFieldSupInfo)); + assert(pQueryInfo->fieldsInfo.internalField == NULL); + pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField)); assert(pQueryInfo->exprList == NULL); pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); @@ -1681,13 +1680,62 @@ void tscClearSubqueryInfo(SSqlCmd* pCmd) { } void tscFreeVgroupTableInfo(SArray* pVgroupTables) { - if (pVgroupTables != NULL) { - for (size_t i = 0; i < taosArrayGetSize(pVgroupTables); i++) { - SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); - taosArrayDestroy(pInfo->itemList); + if (pVgroupTables == NULL) { + return; + } + + size_t num = taosArrayGetSize(pVgroupTables); + for (size_t i = 0; i < num; i++) { + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); + + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + taosTFree(pInfo->vgInfo.epAddr[j].fqdn); } - taosArrayDestroy(pVgroupTables); + + taosArrayDestroy(pInfo->itemList); + } + + taosArrayDestroy(pVgroupTables); +} + +void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { + assert(pVgroupTable != NULL && index >= 0); + + size_t size = taosArrayGetSize(pVgroupTable); + assert(size > index); + + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + taosTFree(pInfo->vgInfo.epAddr[j].fqdn); } + + taosArrayDestroy(pInfo->itemList); + 
taosArrayRemove(pVgroupTable, index); +} + +SArray* tscCloneVgroupTableInfo(SArray* pVgroupTables) { + if (pVgroupTables == NULL) { + return NULL; + } + + size_t num = taosArrayGetSize(pVgroupTables); + SArray* pa = taosArrayInit(num, sizeof(SVgroupTableInfo)); + + SVgroupTableInfo info; + for (size_t i = 0; i < num; i++) { + SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); + memset(&info, 0, sizeof(SVgroupTableInfo)); + + info.vgInfo = pInfo->vgInfo; + for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { + info.vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); + } + + info.itemList = taosArrayClone(pInfo->itemList); + taosArrayPush(pa, &info); + } + + return pa; } void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) { @@ -1695,6 +1743,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool rem for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i); + tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); tscClearTableMetaInfo(pTableMetaInfo, removeFromCache); free(pTableMetaInfo); @@ -1704,7 +1753,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool rem } STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta, - SVgroupsInfo* vgroupList, SArray* pTagCols) { + SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) { void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES); if (pAlloc == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -1727,13 +1776,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST pTableMetaInfo->pTableMeta = pTableMeta; if (vgroupList != NULL) { - size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups; - pTableMetaInfo->vgroupList = malloc(size); - if (pTableMetaInfo->vgroupList == NULL) { - return NULL; - 
} - - memcpy(pTableMetaInfo->vgroupList, vgroupList, size); + pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); } pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES); @@ -1744,13 +1787,15 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST if (pTagCols != NULL) { tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1); } + + pTableMetaInfo->pVgroupTables = tscCloneVgroupTableInfo(pVgroupTables); pQueryInfo->numOfTables += 1; return pTableMetaInfo; } STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo* pQueryInfo) { - return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL); + return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL, NULL); } void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache) { @@ -1762,8 +1807,7 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache) taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache); } - taosTFree(pTableMetaInfo->vgroupList); - + pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); tscColumnListDestroy(pTableMetaInfo->tagColList); pTableMetaInfo->tagColList = NULL; } @@ -1825,60 +1869,29 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm assert(pSql->cmd.clauseIndex == 0); STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); - tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL); + tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); registerSqlObj(pNew); return pNew; } -// current sql function is not direct output result, so create a dummy output field -static void doSetNewFieldInfo(SQueryInfo* pNewQueryInfo, SSqlExpr* pExpr) { - TAOS_FIELD f = {.type = (uint8_t)pExpr->resType, .bytes = pExpr->resBytes}; - tstrncpy(f.name, pExpr->aliasName, sizeof(f.name)); - - SFieldSupInfo* pInfo1 = 
tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); - - pInfo1->pSqlExpr = pExpr; - pInfo1->visible = false; -} - static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) { int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo); if (numOfOutput == 0) { return; } - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); - SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; - - // set the field info in pNewQueryInfo object + // set the field info in pNewQueryInfo object according to sqlExpr information + size_t numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + SSqlExpr* pExpr = tscSqlExprGet(pNewQueryInfo, i); - if (pExpr->uid == uid) { - if (i < pFieldInfo->numOfOutput) { - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, i); - - if (pInfo->pSqlExpr != NULL) { - TAOS_FIELD* p = tscFieldInfoGetField(pFieldInfo, i); - assert(strcmp(p->name, pExpr->aliasName) == 0); - - SFieldSupInfo* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, p); - *pInfo1 = *pInfo; - } else { - assert(pInfo->pArithExprInfo != NULL); - doSetNewFieldInfo(pNewQueryInfo, pExpr); - } - } else { // it is a arithmetic column, does not have actual field for sqlExpr, so build it - doSetNewFieldInfo(pNewQueryInfo, pExpr); - } - } + TAOS_FIELD f = tscCreateField((int8_t) pExpr->resType, pExpr->aliasName, pExpr->resBytes); + SInternalField* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f); + pInfo1->pSqlExpr = pExpr; } - // make sure the the sqlExpr for each fields is correct - numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo); - - // update the pSqlExpr pointer in SFieldSupInfo according the field name + // update the pSqlExpr pointer in SInternalField according the field name // make sure the pSqlExpr point to the correct SqlExpr in pNewQueryInfo, not SqlExpr in pQueryInfo for (int32_t f = 0; f < 
pNewQueryInfo->fieldsInfo.numOfOutput; ++f) { TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f); @@ -1888,7 +1901,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1); if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name - SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pNewQueryInfo->fieldsInfo, f); + SInternalField* pInfo = tscFieldInfoGetInternalField(&pNewQueryInfo->fieldsInfo, f); pInfo->pSqlExpr = pExpr1; matched = true; @@ -2021,14 +2034,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup assert(pTableMeta != NULL); - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, + pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); } else { // transfer the ownership of pTableMeta to the newly create sql object. 
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0); STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, + pTableMetaInfo->pVgroupTables); } if (pFinalInfo->pTableMeta == NULL) { @@ -2403,3 +2418,58 @@ void tscClearSqlOwner(SSqlObj* pSql) { assert(taosCheckPthreadValid(pSql->owner)); atomic_store_64(&pSql->owner, 0); } + +SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { + if (vgroupList == NULL) { + return NULL; + } + + size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups; + SVgroupsInfo* pNew = calloc(1, size); + if (pNew == NULL) { + return NULL; + } + + pNew->numOfVgroups = vgroupList->numOfVgroups; + + for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { + SCMVgroupInfo* pNewVInfo = &pNew->vgroups[i]; + + SCMVgroupInfo* pvInfo = &vgroupList->vgroups[i]; + pNewVInfo->vgId = pvInfo->vgId; + pNewVInfo->numOfEps = pvInfo->numOfEps; + + for(int32_t j = 0; j < pvInfo->numOfEps; ++j) { + pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); + pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port; + } + } + + return pNew; +} + +void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { + if (vgroupList == NULL) { + return NULL; + } + + for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { + SCMVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; + + for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { + taosTFree(pVgroupInfo->epAddr[j].fqdn); + } + } + + taosTFree(vgroupList); + return NULL; +} + +void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src) { + dst->vgId = src->vgId; + dst->numOfEps = src->numOfEps; + for(int32_t i = 0; i < 
dst->numOfEps; ++i) { + dst->epAddr[i].port = src->epAddr[i].port; + dst->epAddr[i].fqdn = strdup(src->epAddr[i].fqdn); + } +} diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 515115c323294a67318b5eb1dd17660e651d09f8..4636eaac08db4943e1837b5d6e8db40341ba6546 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -44,14 +44,17 @@ extern int32_t tsMaxShellConns; extern int32_t tsShellActivityTimer; extern uint32_t tsMaxTmrCtrl; extern float tsNumOfThreadsPerCore; -extern float tsRatioOfQueryThreads; +extern float tsRatioOfQueryThreads; // todo remove it extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; -extern char tsCharset[]; // default encode string +extern char tsCharset[]; // default encode string extern int32_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +//query buffer management +extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing + // client extern int32_t tsTableMetaKeepTimer; extern int32_t tsMaxSQLStringLen; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index c24ba490ba7f4cb25ba032b0404790d68540c826..32569e39823416a25bc0f0bb88d1feb9faafcff1 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -45,14 +45,14 @@ int32_t tsEnableTelemetryReporting = 1; char tsEmail[TSDB_FQDN_LEN] = {0}; // common -int32_t tsRpcTimer = 1000; -int32_t tsRpcMaxTime = 600; // seconds; -int32_t tsMaxShellConns = 5000; +int32_t tsRpcTimer = 1000; +int32_t tsRpcMaxTime = 600; // seconds; +int32_t tsMaxShellConns = 5000; int32_t tsMaxConnections = 5000; -int32_t tsShellActivityTimer = 3; // second -float tsNumOfThreadsPerCore = 1.0; -float tsRatioOfQueryThreads = 0.5; -int8_t tsDaylight = 0; +int32_t tsShellActivityTimer = 3; // second +float tsNumOfThreadsPerCore = 1.0f; +float tsRatioOfQueryThreads = 0.5f; +int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char 
tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string @@ -99,6 +99,12 @@ float tsStreamComputDelayRatio = 0.1f; int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance +// the maximum allowed query buffer size during query processing for each data node. +// -1 no limit (default) +// 0 no query allowed, queries are disabled +// positive value (in MB) +int32_t tsQueryBufferSize = -1; + // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS; @@ -676,7 +682,7 @@ static void doInitGlobalConfig(void) { cfg.minValue = TSDB_MIN_CACHE_BLOCK_SIZE; cfg.maxValue = TSDB_MAX_CACHE_BLOCK_SIZE; cfg.ptrLength = 0; - cfg.unitType = TAOS_CFG_UTYPE_Mb; + cfg.unitType = TAOS_CFG_UTYPE_MB; taosInitConfigOption(cfg); cfg.option = "blocks"; @@ -839,6 +845,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "queryBufferSize"; + cfg.ptr = &tsQueryBufferSize; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = -1; + cfg.maxValue = 500000000000.0f; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + // locale & charset cfg.option = "timezone"; cfg.ptr = tsTimezone; diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index 005def6dc597361436b03c15535840af2bd3461e..9eb9924932c7757c3c999eb5afadd8c719dc16bc 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -144,21 +144,24 @@ void tVariantDestroy(tVariant *pVar) { void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { if (pSrc == NULL || pDst == NULL) return; - *pDst = *pSrc; - + pDst->nType = pSrc->nType; if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) { - int32_t len = pSrc->nLen + 1; - if (pSrc->nType == 
TSDB_DATA_TYPE_NCHAR) { - len = len * TSDB_NCHAR_SIZE; - } - - pDst->pz = calloc(1, len); - memcpy(pDst->pz, pSrc->pz, len); + int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE; + char* p = realloc(pDst->pz, len); + assert(p); + + memset(p, 0, len); + pDst->pz = p; + + memcpy(pDst->pz, pSrc->pz, pSrc->nLen); + pDst->nLen = pSrc->nLen; return; + } - // this is only for string array - if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { + if (pSrc->nType >= TSDB_DATA_TYPE_BOOL && pSrc->nType <= TSDB_DATA_TYPE_DOUBLE) { + pDst->i64Key = pSrc->i64Key; + } else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { // this is only for string array size_t num = taosArrayGetSize(pSrc->arr); pDst->arr = taosArrayInit(num, sizeof(char*)); for(size_t i = 0; i < num; i++) { @@ -166,8 +169,6 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { char* n = strdup(p); taosArrayPush(pDst->arr, &n); } - - return; } pDst->nLen = tDataTypeDesc[pDst->nType].nSize; diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension new file mode 160000 index 0000000000000000000000000000000000000000..b62a26ecc164a310104df57691691b237e091c89 --- /dev/null +++ b/src/connector/hivemq-tdengine-extension @@ -0,0 +1 @@ +Subproject commit b62a26ecc164a310104df57691691b237e091c89 diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 7f823b97b266dae11ce1ba384cc812a1a9d6b691..c565853ab0fd3fa961643725bbf3d17ff1dc349a 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.0-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.8-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean 
-f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 99409fe27722fb43f9d9fa50a0c0f17b5c1f76be..3b62f66d2ec88002d2f749166fb00bff670617ee 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.0 + 2.0.8 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index e5515c24b7a298f0d82f5ad5f880fc1a166f9a3f..c1d9d2af8e5a5c24dcfed6039e3ce06530b95276 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -587,7 +587,6 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - /** add by zyyang **********/ Statement stmt = null; if (null != conn && !conn.isClosed()) { diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..58e7b6acf1f8424c8b8f72578a9ece027d1a9447 --- /dev/null +++ b/src/connector/odbc/CMakeLists.txt @@ -0,0 +1,33 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +IF (TD_LINUX_64) + find_program(HAVE_ODBCINST NAMES odbcinst) + + IF (HAVE_ODBCINST) + include(CheckSymbolExists) + # shall we revert CMAKE_REQUIRED_LIBRARIES and how? 
+ set(CMAKE_REQUIRED_LIBRARIES odbc) + check_symbol_exists(SQLExecute "sql.h" HAVE_ODBC_DEV) + if(NOT (HAVE_ODBC_DEV)) + unset(HAVE_ODBC_DEV CACHE) + message(WARNING "unixodbc-dev is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc-dev") + else () + message(STATUS "unixodbc/unixodbc-dev are installed, and odbc connector will be built") + AUX_SOURCE_DIRECTORY(src SRC) + + # generate dynamic library (*.so) + ADD_LIBRARY(todbc SHARED ${SRC}) + SET_TARGET_PROPERTIES(todbc PROPERTIES CLEAN_DIRECT_OUTPUT 1) + SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1) + TARGET_LINK_LIBRARIES(todbc taos) + + install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/src/install.sh ${CMAKE_BINARY_DIR})") + + ADD_SUBDIRECTORY(tests) + endif() + ELSE () + message(WARNING "unixodbc is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc") + ENDIF () +ENDIF () + diff --git a/src/connector/odbc/src/install.sh b/src/connector/odbc/src/install.sh new file mode 100755 index 0000000000000000000000000000000000000000..b8c04677c7199384f7bc0b66515eb04d0fe560fc --- /dev/null +++ b/src/connector/odbc/src/install.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -u + +BLD_DIR="$1" + +rm -f "${BLD_DIR}/template.ini" +rm -f "${BLD_DIR}/template.dsn" + +cat > "${BLD_DIR}/template.ini" < "${BLD_DIR}/template.dsn" < + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +// #define _BSD_SOURCE +#define _XOPEN_SOURCE +#define _DEFAULT_SOURCE + +#include "taos.h" + +#include "os.h" +#include "taoserror.h" +#include "todbc_util.h" + +#include +#include + +#include + + +#define GET_REF(obj) atomic_load_64(&obj->refcount) +#define INC_REF(obj) atomic_add_fetch_64(&obj->refcount, 1) +#define DEC_REF(obj) atomic_sub_fetch_64(&obj->refcount, 1) + +#define LOCK(obj) pthread_mutex_lock(&obj->lock); +#define UNLOCK(obj) pthread_mutex_unlock(&obj->lock); + +#define SET_ERROR(obj, sqlstate, eno, err_fmt, ...) \ +do { \ + obj->err.err_no = eno; \ + const char* estr = tstrerror(eno); \ + if (!estr) estr = "Unknown error"; \ + int n = snprintf(NULL, 0, "%s: @[%d][TSDB:%x]" err_fmt "", estr, __LINE__, eno, ##__VA_ARGS__); \ + if (n<0) break; \ + char *err_str = (char*)realloc(obj->err.err_str, n+1); \ + if (!err_str) break; \ + obj->err.err_str = err_str; \ + snprintf(obj->err.err_str, n+1, "%s: @[%d][TSDB:%x]" err_fmt "", estr, __LINE__, eno, ##__VA_ARGS__); \ + snprintf((char*)obj->err.sql_state, sizeof(obj->err.sql_state), "%s", sqlstate); \ +} while (0) + +#define CLR_ERROR(obj) \ +do { \ + obj->err.err_no = TSDB_CODE_SUCCESS; \ + if (obj->err.err_str) obj->err.err_str[0] = '\0'; \ + obj->err.sql_state[0] = '\0'; \ +} while (0) + +#define FILL_ERROR(obj) \ +do { \ + size_t n = sizeof(obj->err.sql_state); \ + if (Sqlstate) strncpy((char*)Sqlstate, (char*)obj->err.sql_state, n); \ + if (NativeError) *NativeError = obj->err.err_no; \ + snprintf((char*)MessageText, BufferLength, "%s", obj->err.err_str); \ + if (TextLength && obj->err.err_str) *TextLength = strlen(obj->err.err_str); \ + if (TextLength && obj->err.err_str) *TextLength = utf8_chars(obj->err.err_str); \ +} while (0) + +#define FREE_ERROR(obj) \ +do { \ + obj->err.err_no = TSDB_CODE_SUCCESS; \ + if (obj->err.err_str) { \ + free(obj->err.err_str); \ + obj->err.err_str = NULL; \ + } \ + obj->err.sql_state[0] = '\0'; \ +} while (0) + +#define SET_UNSUPPORT_ERROR(obj, sqlstate, 
err_fmt, ...) \ +do { \ + SET_ERROR(obj, sqlstate, TSDB_CODE_ODBC_NOT_SUPPORT, err_fmt, ##__VA_ARGS__); \ +} while (0) \ + +#define SET_HANDLE_INVALID(obj, sqlstate, err_fmt, ...) \ +do { \ + SET_ERROR(obj, sqlstate, TSDB_CODE_QRY_INVALID_QHANDLE, err_fmt, ##__VA_ARGS__); \ +} while (0); + +#define SDUP(s,n) (s ? (s[n] ? (const char*)strndup((const char*)s,n) : (const char*)s) : strdup("")) +#define SFRE(x,s,n) \ +do { \ + if (x==(const char*)s) break; \ + if (x) { \ + free((char*)x); \ + x = NULL; \ + } \ +} while (0) + +#define CHK_CONN(obj) \ +do { \ + if (!obj->conn) { \ + SET_ERROR(obj, "HY000", TSDB_CODE_ODBC_INVALID_HANDLE, "connection closed or not ready"); \ + return SQL_ERROR; \ + } \ +} while (0); + +#define CHK_CONN_TAOS(obj) \ +do { \ + if (!obj->conn->taos) { \ + SET_ERROR(obj, "HY000", TSDB_CODE_ODBC_INVALID_HANDLE, "connection to data source closed or not ready"); \ + return SQL_ERROR; \ + } \ +} while (0); + +#define CHK_RS(r_091c, sql_091c, fmt_091c, ...) \ +do { \ + r_091c = SQL_ERROR; \ + int e = sql_091c->rs ? 
taos_errno(sql_091c->rs) : terrno; \ + if (e != TSDB_CODE_SUCCESS) { \ + SET_ERROR(sql_091c, "HY000", e, fmt_091c, ##__VA_ARGS__); \ + break; \ + } \ + r_091c = SQL_SUCCESS; \ +} while (0) + +#define PROFILING 0 + +#define PROFILE(statement) \ +do { \ + if (!PROFILING) { \ + statement; \ + break; \ + } \ + struct timeval tv0, tv1; \ + gettimeofday(&tv0, NULL); \ + statement; \ + gettimeofday(&tv1, NULL); \ + double delta = difftime(tv1.tv_sec, tv0.tv_sec); \ + delta *= 1000000; \ + delta += (tv1.tv_usec-tv0.tv_usec); \ + delta /= 1000000; \ + D("%s: elapsed: [%.6f]s", #statement, delta); \ +} while (0) + + +#define CHK_CONV(statement) \ +do { \ + const char *sqlstate = statement; \ + if (sqlstate) { \ + SET_ERROR(sql, sqlstate, TSDB_CODE_ODBC_OUT_OF_RANGE, \ + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", \ + sql_c_type(valueType), valueType, valueType, \ + taos_data_type(type), type, type, idx+1); \ + return SQL_ERROR; \ + } \ +} while (0) + +typedef struct env_s env_t; +typedef struct conn_s conn_t; +typedef struct sql_s sql_t; +typedef struct taos_error_s taos_error_t; +typedef struct param_bind_s param_bind_t; + +struct param_bind_s { + SQLUSMALLINT ParameterNumber; + SQLSMALLINT ValueType; + SQLSMALLINT ParameterType; + SQLULEN LengthPrecision; + SQLSMALLINT ParameterScale; + SQLPOINTER ParameterValue; + SQLLEN *StrLen_or_Ind; + + unsigned int valid; +}; + +struct taos_error_s { + char *err_str; + int err_no; + + SQLCHAR sql_state[6]; +}; + +struct env_s { + uint64_t refcount; + unsigned int destroying:1; + + taos_error_t err; +}; + +struct conn_s { + uint64_t refcount; + env_t *env; + + TAOS *taos; + + taos_error_t err; +}; + +struct sql_s { + uint64_t refcount; + conn_t *conn; + + TAOS_STMT *stmt; + param_bind_t *params; + int n_params; + size_t rowlen; + size_t n_rows; + size_t ptr_offset; + + TAOS_RES *rs; + TAOS_ROW row; + + taos_error_t err; + unsigned int is_prepared:1; + unsigned int is_insert:1; + unsigned int is_executed:1; 
+}; + +typedef struct c_target_s c_target_t; +struct c_target_s { + SQLUSMALLINT col; + SQLSMALLINT ct; // c type: SQL_C_XXX + char *ptr; + SQLLEN len; + SQLLEN *soi; +}; + +static pthread_once_t init_once = PTHREAD_ONCE_INIT; +static void init_routine(void); + +// conversions + +const char* tsdb_int64_to_bit(int64_t src, int8_t *dst); +const char* tsdb_int64_to_tinyint(int64_t src, int8_t *dst); +const char* tsdb_int64_to_smallint(int64_t src, int16_t *dst); +const char* tsdb_int64_to_int(int64_t src, int32_t *dst); +const char* tsdb_int64_to_bigint(int64_t src, int64_t *dst); +const char* tsdb_int64_to_ts(int64_t src, int64_t *dst); +const char* tsdb_int64_to_float(int64_t src, float *dst); +const char* tsdb_int64_to_double(int64_t src, double *dst); +const char* tsdb_int64_to_char(int64_t src, char *dst, size_t dlen); + +const char* tsdb_double_to_bit(double src, int precision, int8_t *dst); +const char* tsdb_double_to_tinyint(double src, int precision, int8_t *dst); +const char* tsdb_double_to_smallint(double src, int precision, int16_t *dst); +const char* tsdb_double_to_int(double src, int precision, int32_t *dst); +const char* tsdb_double_to_bigint(double src, int precision, int64_t *dst); +const char* tsdb_double_to_ts(double src, int precision, int64_t *dst); +const char* tsdb_double_to_float(double src, int precision, float *dst); +const char* tsdb_double_to_double(double src, int precision, double *dst); +const char* tsdb_double_to_char(double src, int precision, char *dst, size_t dlen); + +const char* tsdb_chars_to_bit(const char *src, int8_t *dst); +const char* tsdb_chars_to_tinyint(const char *src, int8_t *dst); +const char* tsdb_chars_to_smallint(const char *src, int16_t *dst); +const char* tsdb_chars_to_int(const char *src, int32_t *dst); +const char* tsdb_chars_to_bigint(const char *src, int64_t *dst); +const char* tsdb_chars_to_ts(const char *src, int64_t *dst); +const char* tsdb_chars_to_float(const char *src, float *dst); +const char* 
tsdb_chars_to_double(const char *src, double *dst); +const char* tsdb_chars_to_char(const char *src, char *dst, size_t dlen); + + +static int do_field_display_size(TAOS_FIELD *field); + +static SQLRETURN doSQLAllocEnv(SQLHENV *EnvironmentHandle) +{ + pthread_once(&init_once, init_routine); + + env_t *env = (env_t*)calloc(1, sizeof(*env)); + if (!env) return SQL_ERROR; + + DASSERT(INC_REF(env)>0); + + *EnvironmentHandle = env; + + CLR_ERROR(env); + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLAllocEnv(SQLHENV *EnvironmentHandle) +{ + SQLRETURN r; + r = doSQLAllocEnv(EnvironmentHandle); + return r; +} + +static SQLRETURN doSQLFreeEnv(SQLHENV EnvironmentHandle) +{ + env_t *env = (env_t*)EnvironmentHandle; + if (!env) return SQL_ERROR; + + DASSERT(GET_REF(env)==1); + + DASSERT(!env->destroying); + + env->destroying = 1; + DASSERT(env->destroying == 1); + + DASSERT(DEC_REF(env)==0); + + FREE_ERROR(env); + free(env); + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLFreeEnv(SQLHENV EnvironmentHandle) +{ + SQLRETURN r; + r = doSQLFreeEnv(EnvironmentHandle); + return r; +} + +static SQLRETURN doSQLAllocConnect(SQLHENV EnvironmentHandle, + SQLHDBC *ConnectionHandle) +{ + env_t *env = (env_t*)EnvironmentHandle; + if (!env) return SQL_ERROR; + + DASSERT(INC_REF(env)>1); + + conn_t *conn = NULL; + do { + conn = (conn_t*)calloc(1, sizeof(*conn)); + if (!conn) { + SET_ERROR(env, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + + conn->env = env; + *ConnectionHandle = conn; + + DASSERT(INC_REF(conn)>0); + + return SQL_SUCCESS; + } while (0); + + DASSERT(DEC_REF(env)>0); + + return SQL_ERROR; +} + +SQLRETURN SQL_API SQLAllocConnect(SQLHENV EnvironmentHandle, + SQLHDBC *ConnectionHandle) +{ + SQLRETURN r; + r = doSQLAllocConnect(EnvironmentHandle, ConnectionHandle); + return r; +} + +static SQLRETURN doSQLFreeConnect(SQLHDBC ConnectionHandle) +{ + conn_t *conn = (conn_t*)ConnectionHandle; + if (!conn) return SQL_ERROR; + + DASSERT(GET_REF(conn)==1); + + DASSERT(conn->env); + + 
do { + if (conn->taos) { + taos_close(conn->taos); + conn->taos = NULL; + } + + DASSERT(DEC_REF(conn->env)>0); + DASSERT(DEC_REF(conn)==0); + + conn->env = NULL; + FREE_ERROR(conn); + free(conn); + } while (0); + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLFreeConnect(SQLHDBC ConnectionHandle) +{ + SQLRETURN r; + r = doSQLFreeConnect(ConnectionHandle); + return r; +} + +static SQLRETURN doSQLConnect(SQLHDBC ConnectionHandle, + SQLCHAR *ServerName, SQLSMALLINT NameLength1, + SQLCHAR *UserName, SQLSMALLINT NameLength2, + SQLCHAR *Authentication, SQLSMALLINT NameLength3) +{ + conn_t *conn = (conn_t*)ConnectionHandle; + if (!conn) return SQL_ERROR; + + if (conn->taos) { + SET_ERROR(conn, "08002", TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still in use"); + return SQL_ERROR; + } + + const char *serverName = SDUP(ServerName, NameLength1); + const char *userName = SDUP(UserName, NameLength2); + const char *auth = SDUP(Authentication, NameLength3); + + do { + if ((ServerName && !serverName) || (UserName && !userName) || (Authentication && !auth)) { + SET_ERROR(conn, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + + // TODO: data-race + // TODO: shall receive ip/port from odbc.ini + conn->taos = taos_connect("localhost", userName, auth, NULL, 0); + if (!conn->taos) { + SET_ERROR(conn, "08001", terrno, "failed to connect to data source"); + break; + } + } while (0); + + SFRE(serverName, ServerName, NameLength1); + SFRE(userName, UserName, NameLength2); + SFRE(auth, Authentication, NameLength3); + + return conn->taos ? 
SQL_SUCCESS : SQL_ERROR; +} + +SQLRETURN SQL_API SQLConnect(SQLHDBC ConnectionHandle, + SQLCHAR *ServerName, SQLSMALLINT NameLength1, + SQLCHAR *UserName, SQLSMALLINT NameLength2, + SQLCHAR *Authentication, SQLSMALLINT NameLength3) +{ + SQLRETURN r; + r = doSQLConnect(ConnectionHandle, ServerName, NameLength1, + UserName, NameLength2, + Authentication, NameLength3); + return r; +} + +static SQLRETURN doSQLDisconnect(SQLHDBC ConnectionHandle) +{ + conn_t *conn = (conn_t*)ConnectionHandle; + if (!conn) return SQL_ERROR; + + if (conn->taos) { + taos_close(conn->taos); + conn->taos = NULL; + } + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLDisconnect(SQLHDBC ConnectionHandle) +{ + SQLRETURN r; + r = doSQLDisconnect(ConnectionHandle); + return r; +} + +static SQLRETURN doSQLAllocStmt(SQLHDBC ConnectionHandle, + SQLHSTMT *StatementHandle) +{ + conn_t *conn = (conn_t*)ConnectionHandle; + if (!conn) return SQL_ERROR; + + DASSERT(INC_REF(conn)>1); + + do { + sql_t *sql = (sql_t*)calloc(1, sizeof(*sql)); + if (!sql) { + SET_ERROR(conn, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + + sql->conn = conn; + DASSERT(INC_REF(sql)>0); + + *StatementHandle = sql; + + return SQL_SUCCESS; + } while (0); + + DASSERT(DEC_REF(conn)>0); + + return SQL_ERROR; +} + +SQLRETURN SQL_API SQLAllocStmt(SQLHDBC ConnectionHandle, + SQLHSTMT *StatementHandle) +{ + SQLRETURN r; + r = doSQLAllocStmt(ConnectionHandle, StatementHandle); + return r; +} + +static SQLRETURN doSQLAllocHandle(SQLSMALLINT HandleType, SQLHANDLE InputHandle, SQLHANDLE *OutputHandle) +{ + switch (HandleType) { + case SQL_HANDLE_ENV: { + SQLHENV env = {0}; + SQLRETURN r = doSQLAllocEnv(&env); + if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = env; + return r; + } break; + case SQL_HANDLE_DBC: { + SQLHDBC dbc = {0}; + SQLRETURN r = doSQLAllocConnect(InputHandle, &dbc); + if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = dbc; + return r; + } break; + case SQL_HANDLE_STMT: { + SQLHSTMT stmt = {0}; + SQLRETURN r = 
doSQLAllocStmt(InputHandle, &stmt); + if (r==SQL_SUCCESS && OutputHandle) *OutputHandle = stmt; + return r; + } break; + default: { + return SQL_ERROR; + } break; + } +} + +SQLRETURN SQL_API SQLAllocHandle(SQLSMALLINT HandleType, SQLHANDLE InputHandle, SQLHANDLE *OutputHandle) +{ + SQLRETURN r; + r = doSQLAllocHandle(HandleType, InputHandle, OutputHandle); + return r; +} + +static SQLRETURN doSQLFreeStmt(SQLHSTMT StatementHandle, + SQLUSMALLINT Option) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + if (Option == SQL_CLOSE) return SQL_SUCCESS; + if (Option != SQL_DROP) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "free statement with Option[%x] not supported yet", Option); + return SQL_ERROR; + } + + DASSERT(GET_REF(sql)==1); + + if (sql->rs) { + taos_free_result(sql->rs); + sql->rs = NULL; + } + + if (sql->stmt) { + taos_stmt_close(sql->stmt); + sql->stmt = NULL; + } + + if (sql->params) { + free(sql->params); + sql->params = NULL; + } + sql->n_params = 0; + + DASSERT(DEC_REF(sql->conn)>0); + DASSERT(DEC_REF(sql)==0); + + sql->conn = NULL; + + FREE_ERROR(sql); + free(sql); + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLFreeStmt(SQLHSTMT StatementHandle, + SQLUSMALLINT Option) +{ + SQLRETURN r; + r = doSQLFreeStmt(StatementHandle, Option); + return r; +} + +static SQLRETURN doSQLExecDirect(SQLHSTMT StatementHandle, + SQLCHAR *StatementText, SQLINTEGER TextLength) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (sql->rs) { + taos_free_result(sql->rs); + sql->rs = NULL; + sql->row = NULL; + } + + if (sql->stmt) { + taos_stmt_close(sql->stmt); + sql->stmt = NULL; + } + + if (sql->params) { + free(sql->params); + sql->params = NULL; + } + sql->n_params = 0; + + const char *stxt = SDUP(StatementText, TextLength); + + SQLRETURN r = SQL_ERROR; + do { + if (!stxt) { + SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + sql->rs = 
taos_query(sql->conn->taos, stxt); + CHK_RS(r, sql, "failed to execute"); + } while (0); + + SFRE(stxt, StatementText, TextLength); + + return r; +} + +SQLRETURN SQL_API SQLExecDirect(SQLHSTMT StatementHandle, + SQLCHAR *StatementText, SQLINTEGER TextLength) +{ + SQLRETURN r; + r = doSQLExecDirect(StatementHandle, StatementText, TextLength); + return r; +} + +SQLRETURN SQL_API SQLExecDirectW(SQLHSTMT hstmt, SQLWCHAR *szSqlStr, SQLINTEGER cbSqlStr) +{ + size_t bytes = 0; + SQLCHAR *utf8 = wchars_to_chars(szSqlStr, cbSqlStr, &bytes); + return SQLExecDirect(hstmt, utf8, bytes); +} + +static SQLRETURN doSQLNumResultCols(SQLHSTMT StatementHandle, + SQLSMALLINT *ColumnCount) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (sql->is_insert) { + if (ColumnCount) { + *ColumnCount = 0; + } + return SQL_SUCCESS; + } + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + int fields = taos_field_count(sql->rs); + if (ColumnCount) { + *ColumnCount = fields; + } + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLNumResultCols(SQLHSTMT StatementHandle, + SQLSMALLINT *ColumnCount) +{ + SQLRETURN r; + r = doSQLNumResultCols(StatementHandle, ColumnCount); + return r; +} + +static SQLRETURN doSQLRowCount(SQLHSTMT StatementHandle, + SQLLEN *RowCount) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (sql->is_insert) { + if (RowCount) *RowCount = 0; + return SQL_SUCCESS; + } + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + int rows = taos_affected_rows(sql->rs); + if (RowCount) { + *RowCount = rows; + } + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLRowCount(SQLHSTMT StatementHandle, + SQLLEN *RowCount) +{ + SQLRETURN r; + r = doSQLRowCount(StatementHandle, RowCount); + return r; +} + +static SQLRETURN doSQLColAttribute(SQLHSTMT 
StatementHandle, + SQLUSMALLINT ColumnNumber, SQLUSMALLINT FieldIdentifier, + SQLPOINTER CharacterAttribute, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLLEN *NumericAttribute ) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + int nfields = taos_field_count(sql->rs); + TAOS_FIELD *fields = taos_fetch_fields(sql->rs); + + if (nfields==0 || fields==NULL) { + SET_ERROR(sql, "07005", TSDB_CODE_ODBC_NO_FIELDS, ""); + return SQL_ERROR; + } + + if (ColumnNumber<=0 || ColumnNumber>nfields) { + SET_ERROR(sql, "07009", TSDB_CODE_ODBC_OUT_OF_RANGE, "invalid column number [%d]", ColumnNumber); + return SQL_ERROR; + } + + TAOS_FIELD *field = fields + ColumnNumber-1; + + switch (FieldIdentifier) { + case SQL_COLUMN_DISPLAY_SIZE: { + *NumericAttribute = do_field_display_size(field); + } break; + case SQL_COLUMN_LABEL: { + size_t n = sizeof(field->name); + strncpy(CharacterAttribute, field->name, (n>BufferLength ? 
BufferLength : n)); + } break; + case SQL_COLUMN_UNSIGNED: { + *NumericAttribute = SQL_FALSE; + } break; + default: { + SET_ERROR(sql, "HY091", TSDB_CODE_ODBC_OUT_OF_RANGE, + "FieldIdentifier[%d/0x%x] for Column [%d] not supported yet", + FieldIdentifier, FieldIdentifier, ColumnNumber); + return SQL_ERROR; + } break; + } + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLColAttribute(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLUSMALLINT FieldIdentifier, + SQLPOINTER CharacterAttribute, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLLEN *NumericAttribute ) +{ + SQLRETURN r; + r = doSQLColAttribute(StatementHandle, ColumnNumber, FieldIdentifier, + CharacterAttribute, BufferLength, + StringLength, NumericAttribute); + return r; +} + +static SQLRETURN conv_tsdb_bool_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_bool_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b); +static SQLRETURN conv_tsdb_v1_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_long(sql_t *sql, c_target_t 
*target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v1_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1); +static SQLRETURN conv_tsdb_v2_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v2_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2); +static SQLRETURN conv_tsdb_v4_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v4_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4); +static SQLRETURN conv_tsdb_v8_to_c_sbigint(sql_t 
*sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); +static SQLRETURN conv_tsdb_v8_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); +static SQLRETURN conv_tsdb_v8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); +static SQLRETURN conv_tsdb_v8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); +static SQLRETURN conv_tsdb_v8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8); +static SQLRETURN conv_tsdb_f4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); +static SQLRETURN conv_tsdb_f4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); +static SQLRETURN conv_tsdb_f4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); +static SQLRETURN conv_tsdb_f4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4); +static SQLRETURN conv_tsdb_f8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); +static SQLRETURN conv_tsdb_f8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); +static SQLRETURN conv_tsdb_f8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8); +static SQLRETURN conv_tsdb_ts_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); +static SQLRETURN conv_tsdb_ts_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); +static SQLRETURN conv_tsdb_ts_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); +static SQLRETURN conv_tsdb_ts_to_c_ts(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts); +static SQLRETURN conv_tsdb_bin_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin); +static SQLRETURN conv_tsdb_bin_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin); +static SQLRETURN conv_tsdb_str_to_c_bit(sql_t *sql, c_target_t *target, 
TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_v1(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_v2(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_v4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_f4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_f8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); +static SQLRETURN conv_tsdb_str_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str); + +static SQLRETURN doSQLGetData(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLSMALLINT TargetType, + SQLPOINTER TargetValue, SQLLEN BufferLength, + SQLLEN *StrLen_or_Ind) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + if (!sql->row) { + SET_ERROR(sql, "24000", TSDB_CODE_ODBC_INVALID_CURSOR, ""); + return SQL_ERROR; + } + + DASSERT(TargetValue); + + int nfields = taos_field_count(sql->rs); + TAOS_FIELD *fields = taos_fetch_fields(sql->rs); + + if (ColumnNumber<=0 || ColumnNumber>nfields) { + SET_ERROR(sql, "07009", TSDB_CODE_ODBC_OUT_OF_RANGE, "invalid column number [%d]", ColumnNumber); + return SQL_ERROR; + } + + if (TargetValue == NULL) { + SET_ERROR(sql, "HY009", TSDB_CODE_ODBC_BAD_ARG, "NULL TargetValue not allowed for col [%d]", ColumnNumber); + return SQL_ERROR; + } + + TAOS_FIELD *field = fields + ColumnNumber-1; + void *row = sql->row[ColumnNumber-1]; + + if (!row) { + if 
(StrLen_or_Ind) { + *StrLen_or_Ind = SQL_NULL_DATA; + } + return SQL_SUCCESS; + } + + c_target_t target = {0}; + target.col = ColumnNumber; + target.ct = TargetType; + target.ptr = TargetValue; + target.len = BufferLength; + target.soi = StrLen_or_Ind; + + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: { + int8_t v = *(int8_t*)row; + if (v) v = 1; + switch (target.ct) { + case SQL_C_BIT: return conv_tsdb_bool_to_c_bit(sql, &target, field, v); + case SQL_C_TINYINT: return conv_tsdb_bool_to_c_tinyint(sql, &target, field, v); + case SQL_C_SHORT: return conv_tsdb_bool_to_c_short(sql, &target, field, v); + case SQL_C_LONG: return conv_tsdb_bool_to_c_long(sql, &target, field, v); + case SQL_C_SBIGINT: return conv_tsdb_bool_to_c_sbigint(sql, &target, field, v); + case SQL_C_FLOAT: return conv_tsdb_bool_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_bool_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_bool_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_bool_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_TINYINT: { + int8_t v = *(int8_t*)row; + switch (target.ct) { + case SQL_C_TINYINT: return conv_tsdb_v1_to_c_tinyint(sql, &target, field, v); + case SQL_C_SHORT: return conv_tsdb_v1_to_c_short(sql, &target, field, v); + case SQL_C_LONG: return conv_tsdb_v1_to_c_long(sql, &target, field, v); + case SQL_C_SBIGINT: return conv_tsdb_v1_to_c_sbigint(sql, &target, field, v); + case SQL_C_FLOAT: return conv_tsdb_v1_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_v1_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_v1_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return 
conv_tsdb_v1_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t v = *(int16_t*)row; + switch (target.ct) { + case SQL_C_SHORT: return conv_tsdb_v2_to_c_short(sql, &target, field, v); + case SQL_C_LONG: return conv_tsdb_v2_to_c_long(sql, &target, field, v); + case SQL_C_SBIGINT: return conv_tsdb_v2_to_c_sbigint(sql, &target, field, v); + case SQL_C_FLOAT: return conv_tsdb_v2_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_v2_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_v2_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_v2_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_INT: { + int32_t v = *(int32_t*)row; + switch (target.ct) { + case SQL_C_LONG: return conv_tsdb_v4_to_c_long(sql, &target, field, v); + case SQL_C_SBIGINT: return conv_tsdb_v4_to_c_sbigint(sql, &target, field, v); + case SQL_C_FLOAT: return conv_tsdb_v4_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_v4_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_v4_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_v4_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case 
TSDB_DATA_TYPE_BIGINT: { + int64_t v = *(int64_t*)row; + switch (target.ct) { + case SQL_C_SBIGINT: return conv_tsdb_v8_to_c_sbigint(sql, &target, field, v); + case SQL_C_FLOAT: return conv_tsdb_v8_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_v8_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_v8_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_v8_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_FLOAT: { + float v = *(float*)row; + switch (target.ct) { + case SQL_C_FLOAT: return conv_tsdb_f4_to_c_float(sql, &target, field, v); + case SQL_C_DOUBLE: return conv_tsdb_f4_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_f4_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_f4_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_DOUBLE: { + double v = *(double*)row; + switch (target.ct) { + case SQL_C_DOUBLE: return conv_tsdb_f8_to_c_double(sql, &target, field, v); + case SQL_C_CHAR: return conv_tsdb_f8_to_c_char(sql, &target, field, v); + case SQL_C_BINARY: return conv_tsdb_f8_to_c_binary(sql, &target, field, v); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_TIMESTAMP: { + SQL_TIMESTAMP_STRUCT ts = 
{0}; + int64_t v = *(int64_t*)row; + time_t t = v/1000; + struct tm tm = {0}; + localtime_r(&t, &tm); + ts.year = tm.tm_year + 1900; + ts.month = tm.tm_mon + 1; + ts.day = tm.tm_mday; + ts.hour = tm.tm_hour; + ts.minute = tm.tm_min; + ts.second = tm.tm_sec; + ts.fraction = v%1000 * 1000000; + switch (target.ct) { + case SQL_C_SBIGINT: return conv_tsdb_ts_to_c_v8(sql, &target, field, &ts); + case SQL_C_CHAR: return conv_tsdb_ts_to_c_str(sql, &target, field, &ts); + case SQL_C_BINARY: return conv_tsdb_ts_to_c_bin(sql, &target, field, &ts); + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_TIMESTAMP: return conv_tsdb_ts_to_c_ts(sql, &target, field, &ts); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_BINARY: { + const unsigned char *bin = (const unsigned char *)row; + switch (target.ct) { + case SQL_C_CHAR: return conv_tsdb_bin_to_c_str(sql, &target, field, bin); + case SQL_C_BINARY: return conv_tsdb_bin_to_c_bin(sql, &target, field, bin); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + case TSDB_DATA_TYPE_NCHAR: { + const char *str = (const char *)row; + switch (target.ct) { + case SQL_C_BIT: return conv_tsdb_str_to_c_bit(sql, &target, field, str); + case SQL_C_TINYINT: return conv_tsdb_str_to_c_v1(sql, &target, field, str); + case SQL_C_SHORT: return conv_tsdb_str_to_c_v2(sql, &target, field, str); + case SQL_C_LONG: return conv_tsdb_str_to_c_v4(sql, &target, field, str); + case SQL_C_SBIGINT: return conv_tsdb_str_to_c_v8(sql, &target, field, str); + case SQL_C_FLOAT: return conv_tsdb_str_to_c_f4(sql, &target, field, str); + case 
SQL_C_DOUBLE: return conv_tsdb_str_to_c_f8(sql, &target, field, str); + case SQL_C_CHAR: return conv_tsdb_str_to_c_str(sql, &target, field, str); + case SQL_C_BINARY: return conv_tsdb_str_to_c_bin(sql, &target, field, str); + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, + "no convertion from [%s] to [%s[%d][0x%x]] for col [%d]", + taos_data_type(field->type), sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } + } + } break; + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for col [%d]", + taos_data_type(field->type), field->type, field->type, + sql_c_type(target.ct), target.ct, target.ct, ColumnNumber); + return SQL_ERROR; + } break; + } +} + +SQLRETURN SQL_API SQLGetData(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLSMALLINT TargetType, + SQLPOINTER TargetValue, SQLLEN BufferLength, + SQLLEN *StrLen_or_Ind) +{ + SQLRETURN r; + r = doSQLGetData(StatementHandle, ColumnNumber, TargetType, + TargetValue, BufferLength, + StrLen_or_Ind); + return r; +} + +static SQLRETURN doSQLFetch(SQLHSTMT StatementHandle) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + sql->row = taos_fetch_row(sql->rs); + return sql->row ? 
SQL_SUCCESS : SQL_NO_DATA; +} + +SQLRETURN SQL_API SQLFetch(SQLHSTMT StatementHandle) +{ + SQLRETURN r; + r = doSQLFetch(StatementHandle); + return r; +} + +static SQLRETURN doSQLPrepare(SQLHSTMT StatementHandle, + SQLCHAR *StatementText, SQLINTEGER TextLength) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (sql->rs) { + taos_free_result(sql->rs); + sql->rs = NULL; + sql->row = NULL; + } + + if (sql->stmt) { + taos_stmt_close(sql->stmt); + sql->stmt = NULL; + } + + if (sql->params) { + free(sql->params); + sql->params = NULL; + } + sql->n_params = 0; + sql->is_insert = 0; + + do { + sql->stmt = taos_stmt_init(sql->conn->taos); + if (!sql->stmt) { + SET_ERROR(sql, "HY001", terrno, "failed to initialize TAOS statement internally"); + break; + } + + int ok = 0; + do { + int r = taos_stmt_prepare(sql->stmt, (const char *)StatementText, TextLength); + if (r) { + SET_ERROR(sql, "HY000", r, "failed to prepare a TAOS statement"); + break; + } + sql->is_prepared = 1; + + int is_insert = 0; + r = taos_stmt_is_insert(sql->stmt, &is_insert); + if (r) { + SET_ERROR(sql, "HY000", r, "failed to determine if a prepared-statement is of insert"); + break; + } + sql->is_insert = is_insert ? 1 : 0; + + int params = 0; + r = taos_stmt_num_params(sql->stmt, ¶ms); + if (r) { + SET_ERROR(sql, "HY000", terrno, "fetch num of statement params failed"); + break; + } + DASSERT(params>=0); + + if (params>0) { + param_bind_t *ar = (param_bind_t*)calloc(1, params * sizeof(*ar)); + if (!ar) { + SET_ERROR(sql, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + sql->params = ar; + } + + sql->n_params = params; + + ok = 1; + } while (0); + + if (!ok) { + taos_stmt_close(sql->stmt); + sql->stmt = NULL; + sql->is_prepared = 0; + sql->is_insert = 0; + sql->is_executed = 0; + } + } while (0); + + return sql->stmt ? 
SQL_SUCCESS : SQL_ERROR; +} + +SQLRETURN SQL_API SQLPrepare(SQLHSTMT StatementHandle, + SQLCHAR *StatementText, SQLINTEGER TextLength) +{ + SQLRETURN r; + r = doSQLPrepare(StatementHandle, StatementText, TextLength); + return r; +} + +static const int yes = 1; +static const int no = 0; + +static SQLRETURN do_bind_param_value(sql_t *sql, int idx_row, int idx, param_bind_t *param, TAOS_BIND *bind) +{ + if (!param->valid) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "parameter [@%d] not bound yet", idx+1); + return SQL_ERROR; + } + + SQLPOINTER paramValue = param->ParameterValue; + SQLSMALLINT valueType = param->ValueType; + SQLLEN *soi = param->StrLen_or_Ind; + + size_t offset = idx_row * sql->rowlen + sql->ptr_offset; + + if (paramValue) paramValue += offset; + if (soi) soi = (SQLLEN*)((char*)soi + offset); + + + if (soi && *soi == SQL_NULL_DATA) { + bind->is_null = (int*)&yes; + return SQL_SUCCESS; + } + bind->is_null = (int*)&no; + int type = 0; + int bytes = 0; + if (sql->is_insert) { + int r = taos_stmt_get_param(sql->stmt, idx, &type, &bytes); + if (r) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_OUT_OF_RANGE, "parameter [@%d] not valid", idx+1); + return SQL_ERROR; + } + } else { + switch (valueType) { + case SQL_C_LONG: { + type = TSDB_DATA_TYPE_INT; + } break; + case SQL_C_WCHAR: { + type = TSDB_DATA_TYPE_NCHAR; + bytes = SQL_NTS; + } break; + case SQL_C_CHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] for parameter [%d]", + 
sql_c_type(valueType), valueType, valueType, + idx+1); + return SQL_ERROR; + } break; + } + } + + // ref: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/converting-data-from-c-to-sql-data-types?view=sql-server-ver15 + switch (type) { + case TSDB_DATA_TYPE_BOOL: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.b); + bind->buffer = &bind->u.b; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_LONG: { + CHK_CONV(tsdb_int64_to_bit(*(int32_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_BIT: { + CHK_CONV(tsdb_int64_to_bit(*(int8_t*)paramValue, &bind->u.b)); + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_TINYINT: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.v1); + bind->buffer = &bind->u.v1; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_TINYINT: { + CHK_CONV(tsdb_int64_to_tinyint(*(int8_t*)paramValue, &bind->u.v1)); + } break; + case SQL_C_SHORT: { + CHK_CONV(tsdb_int64_to_tinyint(*(int16_t*)paramValue, &bind->u.v1)); + } break; + case SQL_C_LONG: { + CHK_CONV(tsdb_int64_to_tinyint(*(int32_t*)paramValue, &bind->u.v1)); + } break; + case SQL_C_SBIGINT: { + CHK_CONV(tsdb_int64_to_tinyint(*(int64_t*)paramValue, 
&bind->u.v1)); + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_SMALLINT: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.v2); + bind->buffer = &bind->u.v2; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_LONG: { + CHK_CONV(tsdb_int64_to_smallint(*(int32_t*)paramValue, &bind->u.v2)); + } break; + case SQL_C_SHORT: { + CHK_CONV(tsdb_int64_to_smallint(*(int16_t*)paramValue, &bind->u.v2)); + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_INT: { + bind->buffer_type = type; + 
bind->buffer_length = sizeof(bind->u.v4); + bind->buffer = &bind->u.v4; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_LONG: { + CHK_CONV(tsdb_int64_to_int(*(int32_t*)paramValue, &bind->u.v4)); + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_BIGINT: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.v8); + bind->buffer = &bind->u.v8; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_SBIGINT: { + bind->u.v8 = *(int64_t*)paramValue; + } break; + case SQL_C_LONG: { + bind->u.v8 = *(int32_t*)paramValue; + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + 
sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_FLOAT: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.f4); + bind->buffer = &bind->u.f4; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_DOUBLE: { + bind->u.f4 = *(double*)paramValue; + } break; + case SQL_C_FLOAT: { + bind->u.f4 = *(float*)paramValue; + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_LONG: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_DOUBLE: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.f8); + bind->buffer = &bind->u.f8; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_DOUBLE: { + bind->u.f8 = *(double*)paramValue; + } break; + case SQL_C_CHAR: + case SQL_C_WCHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_LONG: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: 
+ case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_BINARY: { + bind->buffer_type = type; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_WCHAR: { + DASSERT(soi); + DASSERT(*soi != SQL_NTS); + size_t bytes = 0; + SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); + bind->allocated = 1; + bind->u.bin = utf8; + bind->buffer_length = bytes; + bind->buffer = bind->u.bin; + } break; + case SQL_C_BINARY: { + bind->u.bin = (unsigned char*)paramValue; + if (*soi == SQL_NTS) { + bind->buffer_length = strlen((const char*)paramValue); + } else { + bind->buffer_length = *soi; + } + bind->buffer = bind->u.bin; + } break; + case SQL_C_CHAR: + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_LONG: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_TIMESTAMP: { + bind->buffer_type = type; + bind->buffer_length = sizeof(bind->u.v8); + bind->buffer = &bind->u.v8; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_WCHAR: { + DASSERT(soi); + DASSERT(*soi != SQL_NTS); + size_t bytes = 0; + int r = 0; + int64_t 
t = 0; + SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); + // why cast utf8 to 'char*' ? + r = taosParseTime((char*)utf8, &t, strlen((const char*)utf8), TSDB_TIME_PRECISION_MILLI, 0); + bind->u.v8 = t; + free(utf8); + if (r) { + SET_ERROR(sql, "22007", TSDB_CODE_ODBC_OUT_OF_RANGE, + "convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d] failed", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } + } break; + case SQL_C_SBIGINT: { + int64_t t = *(int64_t*)paramValue; + bind->u.v8 = t; + } break; + case SQL_C_SHORT: + case SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_LONG: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + case TSDB_DATA_TYPE_NCHAR: { + bind->buffer_type = type; + bind->length = &bind->buffer_length; + switch (valueType) { + case SQL_C_WCHAR: { + DASSERT(soi); + DASSERT(*soi != SQL_NTS); + size_t bytes = 0; + SQLCHAR *utf8 = wchars_to_chars(paramValue, *soi/2, &bytes); + bind->allocated = 1; + bind->u.nchar = (char*)utf8; + bind->buffer_length = bytes; + bind->buffer = bind->u.nchar; + } break; + case SQL_C_CHAR: { + bind->u.nchar = (char*)paramValue; + if (*soi == SQL_NTS) { + bind->buffer_length = strlen((const char*)paramValue); + } else { + bind->buffer_length = *soi; + } + bind->buffer = bind->u.nchar; + } break; + case SQL_C_SHORT: + case 
SQL_C_SSHORT: + case SQL_C_USHORT: + case SQL_C_LONG: + case SQL_C_SLONG: + case SQL_C_ULONG: + case SQL_C_FLOAT: + case SQL_C_DOUBLE: + case SQL_C_BIT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + case SQL_C_UTINYINT: + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + case SQL_C_BINARY: + case SQL_C_DATE: + case SQL_C_TIME: + case SQL_C_TIMESTAMP: + case SQL_C_TYPE_DATE: + case SQL_C_TYPE_TIME: + case SQL_C_TYPE_TIMESTAMP: + case SQL_C_NUMERIC: + case SQL_C_GUID: + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + } break; + default: { + SET_ERROR(sql, "HYC00", TSDB_CODE_ODBC_OUT_OF_RANGE, + "no convertion from [%s[%d/0x%x]] to [%s[%d/0x%x]] for parameter [%d]", + sql_c_type(valueType), valueType, valueType, + taos_data_type(type), type, type, idx+1); + return SQL_ERROR; + } break; + } + return SQL_SUCCESS; +} + +static SQLRETURN do_bind_batch(sql_t *sql, int idx_row, TAOS_BIND *binds) +{ + for (int j=0; jn_params; ++j) { + SQLRETURN r = do_bind_param_value(sql, idx_row, j, sql->params+j, binds+j); + if (r==SQL_SUCCESS) continue; + return r; + } + if (sql->n_params > 0) { + int tr = 0; + PROFILE(tr = taos_stmt_bind_param(sql->stmt, binds)); + if (tr) { + SET_ERROR(sql, "HY000", tr, "failed to bind parameters[%d in total]", sql->n_params); + return SQL_ERROR; + } + + if (sql->is_insert) { + int r = 0; + PROFILE(r = taos_stmt_add_batch(sql->stmt)); + if (r) { + SET_ERROR(sql, "HY000", r, "failed to add batch"); + return SQL_ERROR; + } + } + } + return SQL_SUCCESS; +} + +static SQLRETURN do_execute(sql_t *sql) +{ + int tr = TSDB_CODE_SUCCESS; + if (sql->n_rows==0) sql->n_rows = 1; + for (int i=0; in_rows; ++i) { + TAOS_BIND *binds = NULL; + if (sql->n_params>0) { + binds = (TAOS_BIND*)calloc(sql->n_params, sizeof(*binds)); + if (!binds) { + SET_ERROR(sql, 
"HY001", TSDB_CODE_ODBC_OOM, ""); + return SQL_ERROR; + } + } + + SQLRETURN r = do_bind_batch(sql, i, binds); + + if (binds) { + for (int i = 0; in_params; ++i) { + TAOS_BIND *bind = binds + i; + if (bind->allocated) { + free(bind->u.nchar); + bind->u.nchar = NULL; + } + } + free(binds); + } + + if (r) return r; + } + + PROFILE(tr = taos_stmt_execute(sql->stmt)); + if (tr) { + SET_ERROR(sql, "HY000", tr, "failed to execute statement"); + return SQL_ERROR; + } + + sql->is_executed = 1; + if (sql->is_insert) return SQL_SUCCESS; + + SQLRETURN r = SQL_SUCCESS; + PROFILE(sql->rs = taos_stmt_use_result(sql->stmt)); + CHK_RS(r, sql, "failed to use result"); + + return r; +} + +static SQLRETURN doSQLExecute(SQLHSTMT StatementHandle) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->stmt) { + SET_ERROR(sql, "HY010", TSDB_CODE_ODBC_STATEMENT_NOT_READY, ""); + return SQL_ERROR; + } + + if (sql->rs) { + taos_free_result(sql->rs); + sql->rs = NULL; + sql->row = NULL; + } + + SQLRETURN r = do_execute(sql); + + return r; +} + +SQLRETURN SQL_API SQLExecute(SQLHSTMT StatementHandle) +{ + SQLRETURN r; + PROFILE(r = doSQLExecute(StatementHandle)); + return r; +} + +static SQLRETURN doSQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLSMALLINT DiagIdentifier, + SQLPOINTER DiagInfo, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength) +{ + // if this function is not exported, isql will never call SQLGetDiagRec + return SQL_ERROR; +} + +SQLRETURN SQL_API SQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLSMALLINT DiagIdentifier, + SQLPOINTER DiagInfo, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength) +{ + SQLRETURN r; + r = doSQLGetDiagField(HandleType, Handle, + RecNumber, DiagIdentifier, + DiagInfo, BufferLength, + StringLength); + return r; +} + +static SQLRETURN doSQLGetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, + 
SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, SQLSMALLINT *TextLength) +{ + if (RecNumber>1) return SQL_NO_DATA; + + switch (HandleType) { + case SQL_HANDLE_ENV: { + env_t *env = (env_t*)Handle; + if (!env) break; + FILL_ERROR(env); + return SQL_SUCCESS; + } break; + case SQL_HANDLE_DBC: { + conn_t *conn = (conn_t*)Handle; + if (!conn) break; + FILL_ERROR(conn); + return SQL_SUCCESS; + } break; + case SQL_HANDLE_STMT: { + sql_t *sql = (sql_t*)Handle; + if (!sql) break; + FILL_ERROR(sql); + return SQL_SUCCESS; + } break; + default: { + } break; + } + + // how to return error? + return SQL_ERROR; +} + +SQLRETURN SQL_API SQLGetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLCHAR *MessageText, + SQLSMALLINT BufferLength, SQLSMALLINT *TextLength) +{ + SQLRETURN r; + r = doSQLGetDiagRec(HandleType, Handle, + RecNumber, Sqlstate, + NativeError, MessageText, + BufferLength, TextLength); + return r; +} + +static SQLRETURN doSQLBindParameter( + SQLHSTMT StatementHandle, + SQLUSMALLINT ParameterNumber, + SQLSMALLINT fParamType, + SQLSMALLINT ValueType, + SQLSMALLINT ParameterType, + SQLULEN LengthPrecision, + SQLSMALLINT ParameterScale, + SQLPOINTER ParameterValue, + SQLLEN cbValueMax, // ignore for now, since only SQL_PARAM_INPUT is supported now + SQLLEN *StrLen_or_Ind) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->stmt) { + SET_ERROR(sql, "HY010", TSDB_CODE_ODBC_STATEMENT_NOT_READY, ""); + return SQL_ERROR; + } + + if (ParameterNumber<=0 || ParameterNumber>sql->n_params) { + SET_ERROR(sql, "07009", TSDB_CODE_ODBC_BAD_ARG, + "parameter [@%d] invalid", ParameterNumber); + return SQL_ERROR; + } + + if (fParamType != SQL_PARAM_INPUT) { + SET_ERROR(sql, "HY105", TSDB_CODE_ODBC_NOT_SUPPORT, "non-input parameter [@%d] not supported yet", 
ParameterNumber); + return SQL_ERROR; + } + + if (ValueType == SQL_C_DEFAULT) { + SET_ERROR(sql, "HY003", TSDB_CODE_ODBC_NOT_SUPPORT, "default value for parameter [@%d] not supported yet", ParameterNumber); + return SQL_ERROR; + } + + if (!is_valid_sql_c_type(ValueType)) { + SET_ERROR(sql, "HY003", TSDB_CODE_ODBC_NOT_SUPPORT, + "SQL_C_TYPE [%s/%d/0x%x] for parameter [@%d] unknown", + sql_c_type(ValueType), ValueType, ValueType, ParameterNumber); + return SQL_ERROR; + } + + if (!is_valid_sql_sql_type(ParameterType)) { + SET_ERROR(sql, "HY004", TSDB_CODE_ODBC_NOT_SUPPORT, + "SQL_TYPE [%s/%d/0x%x] for parameter [@%d] unknown", + sql_c_type(ParameterType), ParameterType, ParameterType, ParameterNumber); + return SQL_ERROR; + } + + param_bind_t *pb = sql->params + ParameterNumber - 1; + + pb->ParameterNumber = ParameterNumber; + pb->ValueType = ValueType; + pb->ParameterType = ParameterType; + pb->LengthPrecision = LengthPrecision; + pb->ParameterScale = ParameterScale; + pb->ParameterValue = ParameterValue; + pb->StrLen_or_Ind = StrLen_or_Ind; + + pb->valid = 1; + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLBindParameter( + SQLHSTMT StatementHandle, + SQLUSMALLINT ParameterNumber, + SQLSMALLINT fParamType, + SQLSMALLINT ValueType, + SQLSMALLINT ParameterType, + SQLULEN LengthPrecision, + SQLSMALLINT ParameterScale, + SQLPOINTER ParameterValue, + SQLLEN cbValueMax, // ignore for now, since only SQL_PARAM_INPUT is supported now + SQLLEN *StrLen_or_Ind) +{ + SQLRETURN r; + r = doSQLBindParameter(StatementHandle, ParameterNumber, fParamType, ValueType, ParameterType, + LengthPrecision, ParameterScale, ParameterValue, cbValueMax, StrLen_or_Ind); + return r; +} + +static SQLRETURN doSQLDriverConnect( + SQLHDBC hdbc, + SQLHWND hwnd, + SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut, + SQLUSMALLINT fDriverCompletion) +{ + conn_t *conn = (conn_t*)hdbc; + if (!conn) return SQL_ERROR; + + if 
(fDriverCompletion!=SQL_DRIVER_NOPROMPT) { + SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "option[%d] other than SQL_DRIVER_NOPROMPT not supported yet", fDriverCompletion); + return SQL_ERROR; + } + + if (conn->taos) { + SET_ERROR(conn, "08002", TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still in use"); + return SQL_ERROR; + } + + // DSN=; UID=; PWD= + + const char *connStr = SDUP(szConnStrIn, cbConnStrIn); + + char *serverName = NULL; + char *userName = NULL; + char *auth = NULL; + int bytes = 0; + + do { + if (szConnStrIn && !connStr) { + SET_ERROR(conn, "HY001", TSDB_CODE_ODBC_OOM, ""); + break; + } + + int n = sscanf((const char*)connStr, "DSN=%m[^;]; UID=%m[^;]; PWD=%m[^;] %n", &serverName, &userName, &auth, &bytes); + if (n<1) { + SET_ERROR(conn, "HY000", TSDB_CODE_ODBC_BAD_CONNSTR, "unrecognized connection string: [%s]", (const char*)szConnStrIn); + break; + } + + // TODO: data-race + // TODO: shall receive ip/port from odbc.ini + conn->taos = taos_connect("localhost", userName, auth, NULL, 0); + if (!conn->taos) { + SET_ERROR(conn, "HY000", terrno, "failed to connect to data source"); + break; + } + + if (szConnStrOut) { + snprintf((char*)szConnStrOut, cbConnStrOutMax, "%s", connStr); + } + if (pcbConnStrOut) { + *pcbConnStrOut = cbConnStrIn; + } + + } while (0); + + if (serverName) free(serverName); + if (userName) free(userName); + if (auth) free(auth); + + SFRE(connStr, szConnStrIn, cbConnStrIn); + + return conn->taos ? 
SQL_SUCCESS : SQL_ERROR; +} + +SQLRETURN SQL_API SQLDriverConnect( + SQLHDBC hdbc, + SQLHWND hwnd, + SQLCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut, + SQLUSMALLINT fDriverCompletion) +{ + SQLRETURN r; + r = doSQLDriverConnect(hdbc, hwnd, szConnStrIn, cbConnStrIn, szConnStrOut, cbConnStrOutMax, pcbConnStrOut, fDriverCompletion); + return r; +} + +static SQLRETURN doSQLSetConnectAttr(SQLHDBC ConnectionHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER StringLength) +{ + conn_t *conn = (conn_t*)ConnectionHandle; + if (!conn) return SQL_ERROR; + + if (Attribute != SQL_ATTR_AUTOCOMMIT) { + SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "Attribute other than SQL_ATTR_AUTOCOMMIT not supported yet"); + return SQL_ERROR; + } + if (Value != (SQLPOINTER)SQL_AUTOCOMMIT_ON) { + SET_ERROR(conn, "HYC00", TSDB_CODE_ODBC_NOT_SUPPORT, "Attribute Value other than SQL_AUTOCOMMIT_ON not supported yet[%p]", Value); + return SQL_ERROR; + } + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLSetConnectAttr(SQLHDBC ConnectionHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER StringLength) +{ + SQLRETURN r; + r = doSQLSetConnectAttr(ConnectionHandle, Attribute, Value, StringLength); + return r; +} + +static SQLRETURN doSQLDescribeCol(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLCHAR *ColumnName, + SQLSMALLINT BufferLength, SQLSMALLINT *NameLength, + SQLSMALLINT *DataType, SQLULEN *ColumnSize, + SQLSMALLINT *DecimalDigits, SQLSMALLINT *Nullable) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->rs) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NO_RESULT, ""); + return SQL_ERROR; + } + + int nfields = taos_field_count(sql->rs); + TAOS_FIELD *fields = taos_fetch_fields(sql->rs); + + if (ColumnNumber<=0 || ColumnNumber>nfields) { + SET_ERROR(sql, "07009", TSDB_CODE_ODBC_OUT_OF_RANGE, "invalid 
column number [%d]", ColumnNumber); + return SQL_ERROR; + } + + TAOS_FIELD *field = fields + ColumnNumber - 1; + if (ColumnName) { + size_t n = sizeof(field->name); + if (n>BufferLength) n = BufferLength; + strncpy((char*)ColumnName, field->name, n); + } + if (NameLength) { + *NameLength = strnlen(field->name, sizeof(field->name)); + } + if (ColumnSize) { + *ColumnSize = field->bytes; + } + if (DecimalDigits) *DecimalDigits = 0; + + if (DataType) { + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: { + *DataType = SQL_TINYINT; + } break; + + case TSDB_DATA_TYPE_TINYINT: { + *DataType = SQL_TINYINT; + } break; + + case TSDB_DATA_TYPE_SMALLINT: { + *DataType = SQL_SMALLINT; + } break; + + case TSDB_DATA_TYPE_INT: { + *DataType = SQL_INTEGER; + } break; + + case TSDB_DATA_TYPE_BIGINT: { + *DataType = SQL_BIGINT; + } break; + + case TSDB_DATA_TYPE_FLOAT: { + *DataType = SQL_FLOAT; + } break; + + case TSDB_DATA_TYPE_DOUBLE: { + *DataType = SQL_DOUBLE; + } break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + // *DataType = SQL_TIMESTAMP; + // *ColumnSize = 30; + // *DecimalDigits = 3; + *DataType = SQL_TIMESTAMP; + *ColumnSize = sizeof(SQL_TIMESTAMP_STRUCT); + *DecimalDigits = 0; + } break; + + case TSDB_DATA_TYPE_NCHAR: { + *DataType = SQL_CHAR; // unicode ? 
+ if (ColumnSize) *ColumnSize -= VARSTR_HEADER_SIZE; + } break; + + case TSDB_DATA_TYPE_BINARY: { + *DataType = SQL_BINARY; + if (ColumnSize) *ColumnSize -= VARSTR_HEADER_SIZE; + } break; + + default: + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, + "unknown [%s[%d/0x%x]]", taos_data_type(field->type), field->type, field->type); + return SQL_ERROR; + break; + } + } + if (Nullable) { + *Nullable = SQL_NULLABLE_UNKNOWN; + } + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLDescribeCol(SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLCHAR *ColumnName, + SQLSMALLINT BufferLength, SQLSMALLINT *NameLength, + SQLSMALLINT *DataType, SQLULEN *ColumnSize, + SQLSMALLINT *DecimalDigits, SQLSMALLINT *Nullable) +{ + SQLRETURN r; + r = doSQLDescribeCol(StatementHandle, ColumnNumber, ColumnName, + BufferLength, NameLength, + DataType, ColumnSize, + DecimalDigits, Nullable); + return r; +} + +static SQLRETURN doSQLNumParams(SQLHSTMT hstmt, SQLSMALLINT *pcpar) +{ + sql_t *sql = (sql_t*)hstmt; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->stmt) { + SET_ERROR(sql, "HY010", TSDB_CODE_ODBC_STATEMENT_NOT_READY, ""); + return SQL_ERROR; + } + + int insert = 0; + int r = taos_stmt_is_insert(sql->stmt, &insert); + if (r) { + SET_ERROR(sql, "HY000", terrno, ""); + return SQL_ERROR; + } + // if (!insert) { + // SET_ERROR(sql, "HY000", terrno, "taos does not provide count of parameters for statement other than insert"); + // return SQL_ERROR; + // } + + int params = 0; + r = taos_stmt_num_params(sql->stmt, ¶ms); + if (r) { + SET_ERROR(sql, "HY000", terrno, "fetch num of statement params failed"); + return SQL_ERROR; + } + + if (pcpar) *pcpar = params; + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API SQLNumParams(SQLHSTMT hstmt, SQLSMALLINT *pcpar) +{ + SQLRETURN r; + r = doSQLNumParams(hstmt, pcpar); + return r; +} + +static SQLRETURN doSQLSetStmtAttr(SQLHSTMT StatementHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER 
StringLength) +{ + sql_t *sql = (sql_t*)StatementHandle; + if (!sql) return SQL_ERROR; + + CHK_CONN(sql); + CHK_CONN_TAOS(sql); + + if (!sql->stmt) { + SET_ERROR(sql, "HY010", TSDB_CODE_ODBC_STATEMENT_NOT_READY, ""); + return SQL_ERROR; + } + + if (sql->is_executed) { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "change attr after executing statement not supported yet"); + return SQL_ERROR; + } + + switch (Attribute) { + case SQL_ATTR_PARAM_BIND_TYPE: { + SQLULEN val = (SQLULEN)Value; + if (val==SQL_BIND_BY_COLUMN) { + sql->rowlen = 0; + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "SQL_ATTR_PARAM_BIND_TYPE/SQL_BIND_BY_COLUMN"); + return SQL_ERROR; + } + sql->rowlen = val; + return SQL_SUCCESS; + } break; + case SQL_ATTR_PARAMSET_SIZE: { + SQLULEN val = (SQLULEN)Value; + DASSERT(val>0); + sql->n_rows = val; + return SQL_SUCCESS; + } break; + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: { + if (Value) { + SQLULEN val = *(SQLULEN*)Value; + sql->ptr_offset = val; + } else { + sql->ptr_offset = 0; + } + return SQL_SUCCESS; + } break; + default: { + SET_ERROR(sql, "HY000", TSDB_CODE_ODBC_NOT_SUPPORT, "Attribute:%d", Attribute); + } break; + } + return SQL_ERROR; +} + +SQLRETURN SQL_API SQLSetStmtAttr(SQLHSTMT StatementHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER StringLength) +{ + SQLRETURN r; + r = doSQLSetStmtAttr(StatementHandle, Attribute, Value, StringLength); + return r; +} + + + + +static void init_routine(void) { + if (0) { + string_conv(NULL, NULL, NULL, 0, NULL, 0, NULL, NULL); + utf8_to_ucs4le(NULL, NULL); + ucs4le_to_utf8(NULL, 0, NULL); + } + taos_init(); +} + +static int do_field_display_size(TAOS_FIELD *field) { + switch (field->type) { + case TSDB_DATA_TYPE_TINYINT: + return 5; + break; + + case TSDB_DATA_TYPE_SMALLINT: + return 7; + break; + + case TSDB_DATA_TYPE_INT: + return 12; + break; + + case TSDB_DATA_TYPE_BIGINT: + return 22; + break; + + case TSDB_DATA_TYPE_FLOAT: { + return 12; + } break; + + case 
TSDB_DATA_TYPE_DOUBLE: { + return 20; + } break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: { + return 3*(field->bytes - VARSTR_HEADER_SIZE) + 2; + } break; + + case TSDB_DATA_TYPE_TIMESTAMP: + return 26; + break; + + case TSDB_DATA_TYPE_BOOL: + return 7; + default: + break; + } + + return 10; +} + +// convertion from TSDB_DATA_TYPE_XXX to SQL_C_XXX +static SQLRETURN conv_tsdb_bool_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + int8_t v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + int8_t v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + int16_t v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + int32_t v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + int64_t v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + float v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + double v = b; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bool_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + DASSERT(target->len>0); + *target->soi = 1; + target->ptr[0] = '0' + b; + if (target->len>1) { + target->ptr[1] = '\0'; + } + + return SQL_SUCCESS; +} + +static SQLRETURN 
conv_tsdb_bool_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t b) +{ + DASSERT(target->len>0); + *target->soi = 1; + target->ptr[0] = '0' + b; + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_tinyint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + int8_t v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + int16_t v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + int32_t v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + int64_t v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + float v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + double v = v1; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v1_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v1); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TINYINT -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_v1_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int8_t v1) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v1); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TINYINT -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_v2_to_c_short(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + int16_t v = v2; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v2_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + int32_t v = v2; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v2_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + int64_t v = v2; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v2_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + float v = v2; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v2_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + double v = v2; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v2_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v2); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_SMALLINT -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_v2_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int16_t v2) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v2); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_SMALLINT -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + + +static SQLRETURN conv_tsdb_v4_to_c_long(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + int32_t v = v4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v4_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + int64_t v = v4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + float v = v4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + double v = v4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v4); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_INTEGER -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_v4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int32_t v4) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%d", v4); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_INTEGER -> SQL_C_BINARY"); + return SQL_SUCCESS_WITH_INFO; +} + + +static SQLRETURN conv_tsdb_v8_to_c_sbigint(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) +{ + int64_t v = v8; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v8_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) +{ + float v = v8; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) +{ + double v = v8; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_v8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%" PRId64 "", v8); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_BIGINT -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_v8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, int64_t v8) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%" PRId64 "", v8); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? 
target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_BIGINT -> SQL_C_BINARY"); + return SQL_SUCCESS_WITH_INFO; +} + + +static SQLRETURN conv_tsdb_f4_to_c_float(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) +{ + float v = f4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_f4_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) +{ + double v = f4; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_f4_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%g", f4); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_FLOAT -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_f4_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, float f4) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%g", f4); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_FLOAT -> SQL_C_BINARY"); + return SQL_SUCCESS_WITH_INFO; +} + + +static SQLRETURN conv_tsdb_f8_to_c_double(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) +{ + double v = f8; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_f8_to_c_char(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%.6f", f8); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>=target->len ? 
target->len : n+1)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_DOUBLE -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_f8_to_c_binary(sql_t *sql, c_target_t *target, TAOS_FIELD *field, double f8) +{ + char buf[64]; + int n = snprintf(buf, sizeof(buf), "%g", f8); + DASSERT(nsoi = n; + strncpy(target->ptr, buf, (n>target->len ? target->len : n)); + if (n<=target->len) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_DOUBLE -> SQL_C_BINARY"); + return SQL_SUCCESS_WITH_INFO; +} + + +static SQLRETURN conv_tsdb_ts_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) +{ + struct tm tm = {0}; + tm.tm_sec = ts->second; + tm.tm_min = ts->minute; + tm.tm_hour = ts->hour; + tm.tm_mday = ts->day; + tm.tm_mon = ts->month - 1; + tm.tm_year = ts->year - 1900; + time_t t = mktime(&tm); + DASSERT(sizeof(t) == sizeof(int64_t)); + int64_t v = (int64_t)t; + v *= 1000; + v += ts->fraction / 1000000; + memcpy(target->ptr, &v, sizeof(v)); + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_ts_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) +{ + struct tm tm = {0}; + tm.tm_sec = ts->second; + tm.tm_min = ts->minute; + tm.tm_hour = ts->hour; + tm.tm_mday = ts->day; + tm.tm_mon = ts->month - 1; + tm.tm_year = ts->year - 1900; + + char buf[64]; + int n = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); + DASSERT(n < sizeof(buf)); + + *target->soi = n; + + unsigned int fraction = ts->fraction; + fraction /= 1000000; + snprintf(target->ptr, target->len, "%s.%03d", buf, fraction); + if (target->soi) *target->soi = strlen((const char*)target->ptr); + + if (n <= target->len) { + return SQL_SUCCESS; + } + + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TIMESTAMP -> SQL_C_CHAR"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_ts_to_c_bin(sql_t 
*sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) +{ + struct tm tm = {0}; + tm.tm_sec = ts->second; + tm.tm_min = ts->minute; + tm.tm_hour = ts->hour; + tm.tm_mday = ts->day; + tm.tm_mon = ts->month - 1; + tm.tm_year = ts->year - 1900; + + char buf[64]; + int n = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm); + DASSERT(n < sizeof(buf)); + + unsigned int fraction = ts->fraction; + fraction /= 1000000; + snprintf(target->ptr, target->len, "%s.%03d", buf, fraction); + if (target->soi) *target->soi = strlen((const char*)target->ptr); + + if (n <= target->len) { + return SQL_SUCCESS; + } + + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_TIMESTAMP -> SQL_C_BINARY"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_ts_to_c_ts(sql_t *sql, c_target_t *target, TAOS_FIELD *field, SQL_TIMESTAMP_STRUCT *ts) +{ + DASSERT(target->len == sizeof(*ts)); + memcpy(target->ptr, ts, sizeof(*ts)); + *target->soi = target->len; + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_bin_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin) +{ + if (target->len<1) { + SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } + size_t field_bytes = field->bytes - VARSTR_HEADER_SIZE; + size_t n = strnlen((const char*)bin, field_bytes); + + if (n < target->len) { + memcpy(target->ptr, bin, n); + target->ptr[n] = '\0'; + *target->soi = n; + return SQL_SUCCESS; + } + n = target->len - 1; + *target->soi = n; + if (n > 0) { + memcpy(target->ptr, bin, n-1); + target->ptr[n-1] = '\0'; + } + SET_ERROR(sql, "01004", TSDB_CODE_ODBC_CONV_TRUNC, ""); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_bin_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const unsigned char *bin) +{ + if (target->len<1) { + SET_ERROR(sql, "HY090", TSDB_CODE_ODBC_BAD_ARG, ""); + return SQL_ERROR; + } + size_t field_bytes = field->bytes - VARSTR_HEADER_SIZE; + size_t n = strnlen((const 
char*)bin, field_bytes); + + if (n <= target->len) { + memcpy(target->ptr, bin, n); + if (nlen) target->ptr[n] = '\0'; + *target->soi = n; + return SQL_SUCCESS; + } + + n = target->len; + memcpy(target->ptr, bin, n); + *target->soi = n; + SET_ERROR(sql, "01004", TSDB_CODE_ODBC_CONV_TRUNC, ""); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_bit(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + int8_t v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 1; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; + } + + char buf[64]; + snprintf(buf, sizeof(buf), "%d", v); + + if (strcmp(buf, str)==0) { + if (v==0 || v==1) return SQL_SUCCESS; + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; + } + + if (f8>0 || f8<2) { + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; + } + + if (f8<0 || f8>2) { + SET_ERROR(sql, "22003", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; + } + + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_BIT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_v1(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + int8_t v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 1; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_TINYINT"); + return SQL_SUCCESS_WITH_INFO; + } + + char buf[64]; + snprintf(buf, sizeof(buf), "%d", v); + + if (strcmp(buf, str)==0) return SQL_SUCCESS; + 
+ if (f8>INT8_MAX || f8 SQL_C_TINYINT"); + return SQL_SUCCESS_WITH_INFO; + } + + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_TINYINT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_v2(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + int16_t v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 2; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SHORT"); + return SQL_SUCCESS_WITH_INFO; + } + + char buf[64]; + snprintf(buf, sizeof(buf), "%d", v); + + if (strcmp(buf, str)==0) return SQL_SUCCESS; + + if (f8>INT16_MAX || f8 SQL_C_SHORT"); + return SQL_SUCCESS_WITH_INFO; + } + + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SHORT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_v4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + int32_t v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 4; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_LONG"); + return SQL_SUCCESS_WITH_INFO; + } + + char buf[64]; + snprintf(buf, sizeof(buf), "%d", v); + + if (strcmp(buf, str)==0) return SQL_SUCCESS; + + if (f8>INT32_MAX || f8 SQL_C_LONG"); + return SQL_SUCCESS_WITH_INFO; + } + + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_LONG"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_v8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + int64_t v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 8; + + if (n!=1 || 
bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SBIGINT"); + return SQL_SUCCESS_WITH_INFO; + } + + char buf[64]; + snprintf(buf, sizeof(buf), "%" PRId64 "", v); + + if (strcmp(buf, str)==0) return SQL_SUCCESS; + + if (f8>INT64_MAX || f8 SQL_C_SBIGINT"); + return SQL_SUCCESS_WITH_INFO; + } + + SET_ERROR(sql, "01S07", TSDB_CODE_ODBC_CONV_TRUNC, "TSDB_DATA_TYPE_NCHAR -> SQL_C_SBIGINT"); + return SQL_SUCCESS_WITH_INFO; +} + +static SQLRETURN conv_tsdb_str_to_c_f4(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + float v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 4; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_FLOAT"); + return SQL_SUCCESS_WITH_INFO; + } + + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_str_to_c_f8(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + int bytes = 0; + double f8 = 0; + int n = sscanf(str, "%lf%n", &f8, &bytes); + + float v = f8; + memcpy(target->ptr, &v, sizeof(v)); + + *target->soi = 8; + + if (n!=1 || bytes!=strlen(str)) { + SET_ERROR(sql, "22018", TSDB_CODE_ODBC_CONV_UNDEF, "TSDB_DATA_TYPE_NCHAR -> SQL_C_DOUBLE"); + return SQL_SUCCESS_WITH_INFO; + } + + return SQL_SUCCESS; +} + +static SQLRETURN conv_tsdb_str_to_c_str(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + return conv_tsdb_bin_to_c_str(sql, target, field, (const unsigned char*)str); +} + +static SQLRETURN conv_tsdb_str_to_c_bin(sql_t *sql, c_target_t *target, TAOS_FIELD *field, const char *str) +{ + return conv_tsdb_bin_to_c_bin(sql, target, field, (const unsigned char*)str); +} + + + + +const char* tsdb_int64_to_bit(int64_t src, int8_t *dst) +{ + *dst = src; + if (src==0 || src==1) return NULL; + return "22003"; +} + +const char* tsdb_int64_to_tinyint(int64_t src, int8_t 
*dst) +{ + *dst = src; + if (src>=SCHAR_MIN && src<=SCHAR_MAX) return NULL; + return "22003"; +} + +const char* tsdb_int64_to_smallint(int64_t src, int16_t *dst) +{ + *dst = src; + if (src>=SHRT_MIN && src<=SHRT_MAX) return NULL; + return "22003"; +} + +const char* tsdb_int64_to_int(int64_t src, int32_t *dst) +{ + *dst = src; + if (src>=LONG_MIN && src<=LONG_MAX) return NULL; + return "22003"; +} + +const char* tsdb_int64_to_bigint(int64_t src, int64_t *dst) +{ + *dst = src; + return NULL; +} + +const char* tsdb_int64_to_ts(int64_t src, int64_t *dst) +{ + *dst = src; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%" PRId64 "", src); + DASSERT(n>=0); + DASSERT(n=2) return "22003"; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); + DASSERT(n>=0); + DASSERT(nSCHAR_MAX) return "22003"; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); + DASSERT(n>=0); + DASSERT(nSHRT_MAX) return "22003"; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); + DASSERT(n>=0); + DASSERT(nLONG_MAX) return "22003"; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); + DASSERT(n>=0); + DASSERT(nLLONG_MAX) return "22003"; + + char buf[4096]; + int n = snprintf(buf, sizeof(buf), "%.*g", precision, src); + DASSERT(n>=0); + DASSERT(n=0); + DASSERT(n=0); + DASSERT(n2>=0); + DASSERT(n1=0); + if (n>=dlen) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_bit(const char *src, int8_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + if (n!=1) return "22018"; + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (v==0 || v==1) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (v==0 || v==1) return NULL; + + return "22003"; +} + +const char* tsdb_chars_to_tinyint(const char *src, int8_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + 
if (n!=1) return "22018"; + + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (vSCHAR_MAX) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (vSCHAR_MAX) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_smallint(const char *src, int16_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + if (n!=1) return "22018"; + + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (vSHRT_MAX) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (vSHRT_MAX) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_int(const char *src, int32_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + if (n!=1) return "22018"; + + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (vLONG_MAX) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (vLONG_MAX) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_bigint(const char *src, int64_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + if (n!=1) return "22018"; + + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (vLLONG_MAX) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (vLLONG_MAX) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_ts(const char *src, int64_t *dst) +{ + int bytes = 0; + int64_t v = 0; + int n = sscanf(src, "%" PRId64 "%n", &v, &bytes); + if (n!=1) return "22018"; + + + if (bytes!=strlen(src)) { + if (src[bytes-1]=='.') { + if (vLLONG_MAX) return "22001"; + + return "22003"; + } + return "22018"; + } + + if (vLLONG_MAX) return "22001"; + + return NULL; +} + +const char* tsdb_chars_to_float(const char *src, float *dst) +{ + int bytes = 0; + int n = sscanf(src, "%f%n", dst, &bytes); + if (n!=1) return "22018"; + + if (bytes!=strlen(src)) return "22018"; + + return NULL; +} + +const char* tsdb_chars_to_double(const char 
*src, double *dst) +{ + int bytes = 0; + int n = sscanf(src, "%lf%n", dst, &bytes); + if (n!=1) return "22018"; + + if (bytes!=strlen(src)) return "22018"; + + return NULL; +} + +const char* tsdb_chars_to_char(const char *src, char *dst, size_t dlen) +{ + int n = snprintf(dst, dlen, "%s", src); + if (n>=dlen) return "22001"; + + return NULL; +} + diff --git a/src/connector/odbc/src/todbc_util.c b/src/connector/odbc/src/todbc_util.c new file mode 100644 index 0000000000000000000000000000000000000000..b6b45d8120d28da31dcc1d323893984f9b93519b --- /dev/null +++ b/src/connector/odbc/src/todbc_util.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "todbc_util.h" + +#include "iconv.h" + +#include +#include +#include +#include +#include + +const char* sql_sql_type(int type) { + switch (type) { + case SQL_BIT: return "SQL_BIT"; + case SQL_TINYINT: return "SQL_TINYINT"; + case SQL_SMALLINT: return "SQL_SMALLINT"; + case SQL_INTEGER: return "SQL_INTEGER"; + case SQL_BIGINT: return "SQL_BIGINT"; + case SQL_FLOAT: return "SQL_FLOAT"; + case SQL_DOUBLE: return "SQL_DOUBLE"; + case SQL_DECIMAL: return "SQL_DECIMAL"; + case SQL_NUMERIC: return "SQL_NUMERIC"; + case SQL_REAL: return "SQL_REAL"; + case SQL_CHAR: return "SQL_CHAR"; + case SQL_VARCHAR: return "SQL_VARCHAR"; + case SQL_LONGVARCHAR: return "SQL_LONGVARCHAR"; + case SQL_WCHAR: return "SQL_WCHAR"; + case SQL_WVARCHAR: return "SQL_WVARCHAR"; + case SQL_WLONGVARCHAR: return "SQL_WLONGVARCHAR"; + case SQL_BINARY: return "SQL_BINARY"; + case SQL_VARBINARY: return "SQL_VARBINARY"; + case SQL_LONGVARBINARY: return "SQL_LONGVARBINARY"; + case SQL_DATE: return "SQL_DATE"; + case SQL_TIME: return "SQL_TIME"; + case SQL_TIMESTAMP: return "SQL_TIMESTAMP"; + case SQL_TYPE_DATE: return "SQL_TYPE_DATE"; + case SQL_TYPE_TIME: return "SQL_TYPE_TIME"; + case SQL_TYPE_TIMESTAMP: return "SQL_TYPE_TIMESTAMP"; + case SQL_INTERVAL_MONTH: return "SQL_INTERVAL_MONTH"; + case SQL_INTERVAL_YEAR: return "SQL_INTERVAL_YEAR"; + case SQL_INTERVAL_YEAR_TO_MONTH: return "SQL_INTERVAL_YEAR_TO_MONTH"; + case SQL_INTERVAL_DAY: return "SQL_INTERVAL_DAY"; + case SQL_INTERVAL_HOUR: return "SQL_INTERVAL_HOUR"; + case SQL_INTERVAL_MINUTE: return "SQL_INTERVAL_MINUTE"; + case SQL_INTERVAL_SECOND: return "SQL_INTERVAL_SECOND"; + case SQL_INTERVAL_DAY_TO_HOUR: return "SQL_INTERVAL_DAY_TO_HOUR"; + case SQL_INTERVAL_DAY_TO_MINUTE: return "SQL_INTERVAL_DAY_TO_MINUTE"; + case SQL_INTERVAL_DAY_TO_SECOND: return "SQL_INTERVAL_DAY_TO_SECOND"; + case SQL_INTERVAL_HOUR_TO_MINUTE: return "SQL_INTERVAL_HOUR_TO_MINUTE"; + case SQL_INTERVAL_HOUR_TO_SECOND: return "SQL_INTERVAL_HOUR_TO_SECOND"; + 
case SQL_INTERVAL_MINUTE_TO_SECOND: return "SQL_INTERVAL_MINUTE_TO_SECOND"; + case SQL_GUID: return "SQL_GUID"; + default: return "UNKNOWN"; + } +} + +const char* sql_c_type(int type) { + switch (type) { + case SQL_C_CHAR: return "SQL_C_CHAR"; + case SQL_C_WCHAR: return "SQL_C_WCHAR"; + case SQL_C_SHORT: return "SQL_C_SHORT"; + case SQL_C_SSHORT: return "SQL_C_SSHORT"; + case SQL_C_USHORT: return "SQL_C_USHORT"; + case SQL_C_LONG: return "SQL_C_LONG"; + case SQL_C_SLONG: return "SQL_C_SLONG"; + case SQL_C_ULONG: return "SQL_C_ULONG"; + case SQL_C_FLOAT: return "SQL_C_FLOAT"; + case SQL_C_DOUBLE: return "SQL_C_DOUBLE"; + case SQL_C_BIT: return "SQL_C_BIT"; + case SQL_C_TINYINT: return "SQL_C_TINYINT"; + case SQL_C_STINYINT: return "SQL_C_STINYINT"; + case SQL_C_UTINYINT: return "SQL_C_UTINYINT"; + case SQL_C_SBIGINT: return "SQL_C_SBIGINT"; + case SQL_C_UBIGINT: return "SQL_C_UBIGINT"; + case SQL_C_BINARY: return "SQL_C_BINARY"; + case SQL_C_DATE: return "SQL_C_DATE"; + case SQL_C_TIME: return "SQL_C_TIME"; + case SQL_C_TIMESTAMP: return "SQL_C_TIMESTAMP"; + case SQL_C_TYPE_DATE: return "SQL_C_TYPE_DATE"; + case SQL_C_TYPE_TIME: return "SQL_C_TYPE_TIME"; + case SQL_C_TYPE_TIMESTAMP: return "SQL_C_TYPE_TIMESTAMP"; + case SQL_C_NUMERIC: return "SQL_C_NUMERIC"; + case SQL_C_GUID: return "SQL_C_GUID"; + default: return "UNKNOWN"; + } +} + +int is_valid_sql_c_type(int type) { + const char *ctype = sql_c_type(type); + if (strcmp(ctype, "UNKNOWN")==0) return 0; + return 1; +} + +int is_valid_sql_sql_type(int type) { + const char *sqltype = sql_sql_type(type); + if (strcmp(sqltype, "UNKNOWN")==0) return 0; + return 1; +} + +int string_conv(const char *fromcode, const char *tocode, + const unsigned char *src, size_t sbytes, + unsigned char *dst, size_t dbytes, + size_t *consumed, size_t *generated) +{ + if (consumed) *consumed = 0; + if (generated) *generated = 0; + + if (dbytes <= 0) return -1; + dst[0] = '\0'; + + iconv_t conv = iconv_open(tocode, fromcode); + if (!conv) 
return -1; + + int r = 0; + do { + char *s = (char*)src; + char *d = (char*)dst; + size_t sl = sbytes; + size_t dl = dbytes; + + r = iconv(conv, &s, &sl, &d, &dl); + *d = '\0'; + + if (consumed) *consumed = sbytes - sl; + if (generated) *generated = dbytes - dl; + + } while (0); + + iconv_close(conv); + return r; +} + +int utf8_chars(const char *src) +{ + const char *fromcode = "UTF-8"; + const char *tocode = "UCS-2LE"; + iconv_t conv = iconv_open(tocode, fromcode); + if (!conv) return -1; + + size_t slen = strlen(src); + char buf[4096]; + size_t dlen = sizeof(buf); + char *ps = (char*)src; + char *pd = buf; + iconv(conv, &ps, &slen, &pd, &dlen); + DASSERT(slen==0); + + size_t chars = (sizeof(buf) - dlen) / 2; + iconv_close(conv); + return chars; +} + +unsigned char* utf8_to_ucs4le(const char *utf8, size_t *chars) +{ + const char *tocode = "UCS-4LE"; + const char *fromcode = "UTF-8"; + + iconv_t conv = iconv_open(tocode, fromcode); + if (!conv) return NULL; + + unsigned char *ucs4le = NULL; + + do { + size_t slen = strlen(utf8); + size_t dlen = slen * 4; + + ucs4le = (unsigned char*)malloc(dlen+1); + if (!ucs4le) break; + + char *src = (char*)utf8; + char *dst = (char*)ucs4le; + size_t s = slen; + size_t d = dlen; + iconv(conv, &src, &s, &dst, &d); + dst[0] = '\0'; + + if (chars) *chars = (dlen - d) / 4; + } while (0); + + iconv_close(conv); + return ucs4le; +} + +char* ucs4le_to_utf8(const unsigned char *ucs4le, size_t slen, size_t *chars) +{ + const char *fromcode = "UCS-4LE"; + const char *tocode = "UTF-8"; + + iconv_t conv = iconv_open(tocode, fromcode); + if (!conv) return NULL; + + char *utf8 = NULL; + + do { + size_t dlen = slen; + + utf8 = (char*)malloc(dlen+1); + if (!utf8) break; + + char *dst = utf8; + char *src = (char*)ucs4le; + size_t s = slen; + size_t d = dlen; + iconv(conv, &src, &s, &dst, &d); + dst[0] = '\0'; + + if (chars) *chars = (slen - s) / 4; + } while (0); + + iconv_close(conv); + return utf8; +} + +SQLCHAR* wchars_to_chars(const SQLWCHAR 
*wchars, size_t chs, size_t *bytes) +{ + size_t dlen = chs * 4; + SQLCHAR *dst = (SQLCHAR*)malloc(dlen + 1); + if (!dst) return NULL; + + string_conv("UCS-2LE", "UTF-8", (const unsigned char*)wchars, chs * sizeof(*wchars), dst, dlen + 1, NULL, bytes); + + return dst; +} + diff --git a/src/connector/odbc/src/todbc_util.h b/src/connector/odbc/src/todbc_util.h new file mode 100644 index 0000000000000000000000000000000000000000..43264975b4e618bd495691e59fb9df59f6664e03 --- /dev/null +++ b/src/connector/odbc/src/todbc_util.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TODBC_UTIL_H_ +#define _TODBC_UTIL_H_ + +#include +#include +#include +#include +#include + +#define D(fmt, ...) \ + fprintf(stderr, \ + "%s[%d]:%s() " fmt "\n", \ + basename((char*)__FILE__), __LINE__, __func__, \ + ##__VA_ARGS__) + +#define DASSERT(statement) \ +do { \ + if (statement) break; \ + D("Assertion failure: %s", #statement); \ + abort(); \ +} while (0) + +#define DASSERTX(statement, fmt, ...) 
\ +do { \ + if (statement) break; \ + D("Assertion failure: %s, " fmt "", #statement, ##__VA_ARGS__); \ + abort(); \ +} while (0) + + + +const char* sql_sql_type(int type); +const char* sql_c_type(int type); + +int is_valid_sql_c_type(int type); +int is_valid_sql_sql_type(int type); + +int string_conv(const char *fromcode, const char *tocode, + const unsigned char *src, size_t sbytes, + unsigned char *dst, size_t dbytes, + size_t *consumed, size_t *generated); +int utf8_chars(const char *src); + +unsigned char* utf8_to_ucs4le(const char *utf8, size_t *chars); +char* ucs4le_to_utf8(const unsigned char *ucs4le, size_t slen, size_t *chars); +SQLCHAR* wchars_to_chars(const SQLWCHAR *wchars, size_t chs, size_t *bytes); + +#endif // _TODBC_UTIL_H_ diff --git a/src/connector/odbc/tests/CMakeLists.txt b/src/connector/odbc/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac57a5647fce8bd036e133936284f3f4c847d8c8 --- /dev/null +++ b/src/connector/odbc/tests/CMakeLists.txt @@ -0,0 +1,7 @@ +PROJECT(TDengine) + +IF (TD_LINUX) + AUX_SOURCE_DIRECTORY(. SRC) + ADD_EXECUTABLE(tcodbc main.c) + TARGET_LINK_LIBRARIES(tcodbc odbc) +ENDIF () diff --git a/src/connector/odbc/tests/main.c b/src/connector/odbc/tests/main.c new file mode 100644 index 0000000000000000000000000000000000000000..1ac9b71369e8a526b051008b9379fdda4b6a5e77 --- /dev/null +++ b/src/connector/odbc/tests/main.c @@ -0,0 +1,231 @@ +#include +#include + +#include +#include + +#include "os.h" + +// static const char *dsn = "TAOS_DSN"; +// static const char *uid = "root"; +// static const char *pwd = "taosdata"; + +typedef struct data_s data_t; +struct data_s { + int64_t ts; + int8_t b; + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + char bin[40+1]; + char blob[40+1]; // why 80? 
ref: tests/examples/c/apitest.c +}; + +static const char *pre_stmts[] = { + "create database db", + "use db", + "create table t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))" +}; + +static const char *pro_stmts[] = { + // "insert into t values ('2019-07-15 00:00:00', 1)", + // "insert into t values ('2019-07-15 01:00:00', 2)", + "select * from t" + // "drop database db" +}; + +#define CHK_RESULT(r, ht, h) \ +do { \ + if (r==0) break; \ + SQLCHAR ss[10]; \ + SQLINTEGER ne = 0; \ + SQLCHAR es[4096]; \ + SQLSMALLINT n = 0; \ + ss[0] = '\0'; \ + es[0] = '\0'; \ + SQLRETURN ret = SQLGetDiagRec(ht, h, 1, ss, &ne, es, sizeof(es), &n); \ + if (ret) break; \ + fprintf(stderr, "%s%s\n", ss, es); \ +} while (0) + +static int do_statement(SQLHSTMT stmt, const char *statement) { + SQLRETURN r = 0; + do { + fprintf(stderr, "prepare [%s]\n", statement); + r = SQLPrepare(stmt, (SQLCHAR*)statement, strlen(statement)); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + fprintf(stderr, "execute [%s]\n", statement); + r = SQLExecute(stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + fprintf(stderr, "done\n"); + } while (0); + fprintf(stderr, "r: [%x][%d]\n", r, r); + return r; +} + +static int do_insert(SQLHSTMT stmt, data_t data) { + SQLRETURN r = 0; + SQLLEN lbin; + SQLLEN lblob; + + const char *statement = "insert into t values (?, ?, ?, ?, ?, ?, ?, ?, ?,?)"; + int ignored = 0; + + do { + fprintf(stderr, "prepare [%s]\n", statement); + r = SQLPrepare(stmt, (SQLCHAR*)statement, strlen(statement)); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 1 [%s]\n", statement); + r = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_TIMESTAMP, ignored, ignored, &data.ts, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 2 [%s]\n", statement); + r = SQLBindParameter(stmt, 2, SQL_PARAM_INPUT, SQL_C_BIT, 
SQL_BIT, ignored, ignored, &data.b, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 3 [%s]\n", statement); + r = SQLBindParameter(stmt, 3, SQL_PARAM_INPUT, SQL_C_TINYINT, SQL_TINYINT, ignored, ignored, &data.v1, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 4 [%s]\n", statement); + r = SQLBindParameter(stmt, 4, SQL_PARAM_INPUT, SQL_C_SHORT, SQL_SMALLINT, ignored, ignored, &data.v2, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 5 [%s]\n", statement); + r = SQLBindParameter(stmt, 5, SQL_PARAM_INPUT, SQL_C_LONG, SQL_INTEGER, ignored, ignored, &data.v4, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 6 [%s]\n", statement); + r = SQLBindParameter(stmt, 6, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_BIGINT, ignored, ignored, &data.v8, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 7 [%s]\n", statement); + r = SQLBindParameter(stmt, 7, SQL_PARAM_INPUT, SQL_C_FLOAT, SQL_FLOAT, ignored, ignored, &data.f4, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 8 [%s]\n", statement); + r = SQLBindParameter(stmt, 8, SQL_PARAM_INPUT, SQL_C_DOUBLE, SQL_DOUBLE, ignored, ignored, &data.f8, ignored, NULL); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 9 [%s]\n", statement); + lbin = SQL_NTS; + r = SQLBindParameter(stmt, 9, SQL_PARAM_INPUT, SQL_C_BINARY, SQL_VARBINARY, sizeof(data.bin)-1, ignored, &data.bin, ignored, &lbin); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "bind 10 [%s]\n", statement); + lblob = SQL_NTS; + r = SQLBindParameter(stmt, 10, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR, sizeof(data.blob)-1, ignored, &data.blob, ignored, &lblob); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + fprintf(stderr, "execute 
[%s]\n", statement); + r = SQLExecute(stmt); + CHK_RESULT(r, SQL_HANDLE_STMT, stmt); + if (r) break; + + // ts += 1; + // v = 2; + // fprintf(stderr, "execute [%s]\n", statement); + // r = SQLExecute(stmt); + // if (r) break; + + fprintf(stderr, "done\n"); + } while (0); + fprintf(stderr, "r: [%x][%d]\n", r, r); + return r; +} + +int main(int argc, char *argv[]) { + if (argc < 4) return 1; + const char *dsn = argv[1]; + const char *uid = argv[2]; + const char *pwd = argv[3]; + SQLRETURN r; + SQLHENV env = {0}; + SQLHDBC conn = {0}; + r = SQLAllocEnv(&env); + if (r!=SQL_SUCCESS) return 1; + do { + r = SQLAllocConnect(env, &conn); + CHK_RESULT(r, SQL_HANDLE_ENV, env); + if (r!=SQL_SUCCESS) break; + do { + r = SQLConnect(conn, (SQLCHAR*)dsn, strlen(dsn), + (SQLCHAR*)uid, strlen(uid), + (SQLCHAR*)pwd, strlen(pwd)); + CHK_RESULT(r, SQL_HANDLE_DBC, conn); + if (r!=SQL_SUCCESS) break; + do { + SQLHSTMT stmt = {0}; + r = SQLAllocHandle(SQL_HANDLE_STMT, conn, &stmt); + if (r!=SQL_SUCCESS) break; + do { + do_statement(stmt, "drop database db"); + for (size_t i=0; i ?", 4) +row = cursor.fetchone() +while row: + print(row) + row = cursor.fetchone() +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("SELECT * from db.v where v1 > ?", '5') +row = cursor.fetchone() +while row: + print(row) + row = cursor.fetchone() +cursor.close() + diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index c26e5c0967918bd45f362b3df6267561e80dfd66..32859f6b340b4393b523d475bce1ac89cef1a040 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -225,6 +225,7 @@ class CTaosInterface(object): if connection.value == None: print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") # sys.exit(1) else: print('connect to TDengine success') diff --git a/src/connector/python/linux/python2/taos/cursor.py 
b/src/connector/python/linux/python2/taos/cursor.py index 8c268d8afba2b971709fba6f157abfebdc8dfd1a..37c02d330e856717b5ed0bdac76723cf64d3860b 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -192,8 +192,10 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index c9d0551af5faedc0b9726fa9be28337f26bba079..609154a3a4b38cfd1cf7976fb755e4aef7be3354 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -225,6 +225,7 @@ class CTaosInterface(object): if connection.value == None: print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") # sys.exit(1) #else: # print('connect to TDengine success') @@ -414,4 +415,4 @@ if __name__ == '__main__': print(data) cinter.freeResult(result) - cinter.close(conn) \ No newline at end of file + cinter.close(conn) diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index 3f0f315d3388fcac1c752c847b7fa1c412b06749..ec7a85ee1a3f8cb0cd49aca8c2a4242dca89021e 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -207,8 +207,10 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, 
self._fields) + block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py index 7eee3bfc8f8559454f53aa967f5ad0294a1cb2bf..8714fe77cb739f23f79247a41d72aa127b6d6d25 100644 --- a/src/connector/python/windows/python2/taos/cursor.py +++ b/src/connector/python/windows/python2/taos/cursor.py @@ -142,6 +142,9 @@ class TDengineCursor(object): self._rowcount = 0 while True: block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py index 5f5aa4e1d7d9b454132f533a9e84cca2859db735..c2c442b06ee71ae90ec63662886c709e38d4d2ad 100644 --- a/src/connector/python/windows/python3/taos/cursor.py +++ b/src/connector/python/windows/python3/taos/cursor.py @@ -142,6 +142,9 @@ class TDengineCursor(object): self._rowcount = 0 while True: block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index 001c73eb3946fe46984c25758d9e0ac2a678dd37..46376159c6782efde7adbd19c75af83aa4cde397 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -97,7 
+97,7 @@ void dnodeCleanupModules() { } } - if (tsModule[TSDB_MOD_MNODE].enable && tsModule[TSDB_MOD_MNODE].cleanUpFp) { + if (tsModule[TSDB_MOD_MNODE].cleanUpFp) { (*tsModule[TSDB_MOD_MNODE].cleanUpFp)(); } } diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 892eaae0be4be19b9b37bba6bc602d26c394b3ff..7324e5a885c7018c41954fded5199e8c93c23b82 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -174,15 +174,15 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char return rpcRsp.code; } -void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid) { - dDebug("vgId:%d, sid:%d send config table msg to mnode", vgId, sid); +void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { + dDebug("vgId:%d, tid:%d send config table msg to mnode", vgId, tid); int32_t contLen = sizeof(SDMConfigTableMsg); SDMConfigTableMsg *pMsg = rpcMallocCont(contLen); pMsg->dnodeId = htonl(dnodeGetDnodeId()); pMsg->vgId = htonl(vgId); - pMsg->sid = htonl(sid); + pMsg->tid = htonl(tid); SRpcMsg rpcMsg = {0}; rpcMsg.pCont = pMsg; @@ -195,18 +195,18 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid) { if (rpcRsp.code != 0) { rpcFreeCont(rpcRsp.pCont); - dError("vgId:%d, sid:%d failed to config table from mnode", vgId, sid); + dError("vgId:%d, tid:%d failed to config table from mnode", vgId, tid); return NULL; } else { - dInfo("vgId:%d, sid:%d config table msg is received", vgId, sid); + dInfo("vgId:%d, tid:%d config table msg is received", vgId, tid); // delete this after debug finished SMDCreateTableMsg *pTable = rpcRsp.pCont; int16_t numOfColumns = htons(pTable->numOfColumns); int16_t numOfTags = htons(pTable->numOfTags); - int32_t sid = htonl(pTable->sid); + int32_t tableId = htonl(pTable->tid); uint64_t uid = htobe64(pTable->uid); - dInfo("table:%s, numOfColumns:%d numOfTags:%d sid:%d uid:%" PRIu64, pTable->tableId, numOfColumns, numOfTags, sid, uid); + dInfo("table:%s, numOfColumns:%d numOfTags:%d tid:%d uid:%" 
PRIu64, pTable->tableId, numOfColumns, numOfTags, tableId, uid); return rpcRsp.pCont; } diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index 1a3d0ebc27e432d5038301908514f941cc0dfb18..e61158ef30dddd6037af36c61be1fd522f8af10b 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -189,7 +189,6 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) { void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) { rpcFreeCont(pRead->rpcMsg.pCont); vnodeRelease(pVnode); - return; } static void *dnodeProcessReadQueue(void *param) { @@ -213,7 +212,8 @@ static void *dnodeProcessReadQueue(void *param) { } else { if (code == TSDB_CODE_QRY_HAS_RSP) { dnodeSendRpcReadRsp(pVnode, pReadMsg, pReadMsg->rpcMsg.code); - } else { // code == TSDB_CODE_NOT_READY, do not return msg to client + } else { // code == TSDB_CODE_QRY_NOT_READY, do not return msg to client + assert(pReadMsg->rpcMsg.handle == NULL || (pReadMsg->rpcMsg.handle != NULL && pReadMsg->rpcMsg.msgType == 5)); dnodeDispatchNonRspMsg(pVnode, pReadMsg, code); } } diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 4511080f2883033f391f9c2773354ee12c45e2fc..5a89d00246d936ad66ae0992680032fb3fef987e 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -190,6 +190,7 @@ void dnodeFreeVnodeWqueue(void *wqueue) { void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code) { SWriteMsg *pWrite = (SWriteMsg *)param; + if (pWrite == NULL) return; if (code < 0) pWrite->code = code; int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1); diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 83d2a4ad9c58a77510aacf076296213779327326..e84545be1753f3fedb4ee78acf397c91d824ad8b 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -49,7 +49,7 @@ void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); void 
dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp); void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet); -void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid); +void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid); void *dnodeAllocateVnodeWqueue(void *pVnode); void dnodeFreeVnodeWqueue(void *queue); diff --git a/src/inc/query.h b/src/inc/query.h index 0c18f85dc31bae5e77bae7228d5390a8d32df07a..5e1de77889cc469566cc94b729c55622e5462bd6 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -78,7 +78,6 @@ int32_t qKillQuery(qinfo_t qinfo); int32_t qQueryCompleted(qinfo_t qinfo); - /** * destroy query info structure * @param qHandle diff --git a/src/inc/taos.h b/src/inc/taos.h index 7e8f174b7c3737463f7e66dfdc6d5cd906791ee3..315313734753de73bf477b1f67783a45c38c87c9 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -22,12 +22,12 @@ extern "C" { #endif -typedef void TAOS; -typedef void** TAOS_ROW; -typedef void TAOS_RES; -typedef void TAOS_SUB; -typedef void TAOS_STREAM; -typedef void TAOS_STMT; +typedef void TAOS; +typedef void TAOS_STMT; +typedef void TAOS_RES; +typedef void TAOS_STREAM; +typedef void TAOS_SUB; +typedef void **TAOS_ROW; // Data type definition #define TSDB_DATA_TYPE_NULL 0 // 1 bytes @@ -69,6 +69,8 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); DLL_EXPORT void taos_close(TAOS *taos); +const char *taos_data_type(int type); + typedef struct TAOS_BIND { int buffer_type; void * buffer; @@ -77,10 +79,26 @@ typedef struct TAOS_BIND { int * is_null; int is_unsigned; // unused int * error; // unused + union { + int64_t ts; + int8_t b; + int8_t v1; + int16_t v2; + int32_t v4; + int64_t v8; + float f4; + double f8; + unsigned char *bin; + char *nchar; + } u; + unsigned int allocated; } TAOS_BIND; TAOS_STMT *taos_stmt_init(TAOS *taos); int taos_stmt_prepare(TAOS_STMT *stmt, const 
char *sql, unsigned long length); +int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); +int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); +int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); int taos_stmt_add_batch(TAOS_STMT *stmt); int taos_stmt_execute(TAOS_STMT *stmt); diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 08621a81c6399d3ed542924af07f876d69c47ce4..310bd78c7efd6af8304dbf29288fa1e09161ea42 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -74,6 +74,12 @@ TAOS_DEFINE_ERROR(TSDB_CODE_COM_MEMORY_CORRUPTED, 0, 0x0101, "Memory cor TAOS_DEFINE_ERROR(TSDB_CODE_COM_OUT_OF_MEMORY, 0, 0x0102, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_COM_INVALID_CFG_MSG, 0, 0x0103, "Invalid config message") TAOS_DEFINE_ERROR(TSDB_CODE_COM_FILE_CORRUPTED, 0, 0x0104, "Data file corrupted") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, 0, 0x0105, "Ref out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, 0, 0x0106, "too many Ref Objs") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ID_REMOVED, 0, 0x0107, "Ref ID is removed") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_INVALID_ID, 0, 0x0108, "Invalid Ref ID") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_ALREADY_EXIST, 0, 0x0109, "Ref is already there") +TAOS_DEFINE_ERROR(TSDB_CODE_REF_NOT_EXIST, 0, 0x010A, "Ref is not there") //client TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_SQL, 0, 0x0200, "Invalid SQL statement") @@ -230,6 +236,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, 0, 0x0707, "Query not TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, 0, 0x0708, "Query should response") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, 0, 0x0709, "Multiple retrieval of this query") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, 0, 0x070A, "Too many time window in query") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, 0, 0x070B, "Query buffer limit has reached") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, 0, 0x0800, "License expired") @@ -354,6 +361,23 @@ 
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, 0, 0x11A4, "tag value TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, 0, 0x11A5, "value not find") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, 0, 0x11A6, "value type should be boolean, number or string") + +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, 0, 0x2101, "out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_UNDEF, 0, 0x2102, "convertion undefined") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, 0, 0x2103, "convertion truncated") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, 0, 0x2104, "convertion not supported") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, 0, 0x2105, "out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, 0, 0x2106, "not supported yet") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, 0, 0x2107, "invalid handle") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, 0, 0x2108, "no result set") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, 0, 0x2109, "no fields returned") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, 0, 0x2110, "invalid cursor") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, 0, 0x2111, "statement not ready") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, 0, 0x2112, "connection still busy") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, 0, 0x2113, "bad connection string") +TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, 0, 0x2114, "bad argument") + + #ifdef TAOS_ERROR_C }; #endif diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 528bffce2ec1fb8da2a18df423aeecfb2a0ea410..1fc820dc43236449a9150a50f2b46377645ced43 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -107,6 +107,10 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY12, "dummy12" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY13, "dummy13" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY14, "dummy14" ) + +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "network-test" ) + + #ifndef TAOS_MESSAGE_C TSDB_MSG_TYPE_MAX // 105 #endif @@ -183,10 +187,16 @@ extern char *taosMsg[]; #pragma 
pack(push, 1) +// null-terminated string instead of char array to avoid too many memory consumption in case of more than 1M tableMeta typedef struct { char fqdn[TSDB_FQDN_LEN]; uint16_t port; -} SEpAddr; +} SEpAddrMsg; + +typedef struct { + char* fqdn; + uint16_t port; +} SEpAddr1; typedef struct { int32_t numOfVnodes; @@ -246,7 +256,7 @@ typedef struct { int8_t tableType; int16_t numOfColumns; int16_t numOfTags; - int32_t sid; + int32_t tid; int32_t sversion; int32_t tversion; int32_t tagDataLen; @@ -364,7 +374,7 @@ typedef struct { typedef struct { int32_t contLen; int32_t vgId; - int32_t sid; + int32_t tid; uint64_t uid; char tableId[TSDB_TABLE_FNAME_LEN]; } SMDDropTableMsg; @@ -411,6 +421,7 @@ typedef struct SExprInfo { int16_t bytes; int16_t type; int32_t interBytes; + int64_t uid; } SExprInfo; typedef struct SColumnFilterInfo { @@ -672,16 +683,27 @@ typedef struct SCMSTableVgroupMsg { } SCMSTableVgroupMsg, SCMSTableVgroupRspMsg; typedef struct { - int32_t vgId; - int8_t numOfEps; - SEpAddr epAddr[TSDB_MAX_REPLICA]; + int32_t vgId; + int8_t numOfEps; + SEpAddr1 epAddr[TSDB_MAX_REPLICA]; } SCMVgroupInfo; +typedef struct { + int32_t vgId; + int8_t numOfEps; + SEpAddrMsg epAddr[TSDB_MAX_REPLICA]; +} SCMVgroupMsg; + typedef struct { int32_t numOfVgroups; SCMVgroupInfo vgroups[]; } SVgroupsInfo; +typedef struct { + int32_t numOfVgroups; + SCMVgroupMsg vgroups[]; +} SVgroupsMsg; + typedef struct STableMetaMsg { int32_t contLen; char tableId[TSDB_TABLE_FNAME_LEN]; // table id @@ -692,9 +714,9 @@ typedef struct STableMetaMsg { int16_t numOfColumns; int16_t sversion; int16_t tversion; - int32_t sid; + int32_t tid; uint64_t uid; - SCMVgroupInfo vgroup; + SCMVgroupMsg vgroup; SSchema schema[]; } STableMetaMsg; @@ -740,7 +762,7 @@ typedef struct { typedef struct { int32_t dnodeId; int32_t vgId; - int32_t sid; + int32_t tid; } SDMConfigTableMsg; typedef struct { diff --git a/src/inc/twal.h b/src/inc/twal.h index 
92204abd7d34a9ee2eebf1b74c2e8d58b9599f17..1ce7b132b068526f87fa594845bb591ade6f4966 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -22,22 +22,22 @@ extern "C" { #define TAOS_WAL_NOLOG 0 #define TAOS_WAL_WRITE 1 #define TAOS_WAL_FSYNC 2 - + typedef struct { - int8_t msgType; - int8_t reserved[3]; - int32_t len; - uint64_t version; - uint32_t signature; - uint32_t cksum; - char cont[]; + int8_t msgType; + int8_t reserved[3]; + int32_t len; + uint64_t version; + uint32_t signature; + uint32_t cksum; + char cont[]; } SWalHead; typedef struct { - int8_t walLevel; // wal level - int32_t fsyncPeriod; // millisecond - int8_t wals; // number of WAL files; - int8_t keep; // keep the wal file when closed + int8_t walLevel; // wal level + int32_t fsyncPeriod; // millisecond + int8_t wals; // number of WAL files; + int8_t keep; // keep the wal file when closed } SWalCfg; typedef void* twalh; // WAL HANDLE @@ -53,9 +53,6 @@ int walRestore(twalh, void *pVnode, FWalWrite writeFp); int walGetWalFile(twalh, char *name, uint32_t *index); int64_t walGetVersion(twalh); -extern int wDebugFlag; - - #ifdef __cplusplus } #endif diff --git a/src/inc/vnode.h b/src/inc/vnode.h index 15ddb6afee7c3ac2914df8133df24f6ef80a0a8a..fdce4d62794075bd2e7027b125780fbd7a2deaed 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -41,6 +41,8 @@ typedef struct { SRpcMsg rpcMsg; } SReadMsg; +extern char *vnodeStatus[]; + int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId, char *rootDir); @@ -54,6 +56,7 @@ void vnodeRelease(void *pVnode); // dec refCount void* vnodeGetWal(void *pVnode); int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item); +int32_t vnodeCheckWrite(void *pVnode); int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); void vnodeBuildStatusMsg(void *param); void vnodeConfirmForward(void *param, uint64_t version, int32_t code); @@ -63,6 +66,7 @@ int32_t vnodeInitResources(); void 
vnodeCleanupResources(); int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg); +int32_t vnodeCheckRead(void *pVnode); #ifdef __cplusplus } diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 765181dbba5c92ca9dc80adb7376e68c70237d6d..f508d186083c84482080f2f8fe251173733f1366 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -60,7 +60,7 @@ typedef struct SShellArguments { extern void shellParseArgument(int argc, char* argv[], SShellArguments* arguments); extern TAOS* shellInit(SShellArguments* args); extern void* shellLoopQuery(void* arg); -extern void taos_error(TAOS* con); +extern void taos_error(TAOS_RES* tres); extern int regex_match(const char* s, const char* reg, int cflags); void shellReadCommand(TAOS* con, char command[]); int32_t shellRunCommand(TAOS* con, char* command); @@ -72,7 +72,7 @@ void source_dir(TAOS* con, SShellArguments* args); void get_history_path(char* history); void cleanup_handler(void* arg); void exitShell(); -int shellDumpResult(TAOS* con, char* fname, int* error_no, bool printMode); +int shellDumpResult(TAOS_RES* con, char* fname, int* error_no, bool printMode); void shellGetGrantInfo(void *con); int isCommentLine(char *line); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 24388bf50c7a215920a58e79f8d33ab81752c547..748b7e792982352de611227d82a844448d251ee2 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -493,7 +493,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { if (i > 0) { fputc(',', fp); } - dumpFieldToFile(fp, row[i], fields +i, length[i], precision); + dumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); } fputc('\n', fp); @@ -619,7 +619,7 @@ static int verticalPrintResult(TAOS_RES* tres) { int padding = (int)(maxColNameLen - strlen(field->name)); printf("%*.s%s: ", padding, " ", field->name); - printField(row[i], field, 0, length[i], precision); + printField((const 
char*)row[i], field, 0, length[i], precision); putchar('\n'); } @@ -720,7 +720,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { int32_t* length = taos_fetch_lengths(tres); for (int i = 0; i < num_fields; i++) { putchar(' '); - printField(row[i], fields + i, width[i], length[i], precision); + printField((const char*)row[i], fields + i, width[i], length[i], precision); putchar(' '); putchar('|'); } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index ee0a90757bb51a98dd2bf535319afc215dbc04a8..af61995c618bc80389b8abf6d8c6f6c929327925 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -204,7 +204,7 @@ static void shellSourceFile(TAOS *con, char *fptr) { int32_t code = taos_errno(pSql); if (code != 0) { - fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo); + fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(pSql), fname, lineNo); } /* free local resouce: allocated memory/metric-meta refcnt */ @@ -243,7 +243,7 @@ static void shellRunImportThreads(SShellArguments* args) pThread->totalThreads = args->threadNum; pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsDnodeShellPort); if (pThread->taos == NULL) { - fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taos)); + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, "null taos"/*taos_errstr(pThread->taos)*/); exit(0); } diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 6cb7c669cc7a08434b2558588067d007b51b3595..2083ad3e9b7d5a101f35af4fe20dcd38957d5be2 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -80,7 +80,10 @@ int main(int argc, char* argv[]) { shellParseArgument(argc, argv, &args); if (args.netTestRole && args.netTestRole[0] != 0) { - taosNetTest(args.host, (uint16_t)args.port, (uint16_t)args.endPort, 
args.pktLen, args.netTestRole); + taos_init(); + CmdArguments cmdArgs; + memcpy(&cmdArgs, &args, sizeof(SShellArguments)); + taosNetTest(&cmdArgs); exit(0); } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 67174827d4ce005ad579fbb9ab3c72f1828ad095..ce986813918249ebe501e92d3af307a67c296907 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -90,11 +90,12 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } else if (strcmp(argv[i], "-c") == 0) { if (i < argc - 1) { - if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { - fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); + char *tmp = argv[++i]; + if (strlen(tmp) >= TSDB_FILENAME_LEN) { + fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); } - strcpy(configDir, argv[++i]); + strcpy(configDir, tmp); } else { fprintf(stderr, "Option -c requires an argument\n"); exit(EXIT_FAILURE); diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h index 8a2947dd180218dfd2ddc91d52b2825ccb011eae..4bc840f026d41dc4fe4e8d61c84d53140501bff6 100644 --- a/src/mnode/inc/mnodeDef.h +++ b/src/mnode/inc/mnodeDef.h @@ -115,7 +115,7 @@ typedef struct { uint64_t suid; int64_t createdTime; int32_t numOfColumns; //used by normal table - int32_t sid; + int32_t tid; int32_t vgId; int32_t sqlLen; int8_t updateEnd[4]; diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index ea2ac9bf90ed6669c01402c78e11c23bd200dc3d..2bb8a810566a676a4ae991bc79e5eb78234747c7 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -123,16 +123,18 @@ int32_t mnodeInitSystem() { } void mnodeCleanupSystem() { - mInfo("starting to clean up mnode"); - tsMgmtIsRunning = false; - - dnodeFreeMnodeWqueue(); - dnodeFreeMnodeRqueue(); - dnodeFreeMnodePqueue(); - mnodeCleanupTimer(); - mnodeCleanupComponents(sizeof(tsMnodeComponents) / 
sizeof(tsMnodeComponents[0]) - 1); - - mInfo("mnode is cleaned up"); + if (tsMgmtIsRunning) { + mInfo("starting to clean up mnode"); + tsMgmtIsRunning = false; + + dnodeFreeMnodeWqueue(); + dnodeFreeMnodeRqueue(); + dnodeFreeMnodePqueue(); + mnodeCleanupTimer(); + mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); + + mInfo("mnode is cleaned up"); + } } void mnodeStopSystem() { diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index fc76e3dce39dc2f6eeee3aac19d13f2dc605c1a1..c29d1ec0b7c863ec86d78f28997d40ba86b4dd34 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -111,7 +111,6 @@ void mnodeReleaseConn(SConnObj *pConn) { } SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port) { - uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs(); SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); if (pConn == NULL) { mDebug("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port); @@ -126,7 +125,7 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po } // mDebug("connId:%d, is incoming, user:%s ip:%s:%u", connId, pConn->user, taosIpStr(pConn->ip), pConn->port); - pConn->lastAccess = expireTime; + pConn->lastAccess = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs(); return pConn; } @@ -183,7 +182,7 @@ static int32_t mnodeGetConnsMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC // app name pShow->bytes[cols] = TSDB_APPNAME_LEN + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - strcpy(pSchema[cols].name, "app_name"); + strcpy(pSchema[cols].name, "program"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; @@ -626,7 +625,7 @@ static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg) { SCMKillConnMsg *pKill = pMsg->rpcMsg.pCont; int32_t connId = atoi(pKill->queryId); - SConnObj * pConn = 
taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); + SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t)); if (pConn == NULL) { mError("connId:%s, failed to kill, conn not exist", pKill->queryId); return TSDB_CODE_MND_INVALID_CONN_ID; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 14558485aa743a42bee016226dfc2e16a4f81d0c..8c61c61a10f6a5dd46c188acf32ef1242f3e7c50 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -281,7 +281,14 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) { ((SSdbTable *)pOper->table)->tableName, pOper->pObj, sdbGetKeyStr(pOper->table, pHead->cont), pHead->version, action, tstrerror(pOper->retCode)); if (action == SDB_ACTION_INSERT) { - sdbDeleteHash(pOper->table, pOper); + // It's better to create a table in two stages, create it first and then set it success + //sdbDeleteHash(pOper->table, pOper); + SSdbOper oper = { + .type = SDB_OPER_GLOBAL, + .table = pOper->table, + .pObj = pOper->pObj + }; + sdbDeleteRow(&oper); } } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index e100fc127bf955e065dd0908d99f4a6dbe246f1b..82a062169a591d26d665437273183a4c5e1f27b4 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -303,7 +303,7 @@ static int32_t mnodeChildTableActionRestored() { SVgObj *pVgroup = mnodeGetVgroup(pTable->vgId); if (pVgroup == NULL) { - mError("ctable:%s, failed to get vgId:%d sid:%d, discard it", pTable->info.tableId, pTable->vgId, pTable->sid); + mError("ctable:%s, failed to get vgId:%d tid:%d, discard it", pTable->info.tableId, pTable->vgId, pTable->tid); pTable->vgId = 0; SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; sdbDeleteRow(&desc); @@ -314,7 +314,7 @@ static int32_t mnodeChildTableActionRestored() { if (strcmp(pVgroup->dbName, pDb->name) != 0) { mError("ctable:%s, db:%s not match with vgId:%d db:%s sid:%d, discard it", - 
pTable->info.tableId, pDb->name, pTable->vgId, pVgroup->dbName, pTable->sid); + pTable->info.tableId, pDb->name, pTable->vgId, pVgroup->dbName, pTable->tid); pTable->vgId = 0; SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb}; sdbDeleteRow(&desc); @@ -771,8 +771,8 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { return mnodeProcessDropSuperTableMsg(pMsg); } else { SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable; - mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, - pDrop->tableId, pCTable->vgId, pCTable->sid, pCTable->uid); + mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d tid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, + pDrop->tableId, pCTable->vgId, pCTable->tid, pCTable->uid); return mnodeProcessDropChildTableMsg(pMsg); } } @@ -1399,16 +1399,20 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, void mnodeDropAllSuperTables(SDbObj *pDropDb) { void * pIter= NULL; int32_t numOfTables = 0; - int32_t dbNameLen = strlen(pDropDb->name); SSuperTableObj *pTable = NULL; + char prefix[64] = {0}; + tstrncpy(prefix, pDropDb->name, 64); + strcat(prefix, TS_PATH_DELIMITER); + int32_t prefixLen = strlen(prefix); + mInfo("db:%s, all super tables will be dropped from sdb", pDropDb->name); while (1) { pIter = mnodeGetNextSuperTable(pIter, &pTable); if (pTable == NULL) break; - if (strncmp(pDropDb->name, pTable->info.tableId, dbNameLen) == 0) { + if (strncmp(prefix, pTable->info.tableId, prefixLen) == 0) { SSdbOper oper = { .type = SDB_OPER_LOCAL, .table = tsSuperTableSdb, @@ -1472,13 +1476,14 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { int32_t numOfTable = htonl(pInfo->numOfTables); // reserve space - int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo); + int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupMsg) + sizeof(SVgroupsMsg); 
for (int32_t i = 0; i < numOfTable; ++i) { char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN) * i; SSuperTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable != NULL && pTable->vgHash != NULL) { - contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo)); + contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupMsg) + sizeof(SVgroupsMsg)); } + mnodeDecTableRef(pTable); } @@ -1506,12 +1511,12 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { // even this super table has no corresponding table, still return pRsp->numOfTables++; - SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg; - pVgroupInfo->numOfVgroups = 0; + SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)msg; + pVgroupMsg->numOfVgroups = 0; - msg += sizeof(SVgroupsInfo); + msg += sizeof(SVgroupsMsg); } else { - SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg; + SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)msg; SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash); int32_t vgSize = 0; @@ -1520,15 +1525,17 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { SVgObj * pVgroup = mnodeGetVgroup(*pVgId); if (pVgroup == NULL) continue; - pVgroupInfo->vgroups[vgSize].vgId = htonl(pVgroup->vgId); + pVgroupMsg->vgroups[vgSize].vgId = htonl(pVgroup->vgId); + pVgroupMsg->vgroups[vgSize].numOfEps = 0; + for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) { SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode; if (pDnode == NULL) break; - tstrncpy(pVgroupInfo->vgroups[vgSize].epAddr[vn].fqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN); - pVgroupInfo->vgroups[vgSize].epAddr[vn].port = htons(pDnode->dnodePort); + tstrncpy(pVgroupMsg->vgroups[vgSize].epAddr[vn].fqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN); + pVgroupMsg->vgroups[vgSize].epAddr[vn].port = htons(pDnode->dnodePort); - pVgroupInfo->vgroups[vgSize].numOfEps++; + pVgroupMsg->vgroups[vgSize].numOfEps++; } vgSize++; @@ -1538,10 +1545,10 @@ static int32_t 
mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { taosHashDestroyIter(pIter); mnodeDecTableRef(pTable); - pVgroupInfo->numOfVgroups = htonl(vgSize); + pVgroupMsg->numOfVgroups = htonl(vgSize); // one table is done, try the next table - msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo); + msg += sizeof(SVgroupsMsg) + vgSize * sizeof(SCMVgroupMsg); pRsp->numOfTables++; } } @@ -1591,7 +1598,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO pCreate->vgId = htonl(pTable->vgId); pCreate->tableType = pTable->info.type; pCreate->createdTime = htobe64(pTable->createdTime); - pCreate->sid = htonl(pTable->sid); + pCreate->tid = htonl(pTable->tid); pCreate->sqlDataLen = htonl(pTable->sqlLen); pCreate->uid = htobe64(pTable->uid); @@ -1640,7 +1647,7 @@ static int32_t mnodeDoCreateChildTableFp(SMnodeMsg *pMsg) { assert(pTable); mDebug("app:%p:%p, table:%s, created in mnode, vgId:%d sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid); + pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid); SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable); @@ -1682,7 +1689,7 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { return TSDB_CODE_MND_ACTION_IN_PROGRESS; } else { mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code)); + pTable->info.tableId, pTable->tid, pTable->uid, tstrerror(code)); SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb}; sdbDeleteRow(&desc); return code; @@ -1706,7 +1713,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { pTable->info.tableId = strdup(pCreate->tableId); pTable->createdTime = taosGetTimestampMs(); - pTable->sid = tid; + pTable->tid = tid; pTable->vgId = 
pVgroup->vgId; if (pTable->info.type == TSDB_CHILD_TABLE) { @@ -1720,7 +1727,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { } pTable->suid = pMsg->pSTable->uid; - pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) + + pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pTable->superTable = pMsg->pSTable; } else { @@ -1728,7 +1735,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { int64_t us = taosGetTimestampUs(); pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); } else { - pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->sid) & ((1ul << 24) - 1ul)) << 24) + + pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); } @@ -1785,7 +1792,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { tstrerror(code)); } else { mDebug("app:%p:%p, table:%s, allocated in vgroup, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pVgroup->vgId, pTable->sid, pTable->uid); + pTable->info.tableId, pVgroup->vgId, pTable->tid, pTable->uid); } return code; @@ -1803,8 +1810,8 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { if (pMsg->retry == 0) { if (pMsg->pTable == NULL) { SVgObj *pVgroup = NULL; - int32_t sid = 0; - code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &sid); + int32_t tid = 0; + code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &tid); if (code != TSDB_CODE_SUCCESS) { mDebug("app:%p:%p, table:%s, failed to get available vgroup, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, tstrerror(code)); @@ -1818,7 +1825,7 @@ static int32_t 
mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { pMsg->pVgroup = pVgroup; mnodeIncVgroupRef(pVgroup); - return mnodeDoCreateChildTable(pMsg, sid); + return mnodeDoCreateChildTable(pMsg, tid); } } else { if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId); @@ -1848,13 +1855,13 @@ static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) { tstrncpy(pDrop->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); pDrop->vgId = htonl(pTable->vgId); pDrop->contLen = htonl(sizeof(SMDDropTableMsg)); - pDrop->sid = htonl(pTable->sid); + pDrop->tid = htonl(pTable->tid); pDrop->uid = htobe64(pTable->uid); SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup); mInfo("app:%p:%p, ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, - pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid); + pDrop->tableId, pTable->vgId, pTable->tid, pTable->uid); SRpcMsg rpcMsg = { .ahandle = pMsg, @@ -2093,7 +2100,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable; pMeta->uid = htobe64(pTable->uid); - pMeta->sid = htonl(pTable->sid); + pMeta->tid = htonl(pTable->tid); pMeta->precision = pDb->cfg.precision; pMeta->tableType = pTable->info.type; tstrncpy(pMeta->tableId, pTable->info.tableId, TSDB_TABLE_FNAME_LEN); @@ -2133,7 +2140,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { pMeta->vgroup.vgId = htonl(pMsg->pVgroup->vgId); mDebug("app:%p:%p, table:%s, uid:%" PRIu64 " table meta is retrieved, vgId:%d sid:%d", pMsg->rpcMsg.ahandle, pMsg, - pTable->info.tableId, pTable->uid, pTable->vgId, pTable->sid); + pTable->info.tableId, pTable->uid, pTable->vgId, pTable->tid); return TSDB_CODE_SUCCESS; } @@ -2285,11 +2292,11 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) { } #if 0 -static SChildTableObj* mnodeGetTableByPos(int32_t vnode, int32_t sid) { +static SChildTableObj* 
mnodeGetTableByPos(int32_t vnode, int32_t tid) { SVgObj *pVgroup = mnodeGetVgroup(vnode); if (pVgroup == NULL) return NULL; - SChildTableObj *pTable = pVgroup->tableList[sid - 1]; + SChildTableObj *pTable = pVgroup->tableList[tid - 1]; mnodeIncTableRef((STableObj *)pTable); mnodeDecVgroupRef(pVgroup); @@ -2337,12 +2344,12 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) { assert(pTable); mInfo("app:%p:%p, table:%s, drop table rsp received, vgId:%d sid:%d uid:%" PRIu64 ", thandle:%p result:%s", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, + mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, mnodeMsg->rpcMsg.handle, tstrerror(rpcMsg->code)); if (rpcMsg->code != TSDB_CODE_SUCCESS) { mError("app:%p:%p, table:%s, failed to drop in dnode, vgId:%d sid:%d uid:%" PRIu64 ", reason:%s", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, + mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, tstrerror(rpcMsg->code)); dnodeSendRpcMnodeWriteRsp(mnodeMsg, rpcMsg->code); return; @@ -2380,7 +2387,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { // If the table is deleted by another thread during creation, stop creating and send drop msg to vnode if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) { mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64, - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid); + mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid); // if the vgroup is already dropped from hash, it can't be accquired by pTable->vgId // so the refCount of vgroup can not be decreased @@ -2415,13 +2422,13 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { if (mnodeMsg->retry++ < 10) { 
mDebug("app:%p:%p, table:%s, create table rsp received, need retry, times:%d vgId:%d sid:%d uid:%" PRIu64 " result:%s thandle:%p", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, mnodeMsg->retry, pTable->vgId, pTable->sid, + mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, mnodeMsg->retry, pTable->vgId, pTable->tid, pTable->uid, tstrerror(rpcMsg->code), mnodeMsg->rpcMsg.handle); dnodeDelayReprocessMnodeWriteMsg(mnodeMsg); } else { mError("app:%p:%p, table:%s, failed to create in dnode, vgId:%d sid:%d uid:%" PRIu64 ", result:%s thandle:%p", - mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid, + mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->tid, pTable->uid, tstrerror(rpcMsg->code), mnodeMsg->rpcMsg.handle); SSdbOper oper = {.type = SDB_OPER_GLOBAL, .table = tsChildTableSdb, .pObj = pTable}; @@ -2676,7 +2683,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows // tid pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - *(int32_t*) pWrite = pTable->sid; + *(int32_t*) pWrite = pTable->tid; cols++; //vgid diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 28e4d179202e2d54bef669bbc80cadcfe85d3d17..df703241c2612294fb5ac5f970f9ed68b0ac4e0e 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -663,13 +663,13 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p for (int32_t i = 0; i < pShow->maxReplica; ++i) { pShow->bytes[cols] = 2; pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; - snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "dnode%d", i + 1); + snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dDnode", i + 1); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; - snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dstatus", i + 1); + 
snprintf(pSchema[cols].name, TSDB_COL_NAME_LEN, "v%dStatus", i + 1); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; } @@ -793,12 +793,12 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { int32_t idPoolSize = taosIdPoolMaxSize(pVgroup->idPool); - if (pTable->sid > idPoolSize) { + if (pTable->tid > idPoolSize) { mnodeAllocVgroupIdPool(pVgroup); } - if (pTable->sid >= 1) { - taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid); + if (pTable->tid >= 1) { + taosIdPoolMarkStatus(pVgroup->idPool, pTable->tid); pVgroup->numOfTables++; // The create vgroup message may be received later than the create table message // and the writing order in sdb is therefore uncertain @@ -808,8 +808,8 @@ void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { } void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) { - if (pTable->sid >= 1) { - taosFreeId(pVgroup->idPool, pTable->sid); + if (pTable->tid >= 1) { + taosFreeId(pVgroup->idPool, pTable->tid); pVgroup->numOfTables--; // The create vgroup message may be received later than the create table message // and the writing order in sdb is therefore uncertain diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h index 81c70a58fdf01e405d2e665b5a017a4696d76e91..d718bef6da42f59c5ee4ea7bfaad098b47062984 100644 --- a/src/os/inc/osDef.h +++ b/src/os/inc/osDef.h @@ -27,7 +27,7 @@ extern "C" { #define FD_VALID(x) ((x) > STDERR_FILENO) #define FD_INITIALIZER ((int32_t)-1) -#define WCHAR wchar_t +// #define WCHAR wchar_t #define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b))) #define POINTER_DISTANCE(p1, p2) ((char *)(p1) - (char *)(p2)) diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 36a022159f4e8b58ca4179e9ae73d5ad7dd793d2..affc0e838ea97f25dcb2e53947ecbb7b025dffb1 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -122,9 +122,9 @@ 
typedef struct { } HttpDecodeMethod; typedef struct { - void (*startJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, void *result); + void (*startJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result); void (*stopJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd); - bool (*buildQueryJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, void *result, int numOfRows); + bool (*buildQueryJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows); void (*buildAffectRowJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, int affectRows); void (*initJsonFp)(struct HttpContext *pContext); void (*cleanJsonFp)(struct HttpContext *pContext); @@ -148,7 +148,7 @@ typedef struct HttpContext { char ipstr[22]; char user[TSDB_USER_LEN]; // parsed from auth token or login message char pass[TSDB_PASSWORD_LEN]; - void * taos; + TAOS * taos; void * ppContext; HttpSession *session; z_stream gzipStream; diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index 80e4ae3bc2b51387d5573af00aa1e91d2c1cd663..2c9eca11deb51acc8cc25965b83cc686ac04c6e5 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -217,7 +217,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - httpJsonStringForTransMean(jsonBuf, row[i], fields[i].bytes); + httpJsonStringForTransMean(jsonBuf, (char*)row[i], fields[i].bytes); break; case TSDB_DATA_TYPE_TIMESTAMP: if (precision == TSDB_TIME_PRECISION_MILLI) { //ms diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index 954678c24c8fb4597ef3184d2d65060c6d159aa1..a5b156bffc23ea3cdb042f456a327895fc0bdaa6 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -131,7 +131,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, break; case 
TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - httpJsonStringForTransMean(jsonBuf, row[i], length[i]); + httpJsonStringForTransMean(jsonBuf, (char*)row[i], length[i]); break; case TSDB_DATA_TYPE_TIMESTAMP: if (timestampFormat == REST_TIMESTAMP_FMT_LOCAL_STRING) { @@ -195,4 +195,4 @@ void restStopSqlJson(HttpContext *pContext, HttpSqlCmd *cmd) { httpJsonToken(jsonBuf, JsonObjEnd); httpWriteJsonBufEnd(jsonBuf); -} \ No newline at end of file +} diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 883fa574ff19958591051512e717a9b512f3e0ce..70d644146cf5ebe15a42c8f00a2e6e4bd603d081 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -27,8 +27,6 @@ #include "httpSession.h" #include "httpQueue.h" -void *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos); void httpProcessMultiSql(HttpContext *pContext); void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 33b724e4348a9c50997ab3c61f7cb5607684c82a..b474bea98717034c68ca4b3beb5a3a61288a064b 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -33,15 +33,11 @@ struct SColumnFilterElem; typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2); typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order); -typedef struct SPosInfo { - int32_t pageId:20; - int32_t rowId:12; -} SPosInfo; - typedef struct SGroupResInfo { int32_t groupId; int32_t numOfDataPages; - SPosInfo pos; + int32_t pageId; + int32_t rowId; } SGroupResInfo; typedef struct SSqlGroupbyExpr { @@ -53,9 +49,10 @@ typedef struct SSqlGroupbyExpr { } SSqlGroupbyExpr; typedef struct SWindowResult { - SPosInfo pos; // Position of current result in disk-based output buffer + int32_t pageId; // pageId & rowId is the position of 
current result in disk-based output buffer + int32_t rowId:15; + bool closed:1; // this result status: closed or opened uint16_t numOfRows; // number of rows of current time window - bool closed; // this result status: closed or opened SResultInfo* resultInfo; // For each result column, there is a resultInfo union {STimeWindow win; char* key;}; // start key of current time window } SWindowResult; @@ -213,6 +210,7 @@ typedef struct SQInfo { void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables; pthread_mutex_t lock; // used to synchronize the rsp/query threads + tsem_t ready; int32_t dataReady; // denote if query result is ready or not void* rspContext; // response context } SQInfo; diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 0ab3644cd5fa2528c1739e76a3b0e60656d64550..084f2247d011a2add9e671c1b9ad734cf8fda1cb 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -285,8 +285,6 @@ SDelSQL *tSetDelSQLElems(tVariantList *pFrom, tSQLExpr *pWhere); SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); -tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList); - void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); diff --git a/src/query/inc/qTsbuf.h b/src/query/inc/qTsbuf.h index 46e6f79014f32c9b3052824bdb702556f5c4f060..6c2a955f47577a0da19f6520b2addc44e66b73ba 100644 --- a/src/query/inc/qTsbuf.h +++ b/src/query/inc/qTsbuf.h @@ -35,16 +35,9 @@ typedef struct STSList { int32_t len; } STSList; -typedef struct STSRawBlock { - int32_t vnode; - int64_t tag; - TSKEY* ts; - int32_t len; -} STSRawBlock; - typedef struct STSElem { TSKEY ts; - tVariant tag; + tVariant* tag; int32_t vnode; } STSElem; @@ -84,6 +77,7 @@ typedef struct STSBuf { char path[PATH_MAX]; uint32_t fileSize; + // todo use array STSVnodeBlockInfoEx* pData; uint32_t numOfAlloc; uint32_t numOfVnodes; @@ 
-106,12 +100,12 @@ typedef struct STSBufFileHeader { STSBuf* tsBufCreate(bool autoDelete, int32_t order); STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete); -STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder); +STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder, int32_t vnodeId); void* tsBufDestroy(STSBuf* pTSBuf); void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag, const char* pData, int32_t len); -int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx); +int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf); STSBuf* tsBufClone(STSBuf* pTSBuf); @@ -121,6 +115,7 @@ void tsBufFlush(STSBuf* pTSBuf); void tsBufResetPos(STSBuf* pTSBuf); STSElem tsBufGetElem(STSBuf* pTSBuf); + bool tsBufNextPos(STSBuf* pTSBuf); STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag); @@ -136,6 +131,10 @@ void tsBufSetCursor(STSBuf* pTSBuf, STSCursor* pCur); */ void tsBufDisplay(STSBuf* pTSBuf); +int32_t tsBufGetNumOfVnodes(STSBuf* pTSBuf); + +void tsBufGetVnodeIdList(STSBuf* pTSBuf, int32_t* num, int32_t** vnodeId); + #ifdef __cplusplus } #endif diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 5320e5622e70a93c06edb4a1e5a3fe568498ef21..32f26f66f5a46f98477db7e67c7a3a6d988fdfc8 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -51,7 +51,7 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3 SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t realRowId = (int32_t)(pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); + int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage + pQuery->pSelectExpr[columnIndex].bytes * 
realRowId; } diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index 63411aaf3f9085d3912b923ac419cd1552239583..893105e44ac4eb82843514cda20928d3e0dcdaf9 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -646,9 +646,7 @@ static bool filterItem(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *p } // handle the leaf node - assert(pLeft->nodeType == TSQL_NODE_COL && pRight->nodeType == TSQL_NODE_VALUE); param->setupInfoFn(pExpr, param->pExtInfo); - return param->nodeFilterFn(pItem, pExpr->_node.info); } @@ -769,6 +767,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S assert(taosArrayGetSize(result) == 0); tSQLBinaryTraverseOnSkipList(pExpr, result, pSkipList, param); } + return; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f742616b050887f1728d8ea3e7baf51bcdce9a95..dd8f83a6431b12ed2a48cb0eade4937634ca4c46 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -184,7 +184,7 @@ static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInf static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex); + SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId); static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv); static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo); @@ -194,6 +194,8 @@ static void buildTagQueryResult(SQInfo *pQInfo); static int32_t setAdditionalInfo(SQInfo *pQInfo, void *pTable, STableQueryInfo *pTableQueryInfo); static int32_t flushFromResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo); +static int32_t checkForQueryBuf(size_t numOfTables); +static void releaseQueryBuf(size_t numOfTables); bool doFilterData(SQuery *pQuery, int32_t elemPos) { for 
(int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { @@ -555,7 +557,7 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResultBuf *pResultBuf, int32_t sid, int32_t numOfRowsPerPage) { - if (pWindowRes->pos.pageId != -1) { + if (pWindowRes->pageId != -1) { return 0; } @@ -588,11 +590,11 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult } // set the number of rows in current disk page - if (pWindowRes->pos.pageId == -1) { // not allocated yet, allocate new buffer - pWindowRes->pos.pageId = pageId; - pWindowRes->pos.rowId = (int32_t)(pData->num++); + if (pWindowRes->pageId == -1) { // not allocated yet, allocate new buffer + pWindowRes->pageId = pageId; + pWindowRes->rowId = (int32_t)(pData->num++); - assert(pWindowRes->pos.pageId >= 0); + assert(pWindowRes->pageId >= 0); } return 0; @@ -614,7 +616,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes *newWind = true; // not assign result buffer yet, add new result buffer - if (pWindowRes->pos.pageId == -1) { + if (pWindowRes->pageId == -1) { int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage); if (ret != TSDB_CODE_SUCCESS) { return -1; @@ -1005,9 +1007,10 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } + SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k, pQInfo->vgId); } int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -1140,7 +1143,7 @@ static int32_t 
setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat assert(pRuntimeEnv->windowResInfo.interval == 0); - if (pWindowRes->pos.pageId == -1) { + if (pWindowRes->pageId == -1) { int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage); if (ret != 0) { return -1; @@ -1200,7 +1203,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; // compare tag first - if (tVariantCompare(&pCtx[0].tag, &elem.tag) != 0) { + if (tVariantCompare(&pCtx[0].tag, elem.tag) != 0) { return TS_JOIN_TAG_NOT_EQUALS; } @@ -1286,9 +1289,10 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS groupbyColumnData = getGroupbyColumnData(pQuery, &type, &bytes, pDataBlock); } + SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); - setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k); + setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &sasArray[k], k, pQInfo->vgId); } // set the input column data @@ -1303,7 +1307,6 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // from top to bottom in desc // from bottom to top in asc order if (pRuntimeEnv->pTSBuf != NULL) { - SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pRuntimeEnv); qDebug("QInfo:%p process data rows, numOfRows:%d, query order:%d, ts comp order:%d", pQInfo, pDataBlockInfo->rows, pQuery->order.order, pRuntimeEnv->pTSBuf->cur.order); } @@ -1409,6 +1412,10 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS item->lastKey = (QUERY_IS_ASC_QUERY(pQuery)? 
pDataBlockInfo->window.ekey:pDataBlockInfo->window.skey) + step; } + if (pRuntimeEnv->pTSBuf != NULL) { + item->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + } + // todo refactor: extract method for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { if (pQuery->pSelectExpr[i].base.functionId != TSDB_FUNC_ARITHM) { @@ -1469,7 +1476,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl } void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, - SDataStatis *pStatis, void *param, int32_t colIndex) { + SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId) { int32_t functionId = pQuery->pSelectExpr[colIndex].base.functionId; int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId; @@ -1542,6 +1549,9 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY } } } + } else if (functionId == TSDB_FUNC_TS_COMP) { + pCtx->param[0].i64Key = vgId; + pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT; } #if defined(_DEBUG_VIEW) @@ -2621,12 +2631,19 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { assert(pFuncMsg->numOfParams == 1); - int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; - SColumnInfo* pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); + int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; + SColumnInfo *pColInfo = doGetTagColumnInfoById(pQuery->tagColList, pQuery->numOfTags, tagColId); doSetTagValueInParam(tsdb, pTable, tagColId, &pRuntimeEnv->pCtx[0].tag, pColInfo->type, pColInfo->bytes); - qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%"PRId64, pQInfo, pExprInfo->base.arg->argValue.i64, - pRuntimeEnv->pCtx[0].tag.i64Key) + + int16_t tagType = pRuntimeEnv->pCtx[0].tag.nType; + if (tagType == TSDB_DATA_TYPE_BINARY || tagType == TSDB_DATA_TYPE_NCHAR) { + 
qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%s", pQInfo, + pExprInfo->base.arg->argValue.i64, pRuntimeEnv->pCtx[0].tag.pz); + } else { + qDebug("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%" PRId64, pQInfo, + pExprInfo->base.arg->argValue.i64, pRuntimeEnv->pCtx[0].tag.i64Key); + } } } } @@ -2635,7 +2652,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SWindowRes SQuery * pQuery = pRuntimeEnv->pQuery; SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { int32_t functionId = pQuery->pSelectExpr[i].base.functionId; @@ -2806,14 +2823,14 @@ int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) SWindowResInfo *pWindowResInfo1 = &supporter->pTableQueryInfo[left]->windowResInfo; SWindowResult * pWindowRes1 = getWindowResult(pWindowResInfo1, leftPos); - tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes1->pos.pageId); + tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes1->pageId); char *b1 = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes1, page1); TSKEY leftTimestamp = GET_INT64_VAL(b1); SWindowResInfo *pWindowResInfo2 = &supporter->pTableQueryInfo[right]->windowResInfo; SWindowResult * pWindowRes2 = getWindowResult(pWindowResInfo2, rightPos); - tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes2->pos.pageId); + tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes2->pageId); char *b2 = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes2, page2); TSKEY rightTimestamp = GET_INT64_VAL(b2); @@ -2850,7 +2867,7 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) { } SGroupResInfo* info = &pQInfo->groupResInfo; - if (pQInfo->groupIndex == numOfGroups && 
info->pos.pageId == info->numOfDataPages) { + if (pQInfo->groupIndex == numOfGroups && info->pageId == info->numOfDataPages) { SET_STABLE_QUERY_OVER(pQInfo); } @@ -2866,10 +2883,10 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo; // all results have been return to client, try next group - if (pGroupResInfo->pos.pageId == pGroupResInfo->numOfDataPages) { + if (pGroupResInfo->pageId == pGroupResInfo->numOfDataPages) { pGroupResInfo->numOfDataPages = 0; - pGroupResInfo->pos.pageId = 0; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId = 0; + pGroupResInfo->rowId = 0; // current results of group has been sent to client, try next group if (mergeIntoGroupResult(pQInfo) != TSDB_CODE_SUCCESS) { @@ -2897,22 +2914,22 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) { assert(size == pGroupResInfo->numOfDataPages); bool done = false; - for (int32_t j = pGroupResInfo->pos.pageId; j < size; ++j) { + for (int32_t j = pGroupResInfo->pageId; j < size; ++j) { SPageInfo* pi = *(SPageInfo**) taosArrayGet(list, j); tFilePage* pData = getResBufPage(pResultBuf, pi->pageId); - assert(pData->num > 0 && pData->num <= pRuntimeEnv->numOfRowsPerPage && pGroupResInfo->pos.rowId < pData->num); - int32_t numOfRes = (int32_t)(pData->num - pGroupResInfo->pos.rowId); + assert(pData->num > 0 && pData->num <= pRuntimeEnv->numOfRowsPerPage && pGroupResInfo->rowId < pData->num); + int32_t numOfRes = (int32_t)(pData->num - pGroupResInfo->rowId); if (numOfRes > pQuery->rec.capacity - offset) { numOfCopiedRows = (int32_t)(pQuery->rec.capacity - offset); - pGroupResInfo->pos.rowId += numOfCopiedRows; + pGroupResInfo->rowId += numOfCopiedRows; done = true; } else { numOfCopiedRows = (int32_t)pData->num; - pGroupResInfo->pos.pageId += 1; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId += 1; + pGroupResInfo->rowId = 0; } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { @@ -3003,8 +3020,8 @@ int32_t 
mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { pGroupResInfo->numOfDataPages = (int32_t)taosArrayGetSize(pageList); pGroupResInfo->groupId = tid; - pGroupResInfo->pos.pageId = 0; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->pageId = 0; + pGroupResInfo->rowId = 0; return pGroupResInfo->numOfDataPages; } @@ -3050,7 +3067,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { SWindowResInfo *pWindowResInfo = &pTableList[pos]->windowResInfo; SWindowResult *pWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); char *b = getPosInResultPage(pRuntimeEnv, PRIMARYKEY_TIMESTAMP_COL_INDEX, pWindowRes, page); TSKEY ts = GET_INT64_VAL(b); @@ -3087,7 +3104,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { lastTimestamp = ts; // move to the next element of current entry - int32_t currentPageId = pWindowRes->pos.pageId; + int32_t currentPageId = pWindowRes->pageId; cs.position[pos] += 1; if (cs.position[pos] >= pWindowResInfo->size) { @@ -3100,7 +3117,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { } else { // current page is not needed anymore SWindowResult *pNextWindowRes = getWindowResult(pWindowResInfo, cs.position[pos]); - if (pNextWindowRes->pos.pageId != currentPageId) { + if (pNextWindowRes->pageId != currentPageId) { releaseResBufPage(pRuntimeEnv->pResultBuf, page); } } @@ -3312,7 +3329,8 @@ int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool is return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pResultRow->pos = (SPosInfo) {-1, -1}; + pResultRow->pageId = -1; + pResultRow->rowId = -1; char* buf = (char*) pResultRow->resultInfo + numOfCols * sizeof(SResultInfo); @@ -3779,7 +3797,7 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { * not assign result buffer yet, add new result 
buffer * all group belong to one result set, and each group result has different group id so set the id to be one */ - if (pWindowRes->pos.pageId == -1) { + if (pWindowRes->pageId == -1) { if (addNewWindowResultBuf(pWindowRes, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->numOfRowsPerPage) != TSDB_CODE_SUCCESS) { return; @@ -3796,7 +3814,7 @@ void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pResult SQuery *pQuery = pRuntimeEnv->pQuery; // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; @@ -3823,7 +3841,7 @@ void setWindowResOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult * SQuery *pQuery = pRuntimeEnv->pQuery; // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group - tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId); + tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; @@ -3860,14 +3878,40 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQ // both the master and supplement scan needs to set the correct ts comp start position if (pRuntimeEnv->pTSBuf != NULL) { + tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; + if (pTableQueryInfo->cur.vgroupIndex == -1) { - tVariantAssign(&pTableQueryInfo->tag, &pRuntimeEnv->pCtx[0].tag); - tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, 0, &pTableQueryInfo->tag); + tVariantAssign(&pTableQueryInfo->tag, pTag); + + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, &pTableQueryInfo->tag); + + // failed to find data with the specified tag value and vnodeId + if (elem.vnode < 
0) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pQInfo, pTag->i64Key); + } + + return false; + } // keep the cursor info of current meter - pTableQueryInfo->cur = pRuntimeEnv->pTSBuf->cur; + pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } + } else { tsBufSetCursor(pRuntimeEnv->pTSBuf, &pTableQueryInfo->cur); + + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + } } } @@ -3976,12 +4020,12 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_ for (int32_t i = startIdx; (i < totalSet) && (i >= 0); i += step) { if (result[i].numOfRows == 0) { pQInfo->groupIndex += 1; - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->rowId = 0; continue; } - int32_t numOfRowsToCopy = result[i].numOfRows - pGroupResInfo->pos.rowId; - int32_t oldOffset = pGroupResInfo->pos.rowId; + int32_t numOfRowsToCopy = result[i].numOfRows - pGroupResInfo->rowId; + int32_t oldOffset = pGroupResInfo->rowId; /* * current output space is not enough to accommodate all data of 
this page, only partial results @@ -3989,13 +4033,13 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SWindowResInfo *pResultInfo, int32_ */ if (numOfRowsToCopy > pQuery->rec.capacity - numOfResult) { numOfRowsToCopy = (int32_t) pQuery->rec.capacity - numOfResult; - pGroupResInfo->pos.rowId += numOfRowsToCopy; + pGroupResInfo->rowId += numOfRowsToCopy; } else { - pGroupResInfo->pos.rowId = 0; + pGroupResInfo->rowId = 0; pQInfo->groupIndex += 1; } - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, result[i].pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, result[i].pageId); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t size = pRuntimeEnv->pCtx[j].outputBytes; @@ -4763,15 +4807,62 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { } if (pRuntimeEnv->pTSBuf != NULL) { - if (pRuntimeEnv->cur.vgroupIndex == -1) { - STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, 0, &pRuntimeEnv->pCtx[0].tag); + tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; - // failed to find data with the specified tag value + if (pRuntimeEnv->cur.vgroupIndex == -1) { + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + // failed to find data with the specified tag value and vnodeId if (elem.vnode < 0) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%"PRId64" in ts_comp", pQInfo, pTag->i64Key); + } + return false; + } else { + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, + cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, + cur.blockIndex, 
cur.tsIndex); + } } } else { - tsBufSetCursor(pRuntimeEnv->pTSBuf, &pRuntimeEnv->cur); + STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); + if (tVariantCompare(elem.tag, &pRuntimeEnv->pCtx[0].tag) != 0) { + + STSElem elem1 = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + // failed to find data with the specified tag value and vnodeId + if (elem1.vnode < 0) { + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qError("QInfo:%p failed to find tag:%s in ts_comp", pQInfo, pTag->pz); + } else { + qError("QInfo:%p failed to find tag:%"PRId64" in ts_comp", pQInfo, pTag->i64Key); + } + + return false; + } else { + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, cur.blockIndex, cur.tsIndex); + } + } + + } else { + tsBufSetCursor(pRuntimeEnv->pTSBuf, &pRuntimeEnv->cur); + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { + qDebug("QInfo:%p continue scan ts_comp file, tag:%s blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); + } else { + qDebug("QInfo:%p continue scan ts_comp file, tag:%"PRId64" blockIndex:%d, tsIndex:%d", pQInfo, pTag->i64Key, cur.blockIndex, cur.tsIndex); + } + } } } @@ -5027,6 +5118,10 @@ static void sequentialTableProcess(SQInfo *pQInfo) { break; } + if (pRuntimeEnv->pTSBuf != NULL) { + pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; + } + } else { // all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter if (pQuery->rec.rows == 0) { @@ -6227,7 +6322,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg 
*pQueryMsg, SSqlGroupbyExpr *pGrou // NOTE: pTableCheckInfo need to update the query time range and the lastKey info pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo)); pQInfo->dataReady = QUERY_RESULT_NOT_READY; + pQInfo->rspContext = NULL; pthread_mutex_init(&pQInfo->lock, NULL); + tsem_init(&pQInfo->ready, 0, 0); pQuery->pos = -1; pQuery->window = pQueryMsg->window; @@ -6318,7 +6415,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ STSBuf *pTSBuf = NULL; if (pQueryMsg->tsLen > 0) { // open new file to save the result char *tsBlock = (char *) pQueryMsg + pQueryMsg->tsOffset; - pTSBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder); + pTSBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); tsBufResetPos(pTSBuf); bool ret = tsBufNextPos(pTSBuf); @@ -6400,6 +6497,8 @@ static void freeQInfo(SQInfo *pQInfo) { qDebug("QInfo:%p start to free QInfo", pQInfo); + releaseQueryBuf(pQInfo->tableqinfoGroupInfo.numOfTables); + teardownQueryRuntimeEnv(&pQInfo->runtimeEnv); SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -6634,6 +6733,11 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi assert(0); } + code = checkForQueryBuf(tableGroupInfo.numOfTables); + if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort + goto _over; + } + (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery); pExprs = NULL; pGroupbyExpr = NULL; @@ -6692,12 +6796,14 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pQInfo->dataReady = QUERY_RESULT_READY; buildRes = (pQInfo->rspContext != NULL); - pthread_mutex_unlock(&pQInfo->lock); - - // clear qhandle owner + // clear qhandle owner, it must be in the secure area. other thread may run ahead before current, after it is + // put into task to be executed. 
assert(pQInfo->owner == taosGetPthreadId()); pQInfo->owner = 0; + pthread_mutex_unlock(&pQInfo->lock); + + tsem_post(&pQInfo->ready); return buildRes; } @@ -6761,18 +6867,24 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex SQInfo *pQInfo = (SQInfo *)qinfo; if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + qError("QInfo:%p invalid qhandle", pQInfo); return TSDB_CODE_QRY_INVALID_QHANDLE; } *buildRes = false; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; if (IS_QUERY_KILLED(pQInfo)) { - qDebug("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code); + qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); return pQInfo->code; } int32_t code = TSDB_CODE_SUCCESS; + +#if 0 + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + pthread_mutex_lock(&pQInfo->lock); + assert(pQInfo->rspContext == NULL); + if (pQInfo->dataReady == QUERY_RESULT_READY) { *buildRes = true; qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%"PRId64", code:%d", pQInfo, pQuery->rowSize, pQuery->rec.rows, @@ -6781,10 +6893,17 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex *buildRes = false; qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); pQInfo->rspContext = pRspContext; + assert(pQInfo->rspContext != NULL); } code = pQInfo->code; pthread_mutex_unlock(&pQInfo->lock); +#else + tsem_wait(&pQInfo->ready); + *buildRes = true; + code = pQInfo->code; +#endif + return code; } @@ -7020,6 +7139,48 @@ static void buildTagQueryResult(SQInfo* pQInfo) { setQueryStatus(pQuery, QUERY_COMPLETED); } +static int64_t getQuerySupportBufSize(size_t numOfTables) { + size_t s1 = sizeof(STableQueryInfo); + size_t s2 = sizeof(SHashNode); + +// size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb + return (int64_t)((s1 + s2) * 1.5 * numOfTables); +} + +int32_t checkForQueryBuf(size_t numOfTables) { + int64_t t = getQuerySupportBufSize(numOfTables); + if (tsQueryBufferSize < 0) { + return 
TSDB_CODE_SUCCESS; + } else if (tsQueryBufferSize > 0) { + + while(1) { + int64_t s = tsQueryBufferSize; + int64_t remain = s - t; + if (remain >= 0) { + if (atomic_val_compare_exchange_64(&tsQueryBufferSize, s, remain) == s) { + return TSDB_CODE_SUCCESS; + } + } else { + return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER; + } + } + } + + // disable query processing if the value of tsQueryBufferSize is zero. + return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER; +} + +void releaseQueryBuf(size_t numOfTables) { + if (tsQueryBufferSize <= 0) { + return; + } + + int64_t t = getQuerySupportBufSize(numOfTables); + + // restore value is not enough buffer available + atomic_add_fetch_64(&tsQueryBufferSize, t); +} + void* qGetResultRetrieveMsg(qinfo_t qinfo) { SQInfo* pQInfo = (SQInfo*) qinfo; assert(pQInfo != NULL); @@ -7101,6 +7262,7 @@ void qCleanupQueryMgmt(void* pQMgmt) { void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { if (pMgmt == NULL) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } @@ -7109,6 +7271,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } @@ -7116,6 +7279,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { if (pQueryMgmt->closed) { // pthread_mutex_unlock(&pQueryMgmt->lock); qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } else { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; @@ -7129,13 +7293,20 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { void** qAcquireQInfo(void* pMgmt, uint64_t _key) { SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) { + if (pQueryMgmt->closed) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + if (pQueryMgmt->qinfoPool == NULL) 
{ + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; return NULL; } TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); if (handle == NULL || *handle == NULL) { + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; return NULL; } else { return handle; diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index fc9c60b39b0cfa4b591cc77c1efcdac4e6647ce9..17be294531e51982924d84700fc21653ad231224 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -344,8 +344,6 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t return 0; } - assert(colIdx == 0); - if (tsOrder == TSDB_ORDER_DESC) { // primary column desc order return (f1 < f2) ? 1 : -1; } else { // asc diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 18b81beb56f0dffbf1f99d416c23f8e4f36e9bc9..36fd057eea754fadb4b2052e8708684fe554fcdf 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -130,13 +130,15 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) { tVariantCreate(&pSQLExpr->val, pToken); pSQLExpr->nSQLOptr = optrType; } else if (optrType == TK_NOW) { - // default use microsecond + // use microsecond by default pSQLExpr->val.i64Key = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO); pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT; pSQLExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond } else if (optrType == TK_VARIABLE) { int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key); - UNUSED(ret); + if (ret != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; + } pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT; pSQLExpr->nSQLOptr = TK_TIMESTAMP; @@ -148,6 +150,7 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) { pSQLExpr->nSQLOptr = optrType; } + return pSQLExpr; } @@ -532,26 +535,6 @@ SQuerySQL 
*tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, return pQuery; } -tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList) { - if (pList == NULL) pList = calloc(1, sizeof(tSQLExprListList)); - - if (pList->nAlloc <= pList->nList) { // - pList->nAlloc = (pList->nAlloc << 1) + 4; - pList->a = realloc(pList->a, pList->nAlloc * sizeof(pList->a[0])); - if (pList->a == 0) { - pList->nList = pList->nAlloc = 0; - return pList; - } - } - assert(pList->a != 0); - - if (pExprList) { - pList->a[pList->nList++] = pExprList; - } - - return pList; -} - void doDestroyQuerySql(SQuerySQL *pQuerySql) { if (pQuerySql == NULL) { return; diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index 3a8be781d5c9e7cfcb524e10b2d937a73ecb74ca..ab9ffb7bcb458129b7f170e7020cee904d2dfda6 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -365,7 +365,7 @@ void tMemBucketDestroy(tMemBucket *pBucket) { taosTFree(pBucket); } -void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { +void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataType) { switch (dataType) { case TSDB_DATA_TYPE_INT: { int32_t val = *(int32_t *)data; diff --git a/src/query/src/qSyntaxtreefunction.c b/src/query/src/qSyntaxtreefunction.c index 2104edfd910bba1a1701800387545c4f58dfb625..7f7fca2c1e0ca6ce85f0df1c1b6e6dce82d8c000 100644 --- a/src/query/src/qSyntaxtreefunction.c +++ b/src/query/src/qSyntaxtreefunction.c @@ -1247,7 +1247,10 @@ _bi_consumer_fn_t tGetBiConsumerFn(int32_t leftType, int32_t rightType, int32_t case TSDB_BINARY_OP_REMAINDER: return rem_function_arraylist[leftType][rightType]; default: + assert(0); return NULL; } + + assert(0); return NULL; } diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index b264f6cdc9d815a12cc5a3ab0e5c09c0d670bcdb..ad29cef5c290f4a6bcd45f8a79828b0c14727dc0 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -403,7 +403,7 
@@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, tVariant* tag, const char* pDa } else { expandBuffer(ptsData, len); } - + tVariantAssign(&pTSBuf->block.tag, tag); memcpy(ptsData->rawBuf + ptsData->len, pData, (size_t)len); @@ -561,6 +561,19 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex pCur->tsIndex = (pCur->order == TSDB_ORDER_ASC) ? 0 : pBlock->numOfElem - 1; } +static int32_t doUpdateVnodeInfo(STSBuf* pTSBuf, int64_t offset, STSVnodeBlockInfo* pVInfo) { + if (offset < 0 || offset >= getDataStartOffset()) { + return -1; + } + + if (fseek(pTSBuf->f, (int32_t)offset, SEEK_SET) != 0) { + return -1; + } + + fwrite(pVInfo, sizeof(STSVnodeBlockInfo), 1, pTSBuf->f); + return 0; +} + STSVnodeBlockInfo* tsBufGetVnodeBlockInfo(STSBuf* pTSBuf, int32_t vnodeId) { int32_t j = tsBufFindVnodeIndexFromId(pTSBuf->pData, pTSBuf->numOfVnodes, vnodeId); if (j == -1) { @@ -649,7 +662,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) { return false; } - int32_t blockIndex = pCur->order == TSDB_ORDER_ASC ? 0 : pBlockInfo->numOfBlocks - 1; + int32_t blockIndex = (pCur->order == TSDB_ORDER_ASC) ? 
0 : (pBlockInfo->numOfBlocks - 1); tsBufGetBlock(pTSBuf, pCur->vgroupIndex + step, blockIndex); break; @@ -675,8 +688,7 @@ void tsBufResetPos(STSBuf* pTSBuf) { } STSElem tsBufGetElem(STSBuf* pTSBuf) { - STSElem elem1 = {.vnode = -1}; - + STSElem elem1 = {.vnode = -1}; if (pTSBuf == NULL) { return elem1; } @@ -690,7 +702,7 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { elem1.vnode = pTSBuf->pData[pCur->vgroupIndex].info.vnode; elem1.ts = *(TSKEY*)(pTSBuf->tsData.rawBuf + pCur->tsIndex * TSDB_KEYSIZE); - tVariantAssign(&elem1.tag, &pBlock->tag); + elem1.tag = &pBlock->tag; return elem1; } @@ -702,7 +714,7 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { * @param vnodeId * @return */ -int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { +int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { if (pDestBuf == NULL || pSrcBuf == NULL || pSrcBuf->numOfVnodes <= 0) { return 0; } @@ -712,14 +724,13 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { } // src can only have one vnode index - if (pSrcBuf->numOfVnodes > 1) { - return -1; - } - + assert(pSrcBuf->numOfVnodes == 1); + // there are data in buffer, flush to disk first tsBufFlush(pDestBuf); // compared with the last vnode id + int32_t vnodeId = tsBufGetLastVnodeInfo((STSBuf*) pSrcBuf)->info.vnode; if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) { int32_t oldSize = pDestBuf->numOfVnodes; int32_t newSize = oldSize + pSrcBuf->numOfVnodes; @@ -791,14 +802,14 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { return 0; } -STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t order) { +STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t order, int32_t vnodeId) { STSBuf* pTSBuf = tsBufCreate(true, order); STSVnodeBlockInfo* pBlockInfo = &(addOneVnodeInfo(pTSBuf, 0)->info); pBlockInfo->numOfBlocks = numOfBlocks; pBlockInfo->compLen = len; 
pBlockInfo->offset = getDataStartOffset(); - pBlockInfo->vnode = 0; + pBlockInfo->vnode = vnodeId; // update prev vnode length info in file TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); @@ -902,8 +913,8 @@ void tsBufDisplay(STSBuf* pTSBuf) { while (tsBufNextPos(pTSBuf)) { STSElem elem = tsBufGetElem(pTSBuf); - if (elem.tag.nType == TSDB_DATA_TYPE_BIGINT) { - printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + if (elem.tag->nType == TSDB_DATA_TYPE_BIGINT) { + printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, elem.tag->i64Key, elem.ts); } } @@ -915,19 +926,6 @@ static int32_t getDataStartOffset() { return sizeof(STSBufFileHeader) + TS_COMP_FILE_VNODE_MAX * sizeof(STSVnodeBlockInfo); } -static int32_t doUpdateVnodeInfo(STSBuf* pTSBuf, int64_t offset, STSVnodeBlockInfo* pVInfo) { - if (offset < 0 || offset >= getDataStartOffset()) { - return -1; - } - - if (fseek(pTSBuf->f, (int32_t)offset, SEEK_SET) != 0) { - return -1; - } - - fwrite(pVInfo, sizeof(STSVnodeBlockInfo), 1, pTSBuf->f); - return 0; -} - // update prev vnode length info in file static void TSBufUpdateVnodeInfo(STSBuf* pTSBuf, int32_t index, STSVnodeBlockInfo* pBlockInfo) { int32_t offset = sizeof(STSBufFileHeader) + index * sizeof(STSVnodeBlockInfo); @@ -969,3 +967,29 @@ static STSBuf* allocResForTSBuf(STSBuf* pTSBuf) { pTSBuf->fileSize += getDataStartOffset(); return pTSBuf; } + +int32_t tsBufGetNumOfVnodes(STSBuf* pTSBuf) { + if (pTSBuf == NULL) { + return 0; + } + + return pTSBuf->numOfVnodes; +} + +void tsBufGetVnodeIdList(STSBuf* pTSBuf, int32_t* num, int32_t** vnodeId) { + int32_t size = tsBufGetNumOfVnodes(pTSBuf); + if (num != NULL) { + *num = size; + } + + *vnodeId = NULL; + if (size == 0) { + return; + } + + (*vnodeId) = malloc(tsBufGetNumOfVnodes(pTSBuf) * sizeof(int32_t)); + + for(int32_t i = 0; i < size; ++i) { + (*vnodeId)[i] = pTSBuf->pData[i].info.vnode; + } +} \ No newline at end of file diff --git a/src/query/src/qUtil.c 
b/src/query/src/qUtil.c index 2bd92c74a456c16fe946288968d25d797907a390..ac95afffb157847269122bf14f65a3308ef5d9a2 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -266,7 +266,7 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow return; } - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pos.pageId); + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) { SResultInfo *pResultInfo = &pWindowRes->resultInfo[i]; @@ -279,7 +279,8 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow } pWindowRes->numOfRows = 0; - pWindowRes->pos = (SPosInfo){-1, -1}; + pWindowRes->pageId = -1; + pWindowRes->rowId = -1; pWindowRes->closed = false; pWindowRes->win = TSWINDOW_INITIALIZER; } @@ -308,10 +309,10 @@ void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, con memcpy(pDst->interResultBuf, pSrc->interResultBuf, pDst->bufLen); // copy the output buffer data from src to dst, the position info keep unchanged - tFilePage *dstpage = getResBufPage(pRuntimeEnv->pResultBuf, dst->pos.pageId); + tFilePage *dstpage = getResBufPage(pRuntimeEnv->pResultBuf, dst->pageId); char * dstBuf = getPosInResultPage(pRuntimeEnv, i, dst, dstpage); - tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pos.pageId); + tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pageId); char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SWindowResult *)src, srcpage); size_t s = pRuntimeEnv->pQuery->pSelectExpr[i].bytes; diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index b78c5314f243874e348748a6434d224592489528..8cd3a9cbef0bbf268db074a935a7b25cb389944e 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -304,7 +304,7 @@ void TSTraverse() { int32_t totalOutput = 10; while (1) { STSElem elem = tsBufGetElem(pTSBuf); - 
printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag->i64Key, elem.ts); if (!tsBufNextPos(pTSBuf)) { break; @@ -352,7 +352,7 @@ void TSTraverse() { totalOutput = 10; while (1) { STSElem elem = tsBufGetElem(pTSBuf); - printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag->i64Key, elem.ts); if (!tsBufNextPos(pTSBuf)) { break; @@ -416,8 +416,8 @@ void mergeDiffVnodeBufferTest() { int64_t* list = createTsList(num, start, step); t.i64Key = i; - tsBufAppend(pTSBuf1, 0, &t, (const char*)list, num * sizeof(int64_t)); - tsBufAppend(pTSBuf2, 0, &t, (const char*)list, num * sizeof(int64_t)); + tsBufAppend(pTSBuf1, 1, &t, (const char*)list, num * sizeof(int64_t)); + tsBufAppend(pTSBuf2, 9, &t, (const char*)list, num * sizeof(int64_t)); free(list); @@ -426,7 +426,7 @@ void mergeDiffVnodeBufferTest() { tsBufFlush(pTSBuf2); - tsBufMerge(pTSBuf1, pTSBuf2, 9); + tsBufMerge(pTSBuf1, pTSBuf2); EXPECT_EQ(pTSBuf1->numOfVnodes, 2); EXPECT_EQ(pTSBuf1->numOfTotal, numOfTags * 2 * num); @@ -459,8 +459,6 @@ void mergeIdenticalVnodeBufferTest() { start += step * num; } - - for (int32_t i = numOfTags; i < numOfTags * 2; ++i) { int64_t* list = createTsList(num, start, step); @@ -473,7 +471,7 @@ void mergeIdenticalVnodeBufferTest() { tsBufFlush(pTSBuf2); - tsBufMerge(pTSBuf1, pTSBuf2, 12); + tsBufMerge(pTSBuf1, pTSBuf2); EXPECT_EQ(pTSBuf1->numOfVnodes, 1); EXPECT_EQ(pTSBuf1->numOfTotal, numOfTags * 2 * num); @@ -482,7 +480,7 @@ void mergeIdenticalVnodeBufferTest() { STSElem elem = tsBufGetElem(pTSBuf1); EXPECT_EQ(elem.vnode, 12); - printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag.i64Key, elem.ts); + printf("%d-%" PRIu64 "-%" PRIu64 "\n", elem.vnode, elem.tag->i64Key, elem.ts); } tsBufDestroy(pTSBuf1); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 
63231bb1fab05bd1df02945f6a0d5593814d0b3a..1dfd87ff3a0e3d27f5c8091dd4ccee16f4971446 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -20,6 +20,7 @@ #include "ttimer.h" #include "tutil.h" #include "lz4.h" +#include "tref.h" #include "taoserror.h" #include "tsocket.h" #include "tglobal.h" @@ -72,7 +73,6 @@ typedef struct { SRpcInfo *pRpc; // associated SRpcInfo SRpcEpSet epSet; // ip list provided by app void *ahandle; // handle provided by app - void *signature; // for validation struct SRpcConn *pConn; // pConn allocated char msgType; // message type uint8_t *pCont; // content provided by app @@ -132,6 +132,10 @@ int tsRpcMaxRetry; int tsRpcHeadSize; int tsRpcOverhead; +static int tsRpcRefId = -1; +static int32_t tsRpcNum = 0; +static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT; + // server:0 client:1 tcp:2 udp:0 #define RPC_CONN_UDPS 0 #define RPC_CONN_UDPC 1 @@ -211,14 +215,26 @@ static void rpcUnlockConn(SRpcConn *pConn); static void rpcAddRef(SRpcInfo *pRpc); static void rpcDecRef(SRpcInfo *pRpc); -void *rpcOpen(const SRpcInit *pInit) { - SRpcInfo *pRpc; +static void rpcFree(void *p) { + tTrace("free mem: %p", p); + free(p); +} + +static void rpcInit(void) { tsProgressTimer = tsRpcTimer/2; tsRpcMaxRetry = tsRpcMaxTime * 1000/tsProgressTimer; tsRpcHeadSize = RPC_MSG_OVERHEAD; tsRpcOverhead = sizeof(SRpcReqContext); + tsRpcRefId = taosOpenRef(200, rpcFree); +} + +void *rpcOpen(const SRpcInit *pInit) { + SRpcInfo *pRpc; + + pthread_once(&tsRpcInit, rpcInit); + pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo)); if (pRpc == NULL) return NULL; @@ -237,6 +253,8 @@ void *rpcOpen(const SRpcInit *pInit) { pRpc->afp = pInit->afp; pRpc->refCount = 1; + atomic_add_fetch_32(&tsRpcNum, 1); + size_t size = sizeof(SRpcConn) * pRpc->sessions; pRpc->connList = (SRpcConn *)calloc(1, size); if (pRpc->connList == NULL) { @@ -363,7 +381,6 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { int contLen = rpcCompressRpcMsg(pMsg->pCont, 
pMsg->contLen); pContext = (SRpcReqContext *) ((char*)pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext)); pContext->ahandle = pMsg->ahandle; - pContext->signature = pContext; pContext->pRpc = (SRpcInfo *)shandle; pContext->epSet = *pEpSet; pContext->contLen = contLen; @@ -386,6 +403,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { // set the handle to pContext, so app can cancel the request if (pMsg->handle) *((void **)pMsg->handle) = pContext; + taosAddRef(tsRpcRefId, pContext); rpcSendReqToServer(pRpc, pContext); return; @@ -412,7 +430,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { rpcLockConn(pConn); if ( pConn->inType == 0 || pConn->user[0] == 0 ) { - tDebug("%s, connection is already released, rsp wont be sent", pConn->info); + tError("%s, connection is already released, rsp wont be sent", pConn->info); rpcUnlockConn(pConn); rpcFreeCont(pMsg->pCont); rpcDecRef(pRpc); @@ -536,14 +554,15 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) { void rpcCancelRequest(void *handle) { SRpcReqContext *pContext = handle; - // signature is used to check if pContext is freed. 
- // pContext may have been released just before app calls the rpcCancelRequest - if (pContext == NULL || pContext->signature != pContext) return; + int code = taosAcquireRef(tsRpcRefId, pContext); + if (code < 0) return; if (pContext->pConn) { tDebug("%s, app tries to cancel request", pContext->pConn->info); rpcCloseConn(pContext->pConn); } + + taosReleaseRef(tsRpcRefId, pContext); } static void rpcFreeMsg(void *msg) { @@ -612,7 +631,7 @@ static void rpcReleaseConn(SRpcConn *pConn) { // if there is an outgoing message, free it if (pConn->outType && pConn->pReqMsg) { if (pConn->pContext) pConn->pContext->pConn = NULL; - rpcFreeMsg(pConn->pReqMsg); + taosRemoveRef(tsRpcRefId, pConn->pContext); } } @@ -1057,6 +1076,13 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); } } else { // msg is passed to app only parsing is ok + + if (pHead->msgType == TSDB_MSG_TYPE_NETWORK_TEST) { + rpcSendQuickRsp(pConn, TSDB_CODE_SUCCESS); + rpcFreeMsg(pRecv->msg); + return pConn; + } + rpcProcessIncomingMsg(pConn, pHead, pContext); } } @@ -1068,7 +1094,6 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { SRpcInfo *pRpc = pContext->pRpc; - pContext->signature = NULL; pContext->pConn = NULL; if (pContext->pRsp) { // for synchronous API @@ -1085,7 +1110,7 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { } // free the request message - rpcFreeCont(pContext->pCont); + taosRemoveRef(tsRpcRefId, pContext); } static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqContext *pContext) { @@ -1593,6 +1618,12 @@ static void rpcDecRef(SRpcInfo *pRpc) pthread_mutex_destroy(&pRpc->mutex); tDebug("%s rpc resources are released", pRpc->label); taosTFree(pRpc); + + int count = atomic_sub_fetch_32(&tsRpcNum, 1); + if (count == 0) { + 
taosCloseRef(tsRpcRefId); + // tsRpcInit = PTHREAD_ONCE_INIT; // windows compliling error + } } } diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 0b9bbae92eef3942b7a930f30650c412ce2eba46..bc8d360d39509ce9a2fdbe6a9dd883c5c5c99190 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -174,12 +174,15 @@ static void taosStopTcpThread(SThreadObj* pThreadObj) { pThreadObj->stop = true; eventfd_t fd = -1; + // save thread into local variable since pThreadObj is freed when thread exits + pthread_t thread = pThreadObj->thread; + if (taosComparePthread(pThreadObj->thread, pthread_self())) { pthread_detach(pthread_self()); return; } - if (taosCheckPthreadValid(pThreadObj->thread) && pThreadObj->pollFd >= 0) { + if (taosCheckPthreadValid(pThreadObj->thread)) { // signal the thread to stop, try graceful method first, // and use pthread_cancel when failed struct epoll_event event = { .events = EPOLLIN }; @@ -196,8 +199,9 @@ static void taosStopTcpThread(SThreadObj* pThreadObj) { } } - if (taosCheckPthreadValid(pThreadObj->thread) && pThreadObj->pollFd >= 0) { - pthread_join(pThreadObj->thread, NULL); + // at this step, pThreadObj has already been released + if (taosCheckPthreadValid(thread)) { + pthread_join(thread, NULL); } if (fd != -1) taosCloseSocket(fd); diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index efdf2bd185cb5db46d7f0918725ebe8886d5bd98..60271c771ca0a01bd449cb878fe2269759250fd3 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -5,14 +5,14 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) - LIST(REMOVE_ITEM SRC ./src/tarbitrator.c) + LIST(REMOVE_ITEM SRC src/tarbitrator.c) ADD_LIBRARY(sync ${SRC}) TARGET_LINK_LIBRARIES(sync tutil pthread common) - LIST(APPEND BIN_SRC ./src/tarbitrator.c) - LIST(APPEND BIN_SRC ./src/taosTcpPool.c) + LIST(APPEND BIN_SRC src/tarbitrator.c) + LIST(APPEND BIN_SRC src/taosTcpPool.c) ADD_EXECUTABLE(tarbitrator ${BIN_SRC}) TARGET_LINK_LIBRARIES(tarbitrator 
sync common osdetail tutil) - ADD_SUBDIRECTORY(test) + #ADD_SUBDIRECTORY(test) ENDIF () diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index ebb6c3a0a9edff5acfc5f2ce7da8b58f03d8ab4a..19a5d3ba41b8fb0752f5849b8504059accdae485 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -56,6 +56,7 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { int code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; uint32_t pindex = 0; // index in last restore + bool fileChanged = false; *fversion = 0; sinfo.index = 0; @@ -114,10 +115,11 @@ static int syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { close(dfd); if (ret < 0) break; + fileChanged = true; sDebug("%s, %s is received, size:%" PRId64, pPeer->id, minfo.name, minfo.size); } - if (code == 0 && (minfo.fversion != sinfo.fversion)) { + if (code == 0 && fileChanged) { // data file is changed, code shall be set to 1 *fversion = minfo.fversion; code = 1; diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 626ad77da2eab4be9e94516c4e5c7c0e5a45837e..5633e97cdf56bea8f2c24fc862d720bea2c376db 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -79,7 +79,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { DIR * dir = NULL; int fid = 0; int vid = 0; - regex_t regex1, regex2; + regex_t regex1 = {0}, regex2 = {0}; int code = 0; char fname[TSDB_FILENAME_LEN] = "\0"; @@ -95,9 +95,27 @@ int tsdbOpenFileH(STsdbRepo *pRepo) { dir = opendir(tDataDir); if (dir == NULL) { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _err; + if (errno == ENOENT) { + tsdbError("vgId:%d directory %s not exist", REPO_ID(pRepo), tDataDir); + terrno = TAOS_SYSTEM_ERROR(errno); + + if (taosMkDir(tDataDir, 0755) < 0) { + tsdbError("vgId:%d failed to create directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + 
dir = opendir(tDataDir); + if (dir == NULL) { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + } else { + tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), tDataDir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + goto _err; + } } code = regcomp(®ex1, "^v[0-9]+f[0-9]+\\.(head|data|last|stat)$", REG_EXTENDED); diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 564d7f5db51c80929391283e4b6fa1128275ea14..f3bd91f038cf209827b1c252160019a6b0aac27f 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -239,7 +239,7 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) { return NULL; } - if (tsdbInitTableCfg(pCfg, pMsg->tableType, htobe64(pMsg->uid), htonl(pMsg->sid)) < 0) goto _err; + if (tsdbInitTableCfg(pCfg, pMsg->tableType, htobe64(pMsg->uid), htonl(pMsg->tid)) < 0) goto _err; if (tdInitTSchemaBuilder(&schemaBuilder, htonl(pMsg->sversion)) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ac6c2e0c5a9d5590e7dd57863dd18d8726912de5..5b0896ae6f912f143a4649e8fb620955cda6462e 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1263,7 +1263,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl int32_t end = endPos; if (!ASCENDING_TRAVERSE(pQueryHandle->order)) { - assert(start >= end); SWAP(start, end, int32_t); } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 89c8e3dc39211eca1b6c877f7789ad4313917ea2..b8f568f82c15e711766b5957a65106b822cd5ce6 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,6 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil pthread osdetail lz4 z) diff --git 
a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 0520cf29a87c9d4727ef6db48d8f5712ac845b89..33819f6a20ee64ada194d520ef09c6133d4dad96 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -53,7 +53,7 @@ enum { TAOS_CFG_UTYPE_NONE, TAOS_CFG_UTYPE_PERCENT, TAOS_CFG_UTYPE_GB, - TAOS_CFG_UTYPE_Mb, + TAOS_CFG_UTYPE_MB, TAOS_CFG_UTYPE_BYTE, TAOS_CFG_UTYPE_SECOND, TAOS_CFG_UTYPE_MS diff --git a/src/util/inc/tnettest.h b/src/util/inc/tnettest.h index 3fe1dfa9204fbbf85f193078b17e0bb4f9643848..426df5cbb28b9c0fcada049c7242730359d2a3fc 100644 --- a/src/util/inc/tnettest.h +++ b/src/util/inc/tnettest.h @@ -20,7 +20,27 @@ extern "C" { #endif -void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole); +typedef struct CmdArguments { + char* host; + char* password; + char* user; + char* auth; + char* database; + char* timezone; + bool is_raw_time; + bool is_use_passwd; + char file[TSDB_FILENAME_LEN]; + char dir[TSDB_FILENAME_LEN]; + int threadNum; + char* commands; + int abort; + int port; + int endPort; + int pktLen; + char* netTestRole; +} CmdArguments; + +void taosNetTest(CmdArguments* args); #ifdef __cplusplus } diff --git a/src/util/inc/tref.h b/src/util/inc/tref.h new file mode 100644 index 0000000000000000000000000000000000000000..6619ff407e2aad2322e952ce3ef43c87eba64bb2 --- /dev/null +++ b/src/util/inc/tref.h @@ -0,0 +1,38 @@ + +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TREF_H +#define TDENGINE_TREF_H + +#ifdef __cplusplus +extern "C" { +#endif + +int taosOpenRef(int max, void (*fp)(void *)); // return refId which will be used by other APIs +void taosCloseRef(int refId); +int taosListRef(); // return the number of references in system +int taosAddRef(int refId, void *p); +int taosAcquireRef(int refId, void *p); +void taosReleaseRef(int refId, void *p); + +#define taosRemoveRef taosReleaseRef + + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TREF_H diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 6e20c1708dfc81728c6b961b9259d50e953b4b9d..5be7253f6db80802feac5e51778fe89f5858733f 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -335,7 +335,7 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data) { } void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { - if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) + pCacheObj->numOfElemsInTrash == 0) { + if (pCacheObj == NULL) { return; } @@ -343,7 +343,12 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { uError("cache:%s, NULL data to release", pCacheObj->name); return; } - + + + // The operation of removal from hash table and addition to trashcan is not an atomic operation, + // therefore the check for the empty of both the hash table and the trashcan has a race condition. + // It happens when there is only one object in the cache, and two threads which has referenced this object + // start to free the it simultaneously [TD-1569]. 
size_t offset = offsetof(SCacheDataNode, data); SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset); diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 3793f3d3a944cc5c8d86c0dc0c0fa5bfe3cee764..6fd526598365f831addd1bacb0b7f748d9552fdb 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -15,11 +15,16 @@ #include "os.h" #include "taosdef.h" +#include "taosmsg.h" #include "taoserror.h" #include "tulog.h" #include "tconfig.h" #include "tglobal.h" #include "tsocket.h" +#include "trpc.h" +#include "rpcHead.h" +#include "tutil.h" +#include "tnettest.h" #define MAX_PKG_LEN (64*1000) #define BUFFER_SIZE (MAX_PKG_LEN + 1024) @@ -30,9 +35,15 @@ typedef struct { uint16_t pktLen; } info_s; -static char serverFqdn[TSDB_FQDN_LEN]; +extern int tsRpcMaxUdpSize; + +static char g_user[TSDB_USER_LEN+1] = {0}; +static char g_pass[TSDB_PASSWORD_LEN+1] = {0}; +static char g_serverFqdn[TSDB_FQDN_LEN] = {0}; static uint16_t g_startPort = 0; static uint16_t g_endPort = 6042; +static uint32_t g_pktLen = 0; + static void *bindUdpPort(void *sarg) { info_s *pinfo = (info_s *)sarg; @@ -321,19 +332,145 @@ static void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uin return ; } -static void taosNetTestClient(const char* serverFqdn, uint16_t startPort, uint16_t endPort, int pktLen) { - uint32_t serverIp = taosGetIpFromFqdn(serverFqdn); - if (serverIp == 0xFFFFFFFF) { - printf("Failed to resolve FQDN:%s", serverFqdn); - exit(-1); +void* tnetInitRpc(char* secretEncrypt, char spi) { + SRpcInit rpcInit; + void* pRpcConn = NULL; + + taosEncryptPass((uint8_t *)g_pass, strlen(g_pass), secretEncrypt); + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localPort = 0; + rpcInit.label = "NET-TEST"; + rpcInit.numOfThreads = 1; // every DB connection has only one thread + rpcInit.cfp = NULL; + rpcInit.sessions = 16; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.user = g_user; + rpcInit.idleTime = 2000; + rpcInit.ckey = "key"; + 
rpcInit.spi = spi; + rpcInit.secret = secretEncrypt; + + pRpcConn = rpcOpen(&rpcInit); + return pRpcConn; +} + +static int rpcCheckPortImpl(const char* serverFqdn, uint16_t port, uint16_t pktLen, char spi) { + SRpcEpSet epSet; + SRpcMsg reqMsg; + SRpcMsg rspMsg; + void* pRpcConn; + + char secretEncrypt[32] = {0}; + + pRpcConn = tnetInitRpc(secretEncrypt, spi); + if (NULL == pRpcConn) { + return -1; } - checkPort(serverIp, startPort, endPort, pktLen); + memset(&epSet, 0, sizeof(SRpcEpSet)); + epSet.inUse = 0; + epSet.numOfEps = 1; + epSet.port[0] = port; + strcpy(epSet.fqdn[0], serverFqdn); + + reqMsg.msgType = TSDB_MSG_TYPE_NETWORK_TEST; + reqMsg.pCont = rpcMallocCont(pktLen); + reqMsg.contLen = pktLen; + reqMsg.code = 0; + reqMsg.handle = NULL; // rpc handle returned to app + reqMsg.ahandle = NULL; // app handle set by client + + rpcSendRecv(pRpcConn, &epSet, &reqMsg, &rspMsg); + + // handle response + if ((rspMsg.code != 0) || (rspMsg.msgType != TSDB_MSG_TYPE_NETWORK_TEST + 1)) { + //printf("code:%d[%s]\n", rspMsg.code, tstrerror(rspMsg.code)); + return -1; + } + + rpcFreeCont(rspMsg.pCont); - return; + rpcClose(pRpcConn); + + return 0; +} + +static void rpcCheckPort(uint32_t hostIp) { + int ret; + char spi; + + for (uint16_t port = g_startPort; port <= g_endPort; port++) { + //printf("test: %s:%d\n", info.host, port); + printf("\n"); + + //================ check tcp port ================ + int32_t pktLen; + if (g_pktLen <= tsRpcMaxUdpSize) { + pktLen = tsRpcMaxUdpSize + 1000; + } else { + pktLen = g_pktLen; + } + + spi = 1; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + spi = 0; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + printf("TCP port:%d test fail.\t\t", port); + } else { + //printf("tcp port:%d test ok.\t\t", port); + printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); + } + } else { + //printf("tcp port:%d test ok.\t\t", port); + printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); + 
} + + //================ check udp port ================ + if (g_pktLen >= tsRpcMaxUdpSize) { + pktLen = tsRpcMaxUdpSize - 1000; + } else { + pktLen = g_pktLen; + } + + spi = 0; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + spi = 1; + ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); + if (ret != 0) { + printf("udp port:%d test fail.\t\n", port); + } else { + //printf("udp port:%d test ok.\t\n", port); + printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); + } + } else { + //printf("udp port:%d test ok.\t\n", port); + printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); + } + } + + printf("\n"); + return ; } +static void taosNetTestClient(int flag) { + uint32_t serverIp = taosGetIpFromFqdn(g_serverFqdn); + if (serverIp == 0xFFFFFFFF) { + printf("Failed to resolve FQDN:%s", g_serverFqdn); + exit(-1); + } + if (0 == flag) { + checkPort(serverIp, g_startPort, g_endPort, g_pktLen); + } else { + rpcCheckPort(serverIp); + } + + return; +} static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) { @@ -375,49 +512,66 @@ static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) } -void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole) { - if (pktLen > MAX_PKG_LEN) { - printf("test packet len overflow: %d, max len not greater than %d bytes\n", pktLen, MAX_PKG_LEN); - exit(-1); +void taosNetTest(CmdArguments *args) { + if (0 == args->pktLen) { + g_pktLen = 1000; + } else { + g_pktLen = args->pktLen; } - if (port && endPort) { - if (port > endPort) { - printf("endPort[%d] must not lesss port[%d]\n", endPort, port); + if (args->port && args->endPort) { + if (args->port > args->endPort) { + printf("endPort[%d] must not lesss port[%d]\n", args->endPort, args->port); exit(-1); } } - if (host && host[0] != 0) { - if (strlen(host) >= TSDB_EP_LEN) { - printf("host invalid: %s\n", host); + if (args->host && args->host[0] != 0) { + if 
(strlen(args->host) >= TSDB_EP_LEN) { + printf("host invalid: %s\n", args->host); exit(-1); } - taosGetFqdnPortFromEp(host, serverFqdn, &g_startPort); + taosGetFqdnPortFromEp(args->host, g_serverFqdn, &g_startPort); } else { - tstrncpy(serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN); + tstrncpy(g_serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN); g_startPort = tsServerPort; } - if (port) { - g_startPort = port; + if (args->port) { + g_startPort = args->port; } - if (endPort) { - g_endPort = endPort; + if (args->endPort) { + g_endPort = args->endPort; } - if (port > endPort) { + if (g_startPort > g_endPort) { printf("endPort[%d] must not lesss port[%d]\n", g_endPort, g_startPort); exit(-1); } + + + if (args->is_use_passwd) { + if (args->password == NULL) args->password = getpass("Enter password: "); + } else { + args->password = TSDB_DEFAULT_PASS; + } + tstrncpy(g_pass, args->password, TSDB_PASSWORD_LEN); + + if (args->user == NULL) { + args->user = TSDB_DEFAULT_USER; + } + tstrncpy(g_user, args->user, TSDB_USER_LEN); - if (0 == strcmp("client", netTestRole)) { - printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", serverFqdn, g_startPort, g_endPort, pktLen); - taosNetTestClient(serverFqdn, g_startPort, g_endPort, pktLen); - } else if (0 == strcmp("server", netTestRole)) { - taosNetTestServer(g_startPort, g_endPort, pktLen); + if (0 == strcmp("client", args->netTestRole)) { + printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); + taosNetTestClient(0); + } else if (0 == strcmp("clients", args->netTestRole)) { + printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); + taosNetTestClient(1); + } else if (0 == strcmp("server", args->netTestRole)) { + taosNetTestServer(g_startPort, g_endPort, g_pktLen); } } diff --git a/src/util/src/tref.c b/src/util/src/tref.c new file mode 100644 index 
0000000000000000000000000000000000000000..4c3b8363407326b724c0b2706231bd751e43e110 --- /dev/null +++ b/src/util/src/tref.c @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "os.h" +#include "taoserror.h" +#include "tulog.h" +#include "tutil.h" + +#define TSDB_REF_OBJECTS 50 +#define TSDB_REF_STATE_EMPTY 0 +#define TSDB_REF_STATE_ACTIVE 1 +#define TSDB_REF_STATE_DELETED 2 + +typedef struct SRefNode { + struct SRefNode *prev; + struct SRefNode *next; + void *p; + int32_t count; +} SRefNode; + +typedef struct { + SRefNode **nodeList; + int state; // 0: empty, 1: active; 2: deleted + int refId; + int max; + int32_t count; // total number of SRefNodes in this set + int64_t *lockedBy; + void (*fp)(void *); +} SRefSet; + +static SRefSet tsRefSetList[TSDB_REF_OBJECTS]; +static pthread_once_t tsRefModuleInit = PTHREAD_ONCE_INIT; +static pthread_mutex_t tsRefMutex; +static int tsRefSetNum = 0; +static int tsNextId = 0; + +static void taosInitRefModule(void); +static int taosHashRef(SRefSet *pSet, void *p); +static void taosLockList(int64_t *lockedBy); +static void taosUnlockList(int64_t *lockedBy); +static void taosIncRefCount(SRefSet *pSet); +static void taosDecRefCount(SRefSet *pSet); + +int taosOpenRef(int max, void (*fp)(void *)) +{ + SRefNode **nodeList; + SRefSet *pSet; + int64_t *lockedBy; + int i, refId; + + pthread_once(&tsRefModuleInit, taosInitRefModule); + + nodeList = calloc(sizeof(SRefNode *), 
(size_t)max); + if (nodeList == NULL) { + return TSDB_CODE_REF_NO_MEMORY; + } + + lockedBy = calloc(sizeof(int64_t), (size_t)max); + if (lockedBy == NULL) { + free(nodeList); + return TSDB_CODE_REF_NO_MEMORY; + } + + pthread_mutex_lock(&tsRefMutex); + + for (i = 0; i < TSDB_REF_OBJECTS; ++i) { + tsNextId = (tsNextId + 1) % TSDB_REF_OBJECTS; + if (tsRefSetList[tsNextId].state == TSDB_REF_STATE_EMPTY) break; + } + + if (i < TSDB_REF_OBJECTS) { + refId = tsNextId; + pSet = tsRefSetList + refId; + taosIncRefCount(pSet); + pSet->max = max; + pSet->nodeList = nodeList; + pSet->lockedBy = lockedBy; + pSet->fp = fp; + pSet->state = TSDB_REF_STATE_ACTIVE; + pSet->refId = refId; + + tsRefSetNum++; + uTrace("refId:%d is opened, max:%d, fp:%p refSetNum:%d", refId, max, fp, tsRefSetNum); + } else { + refId = TSDB_CODE_REF_FULL; + free (nodeList); + free (lockedBy); + uTrace("run out of Ref ID, maximum:%d refSetNum:%d", TSDB_REF_OBJECTS, tsRefSetNum); + } + + pthread_mutex_unlock(&tsRefMutex); + + return refId; +} + +void taosCloseRef(int refId) +{ + SRefSet *pSet; + int deleted = 0; + + if (refId < 0 || refId >= TSDB_REF_OBJECTS) { + uTrace("refId:%d is invalid, out of range", refId); + return; + } + + pSet = tsRefSetList + refId; + + pthread_mutex_lock(&tsRefMutex); + + if (pSet->state == TSDB_REF_STATE_ACTIVE) { + pSet->state = TSDB_REF_STATE_DELETED; + deleted = 1; + uTrace("refId:%d is closed, count:%d", refId, pSet->count); + } else { + uTrace("refId:%d is already closed, count:%d", refId, pSet->count); + } + + pthread_mutex_unlock(&tsRefMutex); + + if (deleted) taosDecRefCount(pSet); +} + +int taosAddRef(int refId, void *p) +{ + int hash; + SRefNode *pNode; + SRefSet *pSet; + + if (refId < 0 || refId >= TSDB_REF_OBJECTS) { + uTrace("refId:%d p:%p failed to add, refId not valid", refId, p); + return TSDB_CODE_REF_INVALID_ID; + } + + uTrace("refId:%d p:%p try to add", refId, p); + + pSet = tsRefSetList + refId; + taosIncRefCount(pSet); + if (pSet->state != 
TSDB_REF_STATE_ACTIVE) { + taosDecRefCount(pSet); + uTrace("refId:%d p:%p failed to add, not active", refId, p); + return TSDB_CODE_REF_ID_REMOVED; + } + + int code = 0; + hash = taosHashRef(pSet, p); + + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + while (pNode) { + if (pNode->p == p) + break; + + pNode = pNode->next; + } + + if (pNode) { + code = TSDB_CODE_REF_ALREADY_EXIST; + uTrace("refId:%d p:%p is already there, faild to add", refId, p); + } else { + pNode = calloc(sizeof(SRefNode), 1); + if (pNode) { + pNode->p = p; + pNode->count = 1; + pNode->prev = 0; + pNode->next = pSet->nodeList[hash]; + if (pSet->nodeList[hash]) pSet->nodeList[hash]->prev = pNode; + pSet->nodeList[hash] = pNode; + uTrace("refId:%d p:%p is added, count:%d malloc mem: %p", refId, p, pSet->count, pNode); + } else { + code = TSDB_CODE_REF_NO_MEMORY; + uTrace("refId:%d p:%p is not added, since no memory", refId, p); + } + } + + if (code < 0) taosDecRefCount(pSet); + + taosUnlockList(pSet->lockedBy+hash); + + return code; +} + +int taosAcquireRef(int refId, void *p) +{ + int hash, code = 0; + SRefNode *pNode; + SRefSet *pSet; + + if (refId < 0 || refId >= TSDB_REF_OBJECTS) { + uTrace("refId:%d p:%p failed to acquire, refId not valid", refId, p); + return TSDB_CODE_REF_INVALID_ID; + } + + uTrace("refId:%d p:%p try to acquire", refId, p); + + pSet = tsRefSetList + refId; + taosIncRefCount(pSet); + if (pSet->state != TSDB_REF_STATE_ACTIVE) { + uTrace("refId:%d p:%p failed to acquire, not active", refId, p); + taosDecRefCount(pSet); + return TSDB_CODE_REF_ID_REMOVED; + } + + hash = taosHashRef(pSet, p); + + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + + while (pNode) { + if (pNode->p == p) { + break; + } + + pNode = pNode->next; + } + + if (pNode) { + pNode->count++; + uTrace("refId:%d p:%p is acquired", refId, p); + } else { + code = TSDB_CODE_REF_NOT_EXIST; + uTrace("refId:%d p:%p is not there, failed to acquire", refId, p); + } + + 
taosUnlockList(pSet->lockedBy+hash); + + taosDecRefCount(pSet); + + return code; +} + +void taosReleaseRef(int refId, void *p) +{ + int hash; + SRefNode *pNode; + SRefSet *pSet; + int released = 0; + + if (refId < 0 || refId >= TSDB_REF_OBJECTS) { + uTrace("refId:%d p:%p failed to release, refId not valid", refId, p); + return; + } + + uTrace("refId:%d p:%p try to release", refId, p); + + pSet = tsRefSetList + refId; + if (pSet->state == TSDB_REF_STATE_EMPTY) { + uTrace("refId:%d p:%p failed to release, cleaned", refId, p); + return; + } + + hash = taosHashRef(pSet, p); + + taosLockList(pSet->lockedBy+hash); + + pNode = pSet->nodeList[hash]; + while (pNode) { + if (pNode->p == p) + break; + + pNode = pNode->next; + } + + if (pNode) { + pNode->count--; + + if (pNode->count == 0) { + if ( pNode->prev ) { + pNode->prev->next = pNode->next; + } else { + pSet->nodeList[hash] = pNode->next; + } + + if ( pNode->next ) { + pNode->next->prev = pNode->prev; + } + + (*pSet->fp)(pNode->p); + + free(pNode); + released = 1; + uTrace("refId:%d p:%p is removed, count:%d, free mem: %p", refId, p, pSet->count, pNode); + } else { + uTrace("refId:%d p:%p is released", refId, p); + } + } else { + uTrace("refId:%d p:%p is not there, failed to release", refId, p); + } + + taosUnlockList(pSet->lockedBy+hash); + + if (released) taosDecRefCount(pSet); +} + +int taosListRef() { + SRefSet *pSet; + SRefNode *pNode; + int num = 0; + + pthread_mutex_lock(&tsRefMutex); + + for (int i = 0; i < TSDB_REF_OBJECTS; ++i) { + pSet = tsRefSetList + i; + + if (pSet->state == TSDB_REF_STATE_EMPTY) + continue; + + uInfo("refId:%d state:%d count::%d", i, pSet->state, pSet->count); + + for (int j=0; j < pSet->max; ++j) { + pNode = pSet->nodeList[j]; + + while (pNode) { + uInfo("refId:%d p:%p count:%d", i, pNode->p, pNode->count); + pNode = pNode->next; + num++; + } + } + } + + pthread_mutex_unlock(&tsRefMutex); + + return num; +} + +static int taosHashRef(SRefSet *pSet, void *p) +{ + int hash = 0; + int64_t v 
= (int64_t)p; + + for (int i = 0; i < sizeof(v); ++i) { + hash += (int)(v & 0xFFFF); + v = v >> 16; + i = i + 2; + } + + hash = hash % pSet->max; + + return hash; +} + +static void taosLockList(int64_t *lockedBy) { + int64_t tid = taosGetPthreadId(); + int i = 0; + while (atomic_val_compare_exchange_64(lockedBy, 0, tid) != 0) { + if (++i % 100 == 0) { + sched_yield(); + } + } +} + +static void taosUnlockList(int64_t *lockedBy) { + int64_t tid = taosGetPthreadId(); + if (atomic_val_compare_exchange_64(lockedBy, tid, 0) != tid) { + assert(false); + } +} + +static void taosInitRefModule(void) { + pthread_mutex_init(&tsRefMutex, NULL); +} + +static void taosIncRefCount(SRefSet *pSet) { + atomic_add_fetch_32(&pSet->count, 1); + uTrace("refId:%d inc count:%d", pSet->refId, pSet->count); +} + +static void taosDecRefCount(SRefSet *pSet) { + int32_t count = atomic_sub_fetch_32(&pSet->count, 1); + uTrace("refId:%d dec count:%d", pSet->refId, pSet->count); + + if (count > 0) return; + + pthread_mutex_lock(&tsRefMutex); + + if (pSet->state != TSDB_REF_STATE_EMPTY) { + pSet->state = TSDB_REF_STATE_EMPTY; + pSet->max = 0; + pSet->fp = NULL; + + taosTFree(pSet->nodeList); + taosTFree(pSet->lockedBy); + + tsRefSetNum--; + uTrace("refId:%d is cleaned, refSetNum:%d count:%d", pSet->refId, tsRefSetNum, pSet->count); + } + + pthread_mutex_unlock(&tsRefMutex); +} + diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 8687a8005ddeda7320c60c9ef90dd221f56b971f..0c96ed2a2f3dfb7f03268c9f8fbb1b0afa2397b9 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -9,7 +9,22 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - + + LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(utilTest tutil common osdetail gtest pthread gcov) + + LIST(APPEND 
BIN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) + ADD_EXECUTABLE(trefTest ${BIN_SRC}) + TARGET_LINK_LIBRARIES(trefTest common tutil) + ENDIF() + +#IF (TD_LINUX) +# ADD_EXECUTABLE(trefTest ./trefTest.c) +# TARGET_LINK_LIBRARIES(trefTest tutil common) +#ENDIF () + +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc) + + diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c new file mode 100644 index 0000000000000000000000000000000000000000..486f9f6d6d8cbde6f130aebc96efb209c5b26997 --- /dev/null +++ b/src/util/tests/trefTest.c @@ -0,0 +1,166 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "tref.h" +#include "tlog.h" +#include "tglobal.h" +#include "taoserror.h" +#include "tulog.h" + +typedef struct { + int refNum; + int steps; + int refId; + void **p; +} SRefSpace; + +void *takeRefActions(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + int code, id; + + for (int i=0; i < pSpace->steps; ++i) { + printf("s"); + id = random() % pSpace->refNum; + code = taosAddRef(pSpace->refId, pSpace->p[id]); + usleep(1); + + id = random() % pSpace->refNum; + code = taosAcquireRef(pSpace->refId, pSpace->p[id]); + if (code >= 0) { + usleep(id % 5 + 1); + taosReleaseRef(pSpace->refId, pSpace->p[id]); + } + + id = random() % pSpace->refNum; + taosRemoveRef(pSpace->refId, pSpace->p[id]); + usleep(id %5 + 1); + + id = random() % pSpace->refNum; + code = taosAcquireRef(pSpace->refId, pSpace->p[id]); + if (code >= 0) { + usleep(id % 5 + 1); + taosReleaseRef(pSpace->refId, pSpace->p[id]); + } + } + + for (int i=0; i < pSpace->refNum; ++i) { + taosRemoveRef(pSpace->refId, pSpace->p[i]); + } + + //uInfo("refId:%d thread exits", pSpace->refId); + + return NULL; +} + +void myfree(void *p) { + return; +} + +void *openRefSpace(void *param) { + SRefSpace *pSpace = (SRefSpace *)param; + + printf("c"); + pSpace->refId = taosOpenRef(10000, myfree); + + if (pSpace->refId < 0) { + printf("failed to open ref, reson:%s\n", tstrerror(pSpace->refId)); + 
return NULL; + } + + pSpace->p = (void **) calloc(sizeof(void *), pSpace->refNum); + for (int i=0; irefNum; ++i) { + pSpace->p[i] = (void *) malloc(128); + } + + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + pthread_t thread1, thread2, thread3; + pthread_create(&(thread1), &thattr, takeRefActions, (void *)(pSpace)); + pthread_create(&(thread2), &thattr, takeRefActions, (void *)(pSpace)); + pthread_create(&(thread3), &thattr, takeRefActions, (void *)(pSpace)); + + pthread_join(thread1, NULL); + pthread_join(thread2, NULL); + pthread_join(thread3, NULL); + + taosCloseRef(pSpace->refId); + + for (int i=0; irefNum; ++i) { + free(pSpace->p[i]); + } + + uInfo("refId:%d main thread exit", pSpace->refId); + free(pSpace->p); + pSpace->p = NULL; + + return NULL; +} + +int main(int argc, char *argv[]) { + int refNum = 100; + int threads = 10; + int steps = 10000; + int loops = 1; + + uDebugFlag = 143; + + for (int i=1; icfg.vgId, sizeof(int32_t)); - if (pTemp != NULL) { - vInfo("vgId:%d, vnode already exist, pVnode:%p", pVnodeCfg->cfg.vgId, pTemp); + SVnodeObj *pVnode = vnodeAcquire(pVnodeCfg->cfg.vgId); + if (pVnode != NULL) { + vDebug("vgId:%d, vnode already exist, refCount:%d pVnode:%p", pVnodeCfg->cfg.vgId, pVnode->refCount, pVnode); + vnodeRelease(pVnode); return TSDB_CODE_SUCCESS; } @@ -143,22 +153,24 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { return TSDB_CODE_VND_INIT_FAILED; } - vInfo("vgId:%d, vnode is created, walLevel:%d fsyncPeriod:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.walLevel, pVnodeCfg->cfg.fsyncPeriod); + vInfo("vgId:%d, vnode dir is created, walLevel:%d fsyncPeriod:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.walLevel, + pVnodeCfg->cfg.fsyncPeriod); code = vnodeOpen(pVnodeCfg->cfg.vgId, rootDir); return code; } int32_t vnodeDrop(int32_t vgId) { - SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); - if (ppVnode == NULL 
|| *ppVnode == NULL) { - vDebug("vgId:%d, failed to drop, vgId not find", vgId); + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vDebug("vgId:%d, failed to drop, vnode not find", vgId); return TSDB_CODE_VND_INVALID_VGROUP_ID; } - SVnodeObj *pVnode = *ppVnode; - vTrace("vgId:%d, vnode will be dropped, refCount:%d", pVnode->vgId, pVnode->refCount); + vInfo("vgId:%d, vnode will be dropped, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); pVnode->dropped = 1; + + vnodeRelease(pVnode); vnodeCleanUp(pVnode); return TSDB_CODE_SUCCESS; @@ -279,11 +291,15 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { if (pVnode->tsdb == NULL) { vnodeCleanUp(pVnode); return terrno; - } else if (terrno != TSDB_CODE_SUCCESS && pVnode->syncCfg.replica <= 1) { + } else if (terrno != TSDB_CODE_SUCCESS) { vError("vgId:%d, failed to open tsdb, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); - vnodeCleanUp(pVnode); - return terrno; + if (pVnode->syncCfg.replica <= 1) { + vnodeCleanUp(pVnode); + return terrno; + } else { + pVnode->version = 0; + } } sprintf(temp, "%s/wal", rootDir); @@ -340,11 +356,11 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { } int32_t vnodeClose(int32_t vgId) { - SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); - if (ppVnode == NULL || *ppVnode == NULL) return 0; + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) return 0; - SVnodeObj *pVnode = *ppVnode; - vDebug("vgId:%d, vnode will be closed", pVnode->vgId); + vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode); + vnodeRelease(pVnode); vnodeCleanUp(pVnode); return 0; @@ -355,21 +371,27 @@ void vnodeRelease(void *pVnodeRaw) { int32_t vgId = pVnode->vgId; int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); + vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", vgId, refCount, pVnode); assert(refCount >= 0); if (refCount > 0) { - 
vDebug("vgId:%d, release vnode, refCount:%d", vgId, refCount); - if (pVnode->status == TAOS_VN_STATUS_RESET && refCount == 2) + if (pVnode->status == TAOS_VN_STATUS_RESET && refCount == 2) { tsem_post(&pVnode->sem); + } return; } - qCleanupQueryMgmt(pVnode->qMgmt); - pVnode->qMgmt = NULL; + vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", vgId, refCount, pVnode); - if (pVnode->tsdb) + if (pVnode->qMgmt) { + qCleanupQueryMgmt(pVnode->qMgmt); + pVnode->qMgmt = NULL; + } + + if (pVnode->tsdb) { tsdbCloseRepo(pVnode->tsdb, 1); - pVnode->tsdb = NULL; + pVnode->tsdb = NULL; + } // stop continuous query if (pVnode->cq) { @@ -378,18 +400,21 @@ void vnodeRelease(void *pVnodeRaw) { cqClose(cq); } - if (pVnode->wal) + if (pVnode->wal) { walClose(pVnode->wal); - pVnode->wal = NULL; + pVnode->wal = NULL; + } - if (pVnode->wqueue) + if (pVnode->wqueue) { dnodeFreeVnodeWqueue(pVnode->wqueue); - pVnode->wqueue = NULL; + pVnode->wqueue = NULL; + } - if (pVnode->rqueue) + if (pVnode->rqueue) { dnodeFreeVnodeRqueue(pVnode->rqueue); - pVnode->rqueue = NULL; - + pVnode->rqueue = NULL; + } + taosTFree(pVnode->rootDir); if (pVnode->dropped) { @@ -413,31 +438,41 @@ void vnodeRelease(void *pVnodeRaw) { free(pVnode); int32_t count = taosHashGetSize(tsDnodeVnodesHash); - vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count); + vDebug("vgId:%d, vnode is destroyed, vnodes:%d", vgId, count); +} + +static void vnodeIncRef(void *ptNode) { + assert(ptNode != NULL); + + SVnodeObj **ppVnode = (SVnodeObj **)ptNode; + assert(ppVnode); + assert(*ppVnode); + + SVnodeObj *pVnode = *ppVnode; + atomic_add_fetch_32(&pVnode->refCount, 1); + vTrace("vgId:%d, get vnode, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); } void *vnodeAcquire(int32_t vgId) { - SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t)); + SVnodeObj **ppVnode = taosHashGetCB(tsDnodeVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void 
*)); + if (ppVnode == NULL || *ppVnode == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - vInfo("vgId:%d, not exist", vgId); + vDebug("vgId:%d, not exist", vgId); return NULL; } - SVnodeObj *pVnode = *ppVnode; - atomic_add_fetch_32(&pVnode->refCount, 1); - vDebug("vgId:%d, get vnode, refCount:%d", pVnode->vgId, pVnode->refCount); - - return pVnode; + return *ppVnode; } void *vnodeAcquireRqueue(int32_t vgId) { SVnodeObj *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) return NULL; - if (pVnode->status == TAOS_VN_STATUS_RESET) { - terrno = TSDB_CODE_APP_NOT_READY; - vInfo("vgId:%d, status is in reset", vgId); + int32_t code = vnodeCheckRead(pVnode); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + vInfo("vgId:%d, can not provide read service, status is %s", vgId, vnodeStatus[pVnode->status]); vnodeRelease(pVnode); return NULL; } @@ -449,13 +484,14 @@ void *vnodeAcquireWqueue(int32_t vgId) { SVnodeObj *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) return NULL; - if (pVnode->status == TAOS_VN_STATUS_RESET) { - terrno = TSDB_CODE_APP_NOT_READY; - vInfo("vgId:%d, status is in reset", vgId); + int32_t code = vnodeCheckWrite(pVnode); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + vInfo("vgId:%d, can not provide write service, status is %s", vgId, vnodeStatus[pVnode->status]); vnodeRelease(pVnode); return NULL; } - + return pVnode->wqueue; } @@ -528,7 +564,7 @@ void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) { if (pVnode != NULL) { pVnode->accessState = pAccess[i].accessState; if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) { - vDebug("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState) + vDebug("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState); } vnodeRelease(pVnode); } @@ -538,11 +574,12 @@ void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) { static void vnodeCleanUp(SVnodeObj *pVnode) { // remove from hash, so new messages wont be consumed 
taosHashRemove(tsDnodeVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t)); - int i = 0; if (pVnode->status != TAOS_VN_STATUS_INIT) { // it may be in updateing or reset state, then it shall wait - while (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_CLOSING) != TAOS_VN_STATUS_READY) { + int i = 0; + while (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_CLOSING) != + TAOS_VN_STATUS_READY) { if (++i % 1000 == 0) { sched_yield(); } @@ -556,7 +593,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { syncStop(sync); } - vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount); + vDebug("vgId:%d, vnode will cleanup, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); // release local resources only after cutting off outside connections qQueryMgmtNotifyClosed(pVnode->qMgmt); @@ -613,17 +650,19 @@ static int vnodeResetTsdb(SVnodeObj *pVnode) char rootDir[128] = "\0"; sprintf(rootDir, "%s/tsdb", pVnode->rootDir); - if (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_RESET) != TAOS_VN_STATUS_READY) + if (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_RESET) != TAOS_VN_STATUS_READY) { return -1; + } void *tsdb = pVnode->tsdb; pVnode->tsdb = NULL; // acquire vnode - int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - if (refCount > 2) + if (refCount > 2) { tsem_wait(&pVnode->sem); + } // close tsdb, then open tsdb tsdbCloseRepo(tsdb, 0); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index baaeae2a81d28331454eb414106eabe1ef64d939..99aed03e54ccd069e5879104f62eb01ff7bb3d05 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -14,7 +14,8 @@ */ #define _DEFAULT_SOURCE -//#include +#define _NON_BLOCKING_RETRIEVE 0 + #include "os.h" #include "tglobal.h" @@ -38,6 +39,11 @@ void 
vnodeInitReadFp(void) { vnodeProcessReadMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg; } +// +// After the fetch request enters the vnode queue, if the vnode cannot provide services, the process function are +// still required, or there will be a deadlock, so we don’t do any check here, but put the check codes before the +// request enters the queue +// int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { SVnodeObj *pVnode = (SVnodeObj *)param; int msgType = pReadMsg->rpcMsg.msgType; @@ -47,48 +53,72 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) { return TSDB_CODE_VND_MSG_NOT_PROCESSED; } + return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg); +} + +int32_t vnodeCheckRead(void *param) { + SVnodeObj *pVnode = param; if (pVnode->status != TAOS_VN_STATUS_READY) { - vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status); + vDebug("vgId:%d, vnode status is %s, recCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], + pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } // tsdb may be in reset state - if (pVnode->tsdb == NULL) return TSDB_CODE_APP_NOT_READY; - if (pVnode->status == TAOS_VN_STATUS_CLOSING) return TSDB_CODE_APP_NOT_READY; + if (pVnode->tsdb == NULL) { + vDebug("vgId:%d, tsdb is null, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + return TSDB_CODE_APP_NOT_READY; + } - // TODO: Later, let slave to support query - // if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER) { if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) { - vDebug("vgId:%d, msgType:%s not processed, replica:%d role:%s", pVnode->vgId, taosMsg[msgType], - pVnode->syncCfg.replica, syncRole[pVnode->role]); + vDebug("vgId:%d, replica:%d role:%s, recCount:%d pVnode:%p", pVnode->vgId, pVnode->syncCfg.replica, + syncRole[pVnode->role], pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; } - return 
(*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg); + return TSDB_CODE_SUCCESS; } +static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void *ahandle) { + int32_t code = vnodeCheckRead(pVnode); + if (code != TSDB_CODE_SUCCESS) return code; -static void vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle) { SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg)); pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY; pRead->pCont = qhandle; pRead->contLen = 0; - pRead->rpcMsg.handle = NULL; + pRead->rpcMsg.ahandle = ahandle; atomic_add_fetch_32(&pVnode->refCount, 1); vDebug("QInfo:%p add to vread queue for exec query, msg:%p", *qhandle, pRead); taosWriteQitem(pVnode->rqueue, TAOS_QTYPE_QUERY, pRead); + + return TSDB_CODE_SUCCESS; } -static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, bool *freeHandle) { +/** + * + * @param pRet response message object + * @param pVnode the vnode object + * @param handle qhandle for executing query + * @param freeHandle free qhandle or not + * @param ahandle sqlObj address at client side + * @return + */ +static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, bool *freeHandle, void *ahandle) { bool continueExec = false; int32_t code = TSDB_CODE_SUCCESS; if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) == TSDB_CODE_SUCCESS) { if (continueExec) { *freeHandle = false; - vnodePutItemIntoReadQueue(pVnode, handle); - pRet->qhandle = *handle; + code = vnodePutItemIntoReadQueue(pVnode, handle, ahandle); + if (code != TSDB_CODE_SUCCESS) { + *freeHandle = true; + return code; + } else { + pRet->qhandle = *handle; + } } else { *freeHandle = true; vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); @@ -165,11 +195,13 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { handle = 
qRegisterQInfo(pVnode->qMgmt, (uint64_t)pQInfo); - if (handle == NULL) { // failed to register qhandle, todo add error test case + if (handle == NULL) { // failed to register qhandle + pRsp->code = terrno; + terrno = 0; vError("vgId:%d QInfo:%p register qhandle failed, return to app, code:%s", pVnode->vgId, (void *)pQInfo, tstrerror(pRsp->code)); - pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE; qDestroyQueryInfo(pQInfo); // destroy it directly + return pRsp->code; } else { assert(*handle == pQInfo); pRsp->qhandle = htobe64((uint64_t)pQInfo); @@ -189,7 +221,12 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (handle != NULL) { vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); - vnodePutItemIntoReadQueue(pVnode, handle); + code = vnodePutItemIntoReadQueue(pVnode, handle, pReadMsg->rpcMsg.ahandle); + if (code != TSDB_CODE_SUCCESS) { + pRsp->code = code; + qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); + return pRsp->code; + } } } else { assert(pCont != NULL); @@ -197,6 +234,8 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); + +#if _NON_BLOCKING_RETRIEVE bool freehandle = false; bool buildRes = qTableQuery(*qhandle); // do execute query @@ -210,11 +249,14 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { pReadMsg->rpcMsg.handle); // set the real rsp error code - pReadMsg->rpcMsg.code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle); + pReadMsg->rpcMsg.code = vnodeDumpQueryResult(&pReadMsg->rspRet, pVnode, qhandle, &freehandle, pReadMsg->rpcMsg.ahandle); // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client code = TSDB_CODE_QRY_HAS_RSP; } else { + void* h1 = qGetResultRetrieveMsg(*qhandle); + assert(h1 == NULL); + freehandle = qQueryCompleted(*qhandle); } @@ -223,6 +265,10 @@ 
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { if (freehandle || (!buildRes)) { qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle); } +#else + qTableQuery(*qhandle); // do execute query + qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false); +#endif } return code; @@ -241,16 +287,22 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { memset(pRet, 0, sizeof(SRspRet)); + terrno = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS; void ** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qhandle); - if (handle == NULL || (*handle) != (void *)pRetrieve->qhandle) { + if (handle == NULL) { + code = terrno; + terrno = TSDB_CODE_SUCCESS; + } else if ((*handle) != (void *)pRetrieve->qhandle) { code = TSDB_CODE_QRY_INVALID_QHANDLE; - vDebug("vgId:%d, invalid qhandle in retrieving result, QInfo:%p", pVnode->vgId, (void *)pRetrieve->qhandle); + } + if (code != TSDB_CODE_SUCCESS) { + vDebug("vgId:%d, invalid handle in retrieving result, code:0x%08x, QInfo:%p", pVnode->vgId, code, (void *)pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } - + if (pRetrieve->free == 1) { vWarn("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); qKillQuery(*handle); @@ -282,12 +334,18 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) { memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); freeHandle = true; } else { // result is not ready, return immediately + assert(buildRes == true); +#if _NON_BLOCKING_RETRIEVE if (!buildRes) { + assert(pReadMsg->rpcMsg.handle != NULL); + qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false); return TSDB_CODE_QRY_NOT_READY; } +#endif - code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle); + // ahandle is the sqlObj pointer + code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, pReadMsg->rpcMsg.ahandle); } // If qhandle is not added into vread queue, the query should be completed already 
or paused with error. diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 0867c5c437f7c2bac79b0af06eb2a0af8d7f30ca..ef9916c1b8c5206af0c2e7ce032396f2fecb37fa 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -58,15 +58,6 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { return TSDB_CODE_VND_MSG_NOT_PROCESSED; } - if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { - vDebug("vgId:%d, msgType:%s not processed, no write auth", pVnode->vgId, taosMsg[pHead->msgType]); - return TSDB_CODE_VND_NO_WRITE_AUTH; - } - - // tsdb may be in reset state - if (pVnode->tsdb == NULL) return TSDB_CODE_APP_NOT_READY; - if (pVnode->status == TAOS_VN_STATUS_CLOSING) return TSDB_CODE_APP_NOT_READY; - if (pHead->version == 0) { // from client or CQ if (pVnode->status != TAOS_VN_STATUS_READY) { vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[pHead->msgType], @@ -107,6 +98,28 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) { return syncCode; } +int32_t vnodeCheckWrite(void *param) { + SVnodeObj *pVnode = param; + if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { + vDebug("vgId:%d, no write auth, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + return TSDB_CODE_VND_NO_WRITE_AUTH; + } + + // tsdb may be in reset state + if (pVnode->tsdb == NULL) { + vDebug("vgId:%d, tsdb is null, recCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + return TSDB_CODE_APP_NOT_READY; + } + + if (pVnode->status == TAOS_VN_STATUS_CLOSING) { + vDebug("vgId:%d, vnode status is %s, recCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], + pVnode->refCount, pVnode); + return TSDB_CODE_APP_NOT_READY; + } + + return TSDB_CODE_SUCCESS; +} + void vnodeConfirmForward(void *param, uint64_t version, int32_t code) { SVnodeObj *pVnode = (SVnodeObj *)param; syncConfirmForward(pVnode->sync, version, code); @@ -168,7 +181,7 @@ static 
int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet int32_t code = TSDB_CODE_SUCCESS; vDebug("vgId:%d, table:%s, start to drop", pVnode->vgId, pTable->tableId); - STableId tableId = {.uid = htobe64(pTable->uid), .tid = htonl(pTable->sid)}; + STableId tableId = {.uid = htobe64(pTable->uid), .tid = htonl(pTable->tid)}; if (tsdbDropTable(pVnode->tsdb, tableId) < 0) code = terrno; @@ -219,7 +232,7 @@ int vnodeWriteCqMsgToQueue(void *param, void *data, int type) { memcpy(pWal, pHead, size); atomic_add_fetch_32(&pVnode->refCount, 1); - vDebug("CQ: vgId:%d, get vnode wqueue, refCount:%d", pVnode->vgId, pVnode->refCount); + vTrace("CQ: vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); taosWriteQitem(pVnode->wqueue, type, pSync); @@ -236,7 +249,7 @@ int vnodeWriteToQueue(void *param, void *data, int type) { memcpy(pWal, pHead, size); atomic_add_fetch_32(&pVnode->refCount, 1); - vDebug("vgId:%d, get vnode wqueue, refCount:%d", pVnode->vgId, pVnode->refCount); + vTrace("vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); taosWriteQitem(pVnode->wqueue, type, pWal); diff --git a/src/wal/inc/walInt.h b/src/wal/inc/walInt.h new file mode 100644 index 0000000000000000000000000000000000000000..593611589d6e1eb5d90a0eb8986f21bb32d5d1c6 --- /dev/null +++ b/src/wal/inc/walInt.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_WAL_INT_H +#define TDENGINE_WAL_INT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tlog.h" + +extern int32_t wDebugFlag; + +#define wFatal(...) { if (wDebugFlag & DEBUG_FATAL) { taosPrintLog("WAL FATAL ", 255, __VA_ARGS__); }} +#define wError(...) { if (wDebugFlag & DEBUG_ERROR) { taosPrintLog("WAL ERROR ", 255, __VA_ARGS__); }} +#define wWarn(...) { if (wDebugFlag & DEBUG_WARN) { taosPrintLog("WAL WARN ", 255, __VA_ARGS__); }} +#define wInfo(...) { if (wDebugFlag & DEBUG_INFO) { taosPrintLog("WAL ", 255, __VA_ARGS__); }} +#define wDebug(...) { if (wDebugFlag & DEBUG_DEBUG) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} +#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} + +#define walPrefix "wal" +#define walSignature (uint32_t)(0xFAFBFDFE) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/wal/inc/walMgmt.h b/src/wal/inc/walMgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..a23c7f8ec3d0064de23c03a62a81388b63b0f319 --- /dev/null +++ b/src/wal/inc/walMgmt.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_WAL_MGMT_H +#define TDENGINE_WAL_MGMT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..2dd094d860528fa95084b7d3a0f404007cb757ed --- /dev/null +++ b/src/wal/src/walMgmt.c @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "twal.h" +#include "walInt.h" +#include "walMgmt.h" \ No newline at end of file diff --git a/src/wal/src/walMain.c b/src/wal/src/walWrite.c similarity index 89% rename from src/wal/src/walMain.c rename to src/wal/src/walWrite.c index d7fd1b84c97b94730971277a699c40e3252c335f..95587caa141a8964a845a9a0b47485bfc20fa998 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walWrite.c @@ -14,11 +14,10 @@ */ #define _DEFAULT_SOURCE - -#define TAOS_RANDOM_FILE_FAIL_TEST - #include "os.h" -#include "tlog.h" +#include "twal.h" +#include "walInt.h" +#include "walMgmt.h" #include "tchecksum.h" #include "tutil.h" #include "ttimer.h" @@ -26,14 +25,6 @@ #include "twal.h" #include "tqueue.h" -#define walPrefix "wal" - -#define wFatal(...) { if (wDebugFlag & DEBUG_FATAL) { taosPrintLog("WAL FATAL ", 255, __VA_ARGS__); }} -#define wError(...) { if (wDebugFlag & DEBUG_ERROR) { taosPrintLog("WAL ERROR ", 255, __VA_ARGS__); }} -#define wWarn(...) 
{ if (wDebugFlag & DEBUG_WARN) { taosPrintLog("WAL WARN ", 255, __VA_ARGS__); }} -#define wInfo(...) { if (wDebugFlag & DEBUG_INFO) { taosPrintLog("WAL ", 255, __VA_ARGS__); }} -#define wDebug(...) { if (wDebugFlag & DEBUG_DEBUG) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} -#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", wDebugFlag, __VA_ARGS__); }} typedef struct { uint64_t version; @@ -54,12 +45,12 @@ typedef struct { static void *walTmrCtrl = NULL; static int tsWalNum = 0; static pthread_once_t walModuleInit = PTHREAD_ONCE_INIT; -static uint32_t walSignature = 0xFAFBFDFE; static int walHandleExistingFiles(const char *path); static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp); static int walRemoveWalFiles(const char *path); static void walProcessFsyncTimer(void *param, void *tmrId); static void walRelease(SWal *pWal); +static int walGetMaxOldFileId(char *odir); static void walModuleInitFunc() { walTmrCtrl = taosTmrInit(1000, 100, 300000, "WAL"); @@ -249,11 +240,13 @@ int walWrite(void *handle, SWalHead *pHead) { if (taosTWrite(pWal->fd, pHead, contLen) != contLen) { wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); + return terrno; } else { pWal->version = pHead->version; } + ASSERT(contLen == pHead->len + sizeof(SWalHead)); - return terrno; + return 0; } void walFsync(void *handle) { @@ -312,7 +305,7 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) for (index = minId; index <= maxId; ++index) { snprintf(pWal->name, sizeof(pWal->name), "%s/%s%d", opath, walPrefix, index); terrno = walRestoreWalFile(pWal, pVnode, writeFp); - if (terrno < 0) break; + if (terrno < 0) continue; } } @@ -423,7 +416,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wWarn("wal:%s, cksum is messed up, skip the rest of file", name); terrno = 
TSDB_CODE_WAL_FILE_CORRUPTED; - // ASSERT(false); + ASSERT(false); break; } @@ -476,31 +469,26 @@ int walHandleExistingFiles(const char *path) { int plen = strlen(walPrefix); terrno = 0; - if (access(opath, F_OK) == 0) { - // old directory is there, it means restore process is not finished - walRemoveWalFiles(path); - - } else { - // move all files to old directory - int count = 0; - while ((ent = readdir(dir)) != NULL) { - if (strncmp(ent->d_name, walPrefix, plen) == 0) { - snprintf(oname, sizeof(oname), "%s/%s", path, ent->d_name); - snprintf(nname, sizeof(nname), "%s/old/%s", path, ent->d_name); - if (taosMkDir(opath, 0755) != 0) { - wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } - - if (rename(oname, nname) < 0) { - wError("wal:%s, failed to move to new:%s", oname, nname); - terrno = TAOS_SYSTEM_ERROR(errno); - break; - } + int midx = walGetMaxOldFileId(opath); + int count = 0; + while ((ent = readdir(dir)) != NULL) { + if (strncmp(ent->d_name, walPrefix, plen) == 0) { + midx++; + snprintf(oname, sizeof(oname), "%s/%s", path, ent->d_name); + snprintf(nname, sizeof(nname), "%s/old/wal%d", path, midx); + if (taosMkDir(opath, 0755) != 0) { + wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + break; + } - count++; + if (rename(oname, nname) < 0) { + wError("wal:%s, failed to move to new:%s", oname, nname); + terrno = TAOS_SYSTEM_ERROR(errno); + break; } + + count++; } wDebug("wal:%s, %d files are moved for restoration", path, count); @@ -563,4 +551,30 @@ int64_t walGetVersion(twalh param) { if (pWal == 0) return 0; return pWal->version; +} + +static int walGetMaxOldFileId(char *odir) { + int midx = 0; + DIR * dir = NULL; + struct dirent *dp = NULL; + int plen = strlen(walPrefix); + + if (access(odir, F_OK) != 0) return midx; + + dir = opendir(odir); + if (dir == NULL) { + wError("failed to open directory %s 
since %s", odir, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + while ((dp = readdir(dir)) != NULL) { + if (strncmp(dp->d_name, walPrefix, plen) == 0) { + int idx = atol(dp->d_name + plen); + if (midx < idx) midx = idx; + } + } + + closedir(dir); + return midx; } \ No newline at end of file diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c index 237403f52565c1b68b21e895d5555a2a673bb5f7..1298aa8323b0f876e3c64ff88323031b8d529648 100644 --- a/tests/comparisonTest/tdengine/tdengineTest.c +++ b/tests/comparisonTest/tdengine/tdengineTest.c @@ -108,9 +108,9 @@ void parseArg(int argc, char *argv[]) { } } -void taos_error(TAOS *con) { - printf("TDengine error: %s\n", taos_errstr(con)); - taos_close(con); +static void taos_error(TAOS_RES *tres, TAOS *conn) { + printf("TDengine error: %s\n", tres?taos_errstr(tres):"null result"); + taos_close(conn); exit(1); } @@ -125,13 +125,17 @@ void writeDataImp(void *param) { printf("Thread %d, writing sID %d, eID %d\n", pThread->threadId, pThread->sID, pThread->eID); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); - if (taos == NULL) - taos_error(taos); + if (taos == NULL) { + // where to find errstr? + // taos_error(NULL, taos); + printf("TDengine error: %s\n", "failed to connect"); + exit(1); + } TAOS_RES* result = taos_query(taos, "use db"); int32_t code = taos_errno(result); if (code != 0) { - taos_error(taos); + taos_error(result, taos); } taos_free_result(result); @@ -227,12 +231,17 @@ void writeData() { taos_init(); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); - if (taos == NULL) taos_error(taos); + if (taos == NULL) { + // where to find errstr? 
+ // taos_error(NULL, taos); + printf("TDengine error: %s\n", "failed to connect"); + exit(1); + } TAOS_RES *result = taos_query(taos, "create database if not exists db"); int32_t code = taos_errno(result); if (code != 0) { - taos_error(taos); + taos_error(result, taos); } taos_free_result(result); @@ -241,7 +250,7 @@ void writeData() { "tags(devid int, devname binary(16), devgroup int)"); code = taos_errno(result); if (code != 0) { - taos_error(taos); + taos_error(result, taos); } taos_free_result(result); @@ -293,8 +302,12 @@ void readDataImp(void *param) printf("open file %s success\n", arguments.sql); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); - if (taos == NULL) - taos_error(taos); + if (taos == NULL) { + // where to find errstr? + // taos_error(NULL, taos); + printf("TDengine error: %s\n", "failed to connect"); + exit(1); + } char *line = NULL; size_t len = 0; @@ -313,7 +326,7 @@ void readDataImp(void *param) TAOS_RES *result = taos_query(taos, line); int32_t code = taos_errno(result); if (code != 0) { - taos_error(taos); + taos_error(result, taos); } TAOS_ROW row; @@ -343,8 +356,12 @@ void readData() { printf("---- clients: %d\n", arguments.clients); void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); - if (taos == NULL) - taos_error(taos); + if (taos == NULL) { + // where to find errstr? + // taos_error(NULL, taos); + printf("TDengine error: %s\n", "failed to connect"); + exit(1); + } ThreadObj *threads = calloc((size_t)arguments.clients, sizeof(ThreadObj)); diff --git a/tests/examples/C#/C#checker/C#checker.cs b/tests/examples/C#/C#checker/C#checker.cs new file mode 100644 index 0000000000000000000000000000000000000000..24b7060b14862e220b9b08a362e27cd65ae4eb7d --- /dev/null +++ b/tests/examples/C#/C#checker/C#checker.cs @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; + +namespace TDengineDriver +{ + class TDengineTest + { + //connect parameters + private string host; + private string configDir; + private string user; + private string password; + private short port = 0; + + //sql parameters + private string dbName; + private string tbName; + + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineTest tester = new TDengineTest(); + tester.ReadArgument(args); + + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.createDatabase(); + tester.useDatabase(); + tester.checkDropTable(); + tester.createTable(); + tester.checkInsert(); + tester.checkSelect(); + tester.checkDropTable(); + + tester.CloseConnection(); + + + + } + + public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + + 
long tmpVal = Convert.ToInt64(tmp); + if (tmpVal < minVal || tmpVal > maxVal) + { + Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); + ExitProgram(); + } + + return tmpVal; + } + } + + return defaultValue; + } + + public String GetArgumentAsString(String[] argv, String argName, String defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + return tmp; + } + } + + return defaultValue; + } + + public void PrintHelp(String[] argv) + { + for (int i = 0; i < argv.Length; ++i) + { + if ("--help" == argv[i]) + { + String indent = " "; + Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); + Console.WriteLine("{0:G}{1:G}", indent, "-h"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); + Console.WriteLine("{0:G}{1:G}", indent, "-u"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); + Console.WriteLine("{0:G}{1:G}", indent, "-p"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); + Console.WriteLine("{0:G}{1:G}", indent, "-d"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); + Console.WriteLine("{0:G}{1:G}", indent, "-s"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); + Console.WriteLine("{0:G}{1:G}", indent, "-t"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); + Console.WriteLine("{0:G}{1:G}", indent, "-w"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to 
insert data"); + Console.WriteLine("{0:G}{1:G}", indent, "-r"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); + Console.WriteLine("{0:G}{1:G}", indent, "-n"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-b"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-i"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); + Console.WriteLine("{0:G}{1:G}", indent, "-c"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); + + ExitProgram(); + } + } + } + + public void ReadArgument(String[] argv) + { + PrintHelp(argv); + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); + user = this.GetArgumentAsString(argv, "-u", "root"); + password = this.GetArgumentAsString(argv, "-p", "taosdata"); + dbName = this.GetArgumentAsString(argv, "-db", "test"); + tbName = this.GetArgumentAsString(argv, "-s", "weather"); + + isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; + isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; + tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); + batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000, 500); + totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); + configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + Console.WriteLine("get connection starting..."); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + 
Console.WriteLine("connection failed: " + this.host); + ExitProgram(); + } + else + { + Console.WriteLine("[ OK ] Connection established."); + } + } + public void createDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("create database if not exists ").Append(this.dbName); + execute(sql.ToString()); + } + public void useDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("use ").Append(this.dbName); + execute(sql.ToString()); + } + public void checkSelect() + { + StringBuilder sql = new StringBuilder(); + sql.Append("select * from test.weather"); + execute(sql.ToString()); + } + public void createTable() + { + StringBuilder sql = new StringBuilder(); + sql.Append("create table if not exists ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts timestamp, temperature float, humidity int)"); + execute(sql.ToString()); + } + public void checkInsert() + { + StringBuilder sql = new StringBuilder(); + sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"); + execute(sql.ToString()); + } + public void checkDropTable() + { + StringBuilder sql = new StringBuilder(); + sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append(""); + execute(sql.ToString()); + } + public void execute(string sql) + { + DateTime dt1 = DateTime.Now; + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + + if (res != IntPtr.Zero) + { + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + } + else + { + Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); + ExitProgram(); + } + TDengine.FreeResult(res); + } + + public void ExecuteQuery(string sql) + { + + DateTime dt1 = DateTime.Now; + long queryRows = 0; + IntPtr res = TDengine.Query(conn, sql); + if (res == IntPtr.Zero) + { + Console.WriteLine(sql + " failure, reason: " + 
TDengine.Error(res)); + ExitProgram(); + } + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + int fieldCount = TDengine.FieldCount(res); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + 
case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + + TDengine.FreeResult(res); + + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + Console.WriteLine("connection closed."); + } + } + + static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + } +} diff --git a/tests/examples/C#/C#checker/TDengineDriver.cs b/tests/examples/C#/C#checker/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..b6f143e1813d60c1ac4ae8356efdca4929c51345 --- /dev/null +++ b/tests/examples/C#/C#checker/TDengineDriver.cs @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + 
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); 
+ } + + return metas; + } + + [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md new file mode 100644 index 0000000000000000000000000000000000000000..1c0e4c100b1b7fb9e1ff85c3051c6911d8078abe --- /dev/null +++ b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -0,0 +1,268 @@ +# 如何在 windows环境下使用jdbc进行TDengine应用开发 + +本文以windows环境为例,介绍java如何进行TDengine开发应用 + +## 环境准备 + +(1)安装jdk + +官网下载jdk-1.8,下载页面:https://www.oracle.com/java/technologies/javase/javase-jdk8-downloads.html + +安装,配置环境变量,把jdk加入到环境变量里。 + +命令行内查看java的版本。 + +```shell +>java -version +java version "1.8.0_131" +Java(TM) SE Runtime Environment (build 1.8.0_131-b11) +Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode) +``` + + +(2)安装配置maven + +官网下载maven,下载地址:http://maven.apache.org/download.cgi + +配置环境变量MAVEN_HOME,将MAVEN_HOME/bin添加到PATH + +命令行里查看maven的版本 + +```shell +>mvn --version +Apache Maven 3.5.0 (ff8f5e7444045639af65f6095c62210b5713f426; 2017-04-04T03:39:06+08:00) +Maven home: D:\apache-maven-3.5.0\bin\.. 
+Java version: 1.8.0_131, vendor: Oracle Corporation +Java home: C:\Program Files\Java\jdk1.8.0_131\jre +Default locale: zh_CN, platform encoding: GBK +OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows" +``` + +为了加快maven下载依赖的速度,可以为maven配置mirror,修改MAVEN_HOME\config\settings.xml文件 + +```xml + + + D:\apache-maven-localRepository + + + + + alimaven + aliyun maven + http://maven.aliyun.com/nexus/content/groups/public/ + central + + + + + + + jdk-1.8 + + true + 1.8 + + + 1.8 + 1.8 + 1.8 + + + + +``` + + + +(3)在linux服务器上安装TDengine-server + +在taosdata官网下载TDengine-server,下载地址:https://www.taosdata.com/cn/all-downloads/ + +在linux服务器上安装TDengine-server + +```shell +# tar -zxvf package/TDengine-server-2.0.1.1-Linux-x64.tar.gz +# cd TDengine-server/ +# ./install.sh +``` + +启动taosd + +```shell +# systemctl start taosd +``` + +在server上用taos连接taosd + +```shell +# taos +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | +================================================================================================================== + 1 | td01:6030 | 2 | 4 | ready | any | 2020-08-19 18:40:25.045 | +Query OK, 1 row(s) in set (0.005765s) +``` + +如果可以正确连接到taosd实例,并打印出databases的信息,说明TDengine的server已经正确启动。这里查看server的hostname + +```shell +# hostname -f +td01 +``` + +注意,如果安装TDengine后,使用默认的taos.cfg配置文件,taosd会使用当前server的hostname创建dnode实例。之后,在client也需要使用这个hostname来连接taosd。 + + + +(4)在windows上安装TDengine-client + +在taosdata官网下载taos客户端,下载地址: +https://www.taosdata.com/cn/all-downloads/ +下载后,双击exe安装。 + +修改client的hosts文件(C:\Windows\System32\drivers\etc\hosts),将server的hostname和ip配置到client的hosts文件中 + +``` +192.168.236.136 td01 +``` + +配置完成后,在命令行内使用taos shell连接server端 + +```shell +C:\TDengine>taos +Welcome to the TDengine shell from Linux, Client Version:2.0.1.1 +Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
+ +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status | +=================================================================================================================================================================================================================================================================== + test | 2020-08-19 18:43:50.731 | 1 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready | + log | 2020-08-19 18:40:28.064 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready | +Query OK, 2 row(s) in set (0.068000s) +``` + +如果windows上的client能够正常连接,并打印database信息,说明client可以正常连接server了。 + + + +## 应用开发 + +(1)新建maven工程,在pom.xml中引入taos-jdbcdriver依赖。 + +```xml + + + 4.0.0 + + com.taosdata.demo + JdbcDemo + 1.0-SNAPSHOT + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.8 + + + +``` + +(2)使用jdbc查询TDengine数据库 + +下面是示例代码: + +```java +public class JdbcDemo { + + public static void main(String[] args) throws Exception { + Connection conn = getConn(); + Statement stmt = conn.createStatement(); + // create database + stmt.executeUpdate("create database if not exists db"); + // use database + stmt.executeUpdate("use db"); + // create table + stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); + // insert data + int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + System.out.println("insert " + affectedRows + " rows."); + // query data + ResultSet resultSet = stmt.executeQuery("select * from tb"); + Timestamp ts = null; + int temperature = 0; + float humidity = 0; + while(resultSet.next()){ + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); + } 
+ } + + public static Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://td01:0/log?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; + } + +} +``` + +(3)测试jdbc访问tdengine的sever实例 + +console输出: + +``` +insert 2 rows. +2020-08-26 00:06:34.575, 23, 10.3 +2020-08-26 00:06:35.575, 20, 9.3 +``` + + + +## 指南 + +(1)如何设置主机名和hosts + +在server上查看hostname和fqdn +```shell +查看hostname +# hostname +taos-server + +查看fqdn +# hostname -f +taos-server +``` + +windows下hosts文件位于: +C:\\Windows\System32\drivers\etc\hosts +修改hosts文件,添加server的ip和hostname + +```s +192.168.56.101 node5 +``` + +(2)什么是fqdn? + + +> 什么是FQDN? +> +> FQDN(Full qualified domain name)全限定域名,fqdn由2部分组成:hostname+domainname。 +> +> 例如,一个邮件服务器的fqdn可能是:mymail.somecollege.edu,其中mymail是hostname(主机名),somcollege.edu是domainname(域名)。本例中,.edu是顶级域名,.somecollege是二级域名。 +> +> 当连接服务器时,必须指定fqdn,然后,dns服务器通过查看dns表,将hostname解析为相应的ip地址。如果只指定hostname(不指定domainname),应用程序可能服务解析主机名。因为如果你试图访问不在本地的远程服务器时,本地的dns服务器和可能没有远程服务器的hostname列表。 +> +> 参考:https://kb.iu.edu/d/aiuv diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 21130746741209b1b9e89d00a73c7e52e496b519..121a3b5cd63579b47fb166c7365090f06d05c0da 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -5,18 +5,11 @@ 4.0.0 com.taosdata.jdbc - jdbcdemo - 1.0-SNAPSHOT + jdbcChecker + SNAPSHOT jar - - - org.apache.maven.plugins - maven-plugins - 30 - - org.apache.maven.plugins maven-assembly-plugin @@ -30,7 +23,7 @@ - TSDBSyncSample + com.taosdata.example.JdbcChecker @@ -63,12 +56,18 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.4 + 2.0.8 
log4j log4j 1.2.17 + + junit + junit + 4.13.1 + test + diff --git a/tests/examples/JDBC/JDBCDemo/readme.md b/tests/examples/JDBC/JDBCDemo/readme.md index a91624a9e47a3015d88e2e9aa9f62cf8dd0672cc..9b8790adaddb20246232392dd323ec502102fa18 100644 --- a/tests/examples/JDBC/JDBCDemo/readme.md +++ b/tests/examples/JDBC/JDBCDemo/readme.md @@ -2,12 +2,14 @@ TDengine's JDBC demo project is organized in a Maven way so that users can easily compile, package and run the project. If you don't have Maven on your server, you may install it using
    sudo apt-get install maven
    -## Compile and Install JDBC Driver -TDengine's JDBC driver jar is not yet published to maven center repo, so we need to manually compile it and install it to the local Maven repository. This can be easily done with Maven. Go to source directory of the JDBC driver ``TDengine/src/connector/jdbc`` and execute -
    mvn clean package install
    +## Install TDengine Client +Make sure you have already installed a tdengine client on your current develop environment. +Download the tdengine package on our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client. ## Compile the Demo Code and Run It To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute -
    mvn clean assembly:single package
    -The ``pom.xml`` is configured to package all the dependencies into one executable jar file. To run it, go to ``examples/JDBC/JDBCDemo/target`` and execute -
    java -jar jdbcdemo-1.0-SNAPSHOT-jar-with-dependencies.jar
    +
    mvn clean package assembly:single
    +The ``pom.xml`` is configured to package all the dependencies into one executable jar file. + +To run it, go to ``examples/JDBC/JDBCDemo/target`` and execute +
    java -jar jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host localhost
    \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java b/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java deleted file mode 100644 index c093b604da6dc6f815272f99d7fc786dab87928b..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java +++ /dev/null @@ -1,205 +0,0 @@ -import java.sql.*; - -public class TSDBSyncSample { - private static final String JDBC_PROTOCAL = "jdbc:TAOS://"; - private static final String TSDB_DRIVER = "com.taosdata.jdbc.TSDBDriver"; - - private String host = "127.0.0.1"; - private String user = "root"; - private String password = "taosdata"; - private int port = 0; - private String jdbcUrl = ""; - - private String databaseName = "db"; - private String metricsName = "mt"; - private String tablePrefix = "t"; - - private int tablesCount = 1; - private int loopCount = 2; - private int batchSize = 10; - private long beginTimestamp = 1519833600000L; - - private long rowsInserted = 0; - - static { - try { - Class.forName(TSDB_DRIVER); - } catch (Exception e) { - e.printStackTrace(); - } - } - - /** - * @param args - */ - public static void main(String[] args) { - TSDBSyncSample tester = new TSDBSyncSample(); - tester.doReadArgument(args); - - System.out.println("---------------------------------------------------------------"); - System.out.println("Start testing..."); - System.out.println("---------------------------------------------------------------"); - - tester.doMakeJdbcUrl(); - tester.doCreateDbAndTable(); - tester.doExecuteInsert(); - tester.doExecuteQuery(); - - System.out.println("\n---------------------------------------------------------------"); - System.out.println("Stop testing..."); - System.out.println("---------------------------------------------------------------"); - } - - private void doReadArgument(String[] args) { - System.out.println("Arguments format: host tables loop batchs"); - if (args.length >= 1) { - 
this.host = args[0]; - } - - if (args.length >= 2) { - this.tablesCount = Integer.parseInt(args[1]); - } - - if (args.length >= 3) { - this.loopCount = Integer.parseInt(args[2]); - } - - if (args.length >= 4) { - this.batchSize = Integer.parseInt(args[3]); - } - } - - private void doMakeJdbcUrl() { - // jdbc:TSDB://127.0.0.1:0/dbname?user=root&password=taosdata - System.out.println("\nJDBC URL to use:"); - this.jdbcUrl = String.format("%s%s:%d/%s?user=%s&password=%s", JDBC_PROTOCAL, this.host, this.port, "", - this.user, this.password); - System.out.println(this.jdbcUrl); - } - - private void doCreateDbAndTable() { - System.out.println("\n---------------------------------------------------------------"); - System.out.println("Start creating databases and tables..."); - String sql = ""; - try (Connection conn = DriverManager.getConnection(jdbcUrl); - Statement stmt = conn.createStatement()){ - - sql = "create database if not exists " + this.databaseName; - stmt.executeUpdate(sql); - System.out.printf("Successfully executed: %s\n", sql); - - sql = "use " + this.databaseName; - stmt.executeUpdate(sql); - System.out.printf("Successfully executed: %s\n", sql); - - sql = "create table if not exists " + this.metricsName + " (ts timestamp, v1 int) tags(t1 int)"; - stmt.executeUpdate(sql); - System.out.printf("Successfully executed: %s\n", sql); - - for (int i = 0; i < this.tablesCount; i++) { - sql = String.format("create table if not exists %s%d using %s tags(%d)", this.tablePrefix, i, - this.metricsName, i); - stmt.executeUpdate(sql); - System.out.printf("Successfully executed: %s\n", sql); - } - } catch (SQLException e) { - e.printStackTrace(); - System.out.printf("Failed to execute SQL: %s\n", sql); - System.exit(4); - } catch (Exception e) { - e.printStackTrace(); - System.exit(4); - } - System.out.println("Successfully created databases and tables"); - } - - public void doExecuteInsert() { - 
System.out.println("\n---------------------------------------------------------------"); - System.out.println("Start inserting data..."); - int start = (int) System.currentTimeMillis(); - StringBuilder sql = new StringBuilder(""); - try (Connection conn = DriverManager.getConnection(jdbcUrl); - Statement stmt = conn.createStatement()){ - stmt.executeUpdate("use " + databaseName); - for (int loop = 0; loop < this.loopCount; loop++) { - for (int table = 0; table < this.tablesCount; ++table) { - sql = new StringBuilder("insert into "); - sql.append(this.tablePrefix).append(table).append(" values"); - for (int batch = 0; batch < this.batchSize; ++batch) { - int rows = loop * this.batchSize + batch; - sql.append("(").append(this.beginTimestamp + rows).append(",").append(rows).append(")"); - } - int affectRows = stmt.executeUpdate(sql.toString()); - this.rowsInserted += affectRows; - } - } - } catch (SQLException e) { - e.printStackTrace(); - System.out.printf("Failed to execute SQL: %s\n", sql.toString()); - System.exit(4); - } catch (Exception e) { - e.printStackTrace(); - System.exit(4); - } - int end = (int) System.currentTimeMillis(); - System.out.println("Inserting completed!"); - System.out.printf("Total %d rows inserted, %d rows failed, time spend %d seconds.\n", this.rowsInserted, - this.loopCount * this.batchSize - this.rowsInserted, (end - start) / 1000); - } - - public void doExecuteQuery() { - System.out.println("\n---------------------------------------------------------------"); - System.out.println("Starting querying data..."); - ResultSet resSet = null; - StringBuilder sql = new StringBuilder(""); - StringBuilder resRow = new StringBuilder(""); - try (Connection conn = DriverManager.getConnection(jdbcUrl); - Statement stmt = conn.createStatement()){ - stmt.executeUpdate("use " + databaseName); - for (int i = 0; i < this.tablesCount; ++i) { - sql = new StringBuilder("select * from ").append(this.tablePrefix).append(i); - - resSet = 
stmt.executeQuery(sql.toString()); - if (resSet == null) { - System.out.println(sql + " failed"); - System.exit(4); - } - - ResultSetMetaData metaData = resSet.getMetaData(); - System.out.println("Retrieve metadata of " + tablePrefix + i); - for (int column = 1; column <= metaData.getColumnCount(); ++column) { - System.out.printf("Column%d: name = %s, type = %d, type name = %s, display size = %d\n", column, metaData.getColumnName(column), metaData.getColumnType(column), - metaData.getColumnTypeName(column), metaData.getColumnDisplaySize(column)); - } - int rows = 0; - System.out.println("Retrieve data of " + tablePrefix + i); - while (resSet.next()) { - resRow = new StringBuilder(); - for (int col = 1; col <= metaData.getColumnCount(); col++) { - resRow.append(metaData.getColumnName(col)).append("=").append(resSet.getObject(col)) - .append(" "); - } - System.out.println(resRow.toString()); - rows++; - } - - try { - if (resSet != null) - resSet.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - System.out.printf("Successfully executed query: %s;\nTotal rows returned: %d\n", sql.toString(), rows); - } - } catch (SQLException e) { - e.printStackTrace(); - System.out.printf("Failed to execute query: %s\n", sql.toString()); - System.exit(4); - } catch (Exception e) { - e.printStackTrace(); - System.exit(4); - } - System.out.println("Query completed!"); - } - -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java similarity index 98% rename from tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java rename to tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java index 74e586d7fdf1a0f8ad65a807134caae7e05f6d4a..4be71c52214c348ed7b41c3e763de0d908514907 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCConnectorChecker.java +++ 
b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java @@ -5,7 +5,7 @@ import com.taosdata.jdbc.TSDBDriver; import java.sql.*; import java.util.Properties; -public class JDBCConnectorChecker { +public class JdbcChecker { private static String host; private static String dbName = "test"; private static String tbName = "weather"; @@ -120,6 +120,7 @@ public class JDBCConnectorChecker { printSql(sql, execute, (end - start)); } catch (SQLException e) { e.printStackTrace(); + } } @@ -157,7 +158,7 @@ public class JDBCConnectorChecker { return; } - JDBCConnectorChecker checker = new JDBCConnectorChecker(); + JdbcChecker checker = new JdbcChecker(); checker.init(); checker.createDatabase(); checker.useDatabase(); diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java index c30d85a084d7175e9e6861ad33d7374a868553d9..259985ec9f4708b9317575fd97919adcc82d7161 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java @@ -25,6 +25,7 @@ public class JdbcTaosdemo { } public static void main(String[] args) { + // parse config from args JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); boolean isHelp = Arrays.asList(args).contains("--help"); @@ -38,27 +39,51 @@ public class JdbcTaosdemo { } JdbcTaosdemo taosdemo = new JdbcTaosdemo(config); + // establish connection taosdemo.init(); + // drop database taosdemo.dropDatabase(); + // create database taosdemo.createDatabase(); + // use db taosdemo.useDatabase(); + // create super table taosdemo.createSuperTable(); + // create sub tables taosdemo.createTableMultiThreads(); boolean infinite = Arrays.asList(args).contains("--infinite"); if (infinite) { - logger.info("!!! Infinite Insert Mode Started. 
!!!!"); + logger.info("!!! Infinite Insert Mode Started. !!!"); taosdemo.insertInfinite(); } else { + // insert into table taosdemo.insertMultiThreads(); - // single table select + // select from sub table taosdemo.selectFromTableLimit(); taosdemo.selectCountFromTable(); taosdemo.selectAvgMinMaxFromTable(); - // super table select + // select last from + taosdemo.selectLastFromTable(); + // select from super table taosdemo.selectFromSuperTableLimit(); taosdemo.selectCountFromSuperTable(); taosdemo.selectAvgMinMaxFromSuperTable(); + //select avg ,max from stb where tag + taosdemo.selectAvgMinMaxFromSuperTableWhereTag(); + //select last from stb where location = '' + taosdemo.selectLastFromSuperTableWhere(); + // select group by + taosdemo.selectGroupBy(); + // select like + taosdemo.selectLike(); + // select where ts >= ts<= + taosdemo.selectLastOneHour(); + taosdemo.selectLastOneDay(); + taosdemo.selectLastOneWeek(); + taosdemo.selectLastOneMonth(); + taosdemo.selectLastOneYear(); + // drop super table if (config.isDeleteTable()) taosdemo.dropSuperTable(); @@ -196,6 +221,11 @@ public class JdbcTaosdemo { executeQuery(sql); } + private void selectLastFromTable() { + String sql = SqlSpeller.selectLastFromTableSQL(config.getDbName(), config.getTbPrefix(), 1); + executeQuery(sql); + } + private void selectFromSuperTableLimit() { String sql = SqlSpeller.selectFromSuperTableLimitSQL(config.getDbName(), config.getStbName(), 10, 0); executeQuery(sql); @@ -211,6 +241,52 @@ public class JdbcTaosdemo { executeQuery(sql); } + private void selectAvgMinMaxFromSuperTableWhereTag() { + String sql = SqlSpeller.selectAvgMinMaxFromSuperTableWhere("current", config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastFromSuperTableWhere() { + String sql = SqlSpeller.selectLastFromSuperTableWhere("current", config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectGroupBy() { + String sql = 
SqlSpeller.selectGroupBy("current", config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLike() { + String sql = SqlSpeller.selectLike(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastOneHour() { + String sql = SqlSpeller.selectLastOneHour(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastOneDay() { + String sql = SqlSpeller.selectLastOneDay(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastOneWeek() { + String sql = SqlSpeller.selectLastOneWeek(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastOneMonth() { + String sql = SqlSpeller.selectLastOneMonth(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void selectLastOneYear() { + String sql = SqlSpeller.selectLastOneYear(config.getDbName(), config.getStbName()); + executeQuery(sql); + } + + private void close() { try { if (connection != null) { @@ -241,6 +317,7 @@ public class JdbcTaosdemo { long end = System.currentTimeMillis(); printSql(sql, execute, (end - start)); } catch (SQLException e) { + logger.error("ERROR execute SQL ===> " + sql); logger.error(e.getMessage()); e.printStackTrace(); } @@ -258,6 +335,7 @@ public class JdbcTaosdemo { printSql(sql, true, (end - start)); printResult(resultSet); } catch (SQLException e) { + logger.error("ERROR execute SQL ===> " + sql); logger.error(e.getMessage()); e.printStackTrace(); } diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java index 3cca9a3d7a8b45be5d733b9f7a4836eb89c828c3..82613037dbccd3be1f2c8a85a2f25e7a25ffad01 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java +++ 
b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java @@ -14,9 +14,9 @@ public final class JdbcTaosdemoConfig { //Destination database. Default is 'test' private String dbName = "test"; //keep - private int keep = 365 * 20; + private int keep = 3650; //days - private int days = 30; + private int days = 10; //Super table Name. Default is 'meters' private String stbName = "meters"; @@ -35,7 +35,7 @@ public final class JdbcTaosdemoConfig { private boolean deleteTable = false; public static void printHelp() { - System.out.println("Usage: java -jar JDBCConnectorChecker.jar [OPTION...]"); + System.out.println("Usage: java -jar JdbcTaosDemo.jar [OPTION...]"); System.out.println("-h host The host to connect to TDengine. you must input one"); System.out.println("-p port The TCP/IP port number to use for the connection. Default is 6030"); System.out.println("-u user The TDengine user name to use when connecting to the server. Default is 'root'"); diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java index d6d6ebbff1ac08e68e3e8034a59f84189ad86bf4..a35628bb58c6630d92bd2b6aebb09f9912e57536 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java @@ -3,44 +3,49 @@ package com.taosdata.example.jdbcTaosdemo.task; import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory; import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller; -import com.taosdata.example.jdbcTaosdemo.utils.TimeStampUtil; import org.apache.log4j.Logger; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; -import 
java.util.concurrent.atomic.AtomicLong; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; public class InsertTableTask implements Runnable { private static final Logger logger = Logger.getLogger(InsertTableTask.class); - private static AtomicLong beginTimestamp = new AtomicLong(TimeStampUtil.datetimeToLong("2005-01-01 00:00:00.000")); private final JdbcTaosdemoConfig config; - private final int startIndex; + private final int startTbIndex; private final int tableNumber; - private final int recordsNumber; + private final int recordsNumberPerTable; - public InsertTableTask(JdbcTaosdemoConfig config, int startIndex, int tableNumber, int recordsNumber) { + public InsertTableTask(JdbcTaosdemoConfig config, int startTbIndex, int tableNumber, int recordsNumberPerTable) { this.config = config; - this.startIndex = startIndex; + this.startTbIndex = startTbIndex; this.tableNumber = tableNumber; - this.recordsNumber = recordsNumber; + this.recordsNumberPerTable = recordsNumberPerTable; } @Override public void run() { try { Connection connection = ConnectionFactory.build(config); + int keep = config.getKeep(); + Instant end = Instant.now(); + Instant start = end.minus(Duration.ofDays(keep - 1)); + long timeGap = ChronoUnit.MILLIS.between(start, end) / (recordsNumberPerTable - 1); + // iterate insert - for (int j = 0; j < recordsNumber; j++) { - long ts = beginTimestamp.getAndIncrement(); + for (int j = 0; j < recordsNumberPerTable; j++) { + long ts = start.toEpochMilli() + (j * timeGap); // insert data into echo table - for (int i = startIndex; i < startIndex + tableNumber; i++) { + for (int i = startTbIndex; i < startTbIndex + tableNumber; i++) { String sql = SqlSpeller.insertOneRowSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts); + logger.info(Thread.currentThread().getName() + ">>> " + sql); Statement statement = connection.createStatement(); statement.execute(sql); statement.close(); - 
logger.info(Thread.currentThread().getName() + ">>> " + sql); } } connection.close(); diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java index 7af97f3b1baa0206f6f29b18a1ae59d2182c5423..b4a79e9eba47cc947d822b645d0ae1f9952f08f0 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java @@ -78,5 +78,49 @@ public class SqlSpeller { return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + stbName + ""; } + public static String selectLastFromTableSQL(String dbName, String tbPrefix, int tbIndex) { + return "select last(*) from " + dbName + "." + tbPrefix + "" + tbIndex; + } + + //select avg ,max from stb where tag + public static String selectAvgMinMaxFromSuperTableWhere(String field, String dbName, String stbName) { + return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + stbName + " where location = '" + locations[random.nextInt(locations.length)] + "'"; + } + + //select last from stb where + public static String selectLastFromSuperTableWhere(String field, String dbName, String stbName) { + return "select last(" + field + ") from " + dbName + "." + stbName + " where location = '" + locations[random.nextInt(locations.length)] + "'"; + } + + public static String selectGroupBy(String field, String dbName, String stbName) { + return "select avg(" + field + ") from " + dbName + "." + stbName + " group by location"; + } + + public static String selectLike(String dbName, String stbName) { + return "select * from " + dbName + "." 
+ stbName + " where location like 'S%'"; + } + + public static String selectLastOneHour(String dbName, String stbName) { + return "select * from " + dbName + "." + stbName + " where ts >= now - 1h"; + } + + public static String selectLastOneDay(String dbName, String stbName) { + return "select * from " + dbName + "." + stbName + " where ts >= now - 1d"; + } + + public static String selectLastOneWeek(String dbName, String stbName) { + return "select * from " + dbName + "." + stbName + " where ts >= now - 1w"; + } + + public static String selectLastOneMonth(String dbName, String stbName) { + return "select * from " + dbName + "." + stbName + " where ts >= now - 1n"; + } + + public static String selectLastOneYear(String dbName, String stbName) { + return "select * from " + dbName + "." + stbName + " where ts >= now - 1y"; + } + // select group by + // select like + // select ts >= ts<= } \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java index d00471f58147f9c66f3747c1f8a1eadbae3a6dab..0a345afdd1e45123d889d7ee198cf8efd201176b 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java @@ -1,8 +1,10 @@ package com.taosdata.example.jdbcTaosdemo.utils; -import java.sql.Date; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; public class TimeStampUtil { private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS"; @@ -21,14 +23,14 @@ public class TimeStampUtil { return sdf.format(new Date(time)); } - public static void main(String[] args) { - final String startTime = "2005-01-01 00:00:00.000"; + public static void 
main(String[] args) throws ParseException { + +// Instant now = Instant.now(); +// System.out.println(now); +// Instant years20Ago = now.minus(Duration.ofDays(365)); +// System.out.println(years20Ago); - long start = TimeStampUtil.datetimeToLong(startTime); - System.out.println(start); - String datetime = TimeStampUtil.longToDatetime(1519833600000L); - System.out.println(datetime); } diff --git a/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java b/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f370b2ef6eaa708b061ebf4a7f58f3d31f78f999 --- /dev/null +++ b/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java @@ -0,0 +1,52 @@ +package com.taosdata.example.jdbcTaosdemo.utils; + +import org.junit.Test; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.util.Date; + +import static org.junit.Assert.*; + +public class TimeStampUtilTest { + + @Test + public void datetimeToLong() { + final String startTime = "2005-01-01 00:00:00.000"; + long start = TimeStampUtil.datetimeToLong(startTime); + assertEquals(1104508800000l, start); + } + + @Test + public void longToDatetime() { + String datetime = TimeStampUtil.longToDatetime(1510000000000L); + assertEquals("2017-11-07 04:26:40.000", datetime); + } + + @Test + public void getStartDateTime() { + int keep = 365; + + Instant end = Instant.now(); + System.out.println(end.toString()); + System.out.println(end.toEpochMilli()); + + Instant start = end.minus(Duration.ofDays(keep)); + System.out.println(start.toString()); + System.out.println(start.toEpochMilli()); + + int numberOfRecordsPerTable = 10; + long timeGap = 
ChronoUnit.MILLIS.between(start, end) / (numberOfRecordsPerTable - 1); + System.out.println(timeGap); + + System.out.println("==========================="); + for (int i = 0; i < numberOfRecordsPerTable; i++) { + long ts = start.toEpochMilli() + (i * timeGap); + System.out.println(i + " : " + ts); + } + } +} \ No newline at end of file diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index d64c0de1ce45597e26396979bb693cc0f96873d5..8f8a66a32593bc25d71b554808719ca42f5b32ac 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -38,7 +38,7 @@ int main(int argc, char *argv[]) { taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { - printf("failed to connect to server, reason:%s\n", taos_errstr(taos)); + printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } printf("success to connect to server\n"); @@ -48,7 +48,7 @@ int main(int argc, char *argv[]) { result = taos_query(taos, "create database demo"); if (result == NULL) { - printf("failed to create database, reason:%s\n", taos_errstr(taos)); + printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/); exit(1); } printf("success to create database\n"); @@ -57,7 +57,7 @@ int main(int argc, char *argv[]) { // create table if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) { - printf("failed to create table, reason:%s\n", taos_errstr(taos)); + printf("failed to create table, reason:%s\n", taos_errstr(result)); exit(1); } printf("success to create table\n"); @@ -70,9 +70,19 @@ int main(int argc, char *argv[]) { for (i = 0; i < 10; ++i) { sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello"); printf("qstr: %s\n", qstr); - if (taos_query(taos, qstr)) { - printf("insert row: %i, reason:%s\n", i, taos_errstr(taos)); + + // 
note: how do you wanna do if taos_query returns non-NULL + // if (taos_query(taos, qstr)) { + // printf("insert row: %i, reason:%s\n", i, taos_errstr(taos)); + // } + TAOS_RES *result = taos_query(taos, qstr); + if (result) { + printf("insert row: %i\n", i); + } else { + printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/); + exit(1); } + //sleep(1); } printf("success to insert rows, total %d rows\n", i); diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go index b42e1e6d703a96bb86454f177a7207577c6d4d4c..14a67b93d3771a0848a270c43266d5c501500664 100644 --- a/tests/examples/go/taosdemo.go +++ b/tests/examples/go/taosdemo.go @@ -107,7 +107,7 @@ func main() { fmt.Scanln() url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" - //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) + //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) // open connect to taos server //db, err := sql.Open(taosDriverName, url) //if err != nil { @@ -115,6 +115,7 @@ func main() { // os.Exit(1) //} //defer db.Close() + rand.Seed(time.Now().Unix()) createDatabase(configPara.dbName, configPara.supTblName) fmt.Printf("======== create database success! 
========\n\n") diff --git a/tests/examples/nodejs/README-win.md b/tests/examples/nodejs/README-win.md new file mode 100644 index 0000000000000000000000000000000000000000..75fec69413af2bb49498118ec7235c9947e2f89e --- /dev/null +++ b/tests/examples/nodejs/README-win.md @@ -0,0 +1,200 @@ +# 如何在windows上使用nodejs进行TDengine应用开发 + +## 环境准备 + +(1)安装nodejs-10.22.0 + +下载链接:https://nodejs.org/dist/v10.22.0/node-v10.22.0-win-x64.zip +解压安装,把node配置到环境变量里 + +cmd启动命令行,查看node的版本 + +```shell +> node.exe --version +v10.22.0 + +> npm --version +6.14.6 +``` + + + +(2)安装python2.7 + +下载链接:https://www.python.org/ftp/python/2.7.18/python-2.7.18.amd64.msi + +查看python版本 + +```shell +>python --version +Python 2.7.18 +``` + + +(3)安装TDengine-client + +下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致) + +使用client的taos shell连接server + +```shell +>taos -h node5 + +Welcome to the TDengine shell from Linux, Client Version:2.0.6.0 +Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
+ +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +============================================================================================================================================ + 1 | node5:6030 | 7 | 1 | ready | any | 2020-10-26 09:45:26.308 | | +Query OK, 1 row(s) in set (0.036000s) +``` + +注意: +* 检查能否在client的机器上ping通server的fqdn +* 如果你的dns server并没有提供到server的域名解析,可以将server的hostname配置到client的hosts文件中 + + +## 应用开发 + +(1)建立nodejs项目 + +``` +npm init +``` + +(2)安装windows-build-tools +``` +npm install --global --production windows-build-tools +``` + +(3)安装td2.0-connector驱动 + +``` tdshell +npm install td2.0-connector +``` + +(4)nodejs访问tdengine的示例程序 + +```javascript +const taos = require('td2.0-connector'); + +var host = null; +var port = 6030; +for (var i = 2; i < global.process.argv.length; i++) { + var key = global.process.argv[i].split("=")[0]; + var value = global.process.argv[i].split("=")[1]; + + if ("host" == key) { + host = value; + } + if ("port" == key) { + port = value; + } +} + +if (host == null) { + console.log("Usage: node nodejsChecker.js host= port="); + process.exit(0); +} + +// establish connection +var conn = taos.connect({host: host, user: "root", password: "taosdata", port: port}); +var cursor = conn.cursor(); +// create database +executeSql("create database if not exists testnodejs", 0); +// use db +executeSql("use testnodejs", 0); +// drop table +executeSql("drop table if exists testnodejs.weather", 0); +// create table +executeSql("create table if not exists testnodejs.weather(ts timestamp, temperature float, humidity int)", 0); +// insert +executeSql("insert into testnodejs.weather (ts, temperature, humidity) values(now, 20.5, 34)", 1); +// select +executeQuery("select * from testnodejs.weather"); +// close connection +conn.close(); + +function executeQuery(sql) { + var start = new Date().getTime(); + var promise = cursor.query(sql, true); + var end = new Date().getTime(); + 
promise.then(function (result) { + printSql(sql, result != null, (end - start)); + result.pretty(); + }); +} + +function executeSql(sql, affectRows) { + var start = new Date().getTime(); + var promise = cursor.execute(sql); + var end = new Date().getTime(); + printSql(sql, promise == affectRows, (end - start)); +} + +function printSql(sql, succeed, cost) { + console.log("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); +} +``` + +(5)测试nodejs程序 + +```shell +>node nodejsChecker.js +Usage: node nodejsChecker.js host= port= +# 提示指定host + +>node nodejsChecker.js host=node5 +Successfully connected to TDengine +Query OK, 0 row(s) affected (0.00997610s) +[ OK ] time cost: 14 ms, execute statement ====> create database if not exists testnodejs +Query OK, 0 row(s) affected (0.00235920s) +[ OK ] time cost: 4 ms, execute statement ====> use testnodejs +Query OK, 0 row(s) affected (0.06604280s) +[ OK ] time cost: 67 ms, execute statement ====> drop table if exists testnodejs.weather +Query OK, 0 row(s) affected (0.59403290s) +[ OK ] time cost: 595 ms, execute statement ====> create table if not exists testnodejs.weather(ts timestamp, temperature float, humidity int) +Query OK, 1 row(s) affected (0.01058950s) +[ OK ] time cost: 12 ms, execute statement ====> insert into testnodejs.weather (ts, temperature, humidity) values(now, 20.5, 34) +Query OK, 1 row(s) in set (0.00401490s) +[ OK ] time cost: 10 ms, execute statement ====> select * from testnodejs.weather +Connection is closed + + ts | temperature | humidity | +===================================================================== +2020-10-27 18:49:15.547 | 20.5 | 34 | +``` + +## 指南 + +### 如何设置主机名和hosts + +在server上查看hostname和fqdn +```shell +查看hostname +# hostname +taos-server + +查看fqdn +# hostname -f +taos-server +``` + +windows下hosts文件位于: +C:\\Windows\System32\drivers\etc\hosts +修改hosts文件,添加server的ip和hostname + +``` +192.168.56.101 node5 +``` + +> 什么是FQDN? 
+> +> FQDN(Full qualified domain name)全限定域名,fqdn由2部分组成:hostname+domainname。 +> +> 例如,一个邮件服务器的fqdn可能是:mymail.somecollege.edu,其中mymail是hostname(主机名),somcollege.edu是domainname(域名)。本例中,.edu是顶级域名,.somecollege是二级域名。 +> +> 当连接服务器时,必须指定fqdn,然后,dns服务器通过查看dns表,将hostname解析为相应的ip地址。如果只指定hostname(不指定domainname),应用程序可能服务解析主机名。因为如果你试图访问不在本地的远程服务器时,本地的dns服务器和可能没有远程服务器的hostname列表。 +> +> 参考:https://kb.iu.edu/d/aiuv diff --git a/tests/examples/nodejs/nodejsChecker.js b/tests/examples/nodejs/nodejsChecker.js new file mode 100644 index 0000000000000000000000000000000000000000..f838d5cc8465dba70b5372a5d7720a8cff69544a --- /dev/null +++ b/tests/examples/nodejs/nodejsChecker.js @@ -0,0 +1,60 @@ +const taos = require('td2.0-connector'); + + +var host = null; +var port = 6030; +for(var i = 2; i < global.process.argv.length; i++){ + var key = global.process.argv[i].split("=")[0]; + var value = global.process.argv[i].split("=")[1]; + + if("host" == key){ + host = value; + } + if("port" == key){ + port = value; + } +} + +if(host == null){ + console.log("Usage: node nodejsChecker.js host= port="); + process.exit(0); +} + +// establish connection +var conn = taos.connect({host:host, user:"root", password:"taosdata",port:port}); +var cursor = conn.cursor(); +// create database +executeSql("create database if not exists test", 0); +// use db +executeSql("use test", 0); +// drop table +executeSql("drop table if exists test.weather", 0); +// create table +executeSql("create table if not exists test.weather(ts timestamp, temperature float, humidity int)", 0); +// insert +executeSql("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)", 1); +// select +executeQuery("select * from test.weather"); +// close connection +conn.close(); + +function executeQuery(sql){ + var start = new Date().getTime(); + var promise = cursor.query(sql, true); + var end = new Date().getTime(); + promise.then(function(result){ + printSql(sql, result != null,(end - start)); + result.pretty(); + }); +} + 
+function executeSql(sql, affectRows){ + var start = new Date().getTime(); + var promise = cursor.execute(sql); + var end = new Date().getTime(); + printSql(sql, promise == affectRows, (end - start)); +} + +function printSql(sql, succeed, cost){ + console.log("[ "+(succeed ? "OK" : "ERROR!")+" ] time cost: " + cost + " ms, execute statement ====> " + sql); +} diff --git a/tests/examples/python/PYTHONConnectorChecker/PythonChecker.py b/tests/examples/python/PYTHONConnectorChecker/PythonChecker.py new file mode 100644 index 0000000000000000000000000000000000000000..d74f021ffcf3aa33c551cc265243b5139c23b757 --- /dev/null +++ b/tests/examples/python/PYTHONConnectorChecker/PythonChecker.py @@ -0,0 +1,114 @@ +import taos +import time +import sys +import getopt +class ConnectorChecker: + def init(self): + self.host = "127.0.0.1" + self.dbName = "test" + self.tbName = "weather" + self.user = "root" + self.password = "taosdata" + + + def sethdt(self,FQDN,dbname,tbname): + if(FQDN): + self.host=FQDN + if(dbname): + self.dbname=dbname + if(tbname): + self.tbName + def printSql(self,sql,elapsed): + print("[ "+"OK"+" ]"+" time cost: %s ms, execute statement ====> %s" + %(elapsed,sql)) + def executeQuery(self,sql): + try: + start=time.time() + execute = self.cl.execute(sql) + elapsed = (time.time()-start)*1000 + self.printSql(sql,elapsed) + data = self.cl.fetchall() + numOfRows = self.cl.rowcount + numOfCols = len(self.cl.description) + for irow in range(numOfRows): + print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])) + except Exception as e: + print("Failure sql: %s,exception: %s" %sql,str(e)) + def execute(self,sql): + try: + start=time.time() + execute = self.cl.execute(sql) + elapsed = (time.time()-start)*1000 + self.printSql(sql,elapsed) + + except Exception as e: + print("Failure sql: %s,exception: %s" % + sql,str(e)) + def close(self): + print("connetion closed.") + self.cl.close() + self.conn.close() + def 
createDatabase(self): + sql="create database if not exists %s" % self.dbName + self.execute(sql) + def useDatabase(self): + sql="use %s" % self.dbName + self.execute(sql) + def createTable(self): + sql="create table if not exists %s.%s (ts timestamp, temperature float, humidity int)"%(self.dbName,self.tbName) + self.execute(sql) + def checkDropTable(self): + sql="drop table if exists " + self.dbName + "." + self.tbName + "" + self.execute(sql) + def checkInsert(self): + sql="insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)" + self.execute(sql) + def checkSelect(self): + sql = "select * from test.weather" + self.executeQuery(sql) + def srun(self): + try: + self.conn = taos.connect(host=self.host,user=self.user,password=self.password) + #self.conn = taos.connect(self.host,self.user,self.password) + except Exception as e: + print("connection failed: %s"%self.host) + exit(1) + print("[ OK ] Connection established.") + self.cl = self.conn.cursor() + +def main(argv): + FQDN='' + dbname='' + tbname='' + try: + opts, args = getopt.getopt(argv,"h:d:t:",["FQDN=","ifile=","ofile="]) + except getopt.GetoptError: + print ('PYTHONConnectorChecker.py -h ') + sys.exit(2) + for opt, arg in opts: + if opt in ("-h", "--FQDN"): + FQDN=arg + elif opt in ("-d", "--dbname"): + dbname = arg + elif opt in ("-t", "--tbname"): + tbname = arg + + checker = ConnectorChecker() + checker.init() + checker.sethdt(FQDN,dbname,tbname) + checker.srun() + checker.createDatabase() + checker.useDatabase() + checker.checkDropTable() + checker.createTable() + checker.checkInsert() + checker.checkSelect() + checker.checkDropTable() + checker.close() + + + +if __name__ == "__main__": + main(sys.argv[1:]) + + diff --git a/tests/examples/rust/src/bindings.rs b/tests/examples/rust/src/bindings.rs index b93e833ba05dbc4bbf2a390a5cf801d27c340590..fc13647130995b2a85b485236ec9a7ba30c1cc1b 100644 --- a/tests/examples/rust/src/bindings.rs +++ b/tests/examples/rust/src/bindings.rs @@ -308,12 
+308,6 @@ extern "C" { extern "C" { pub fn taos_unsubscribe(tsub: *mut ::std::os::raw::c_void); } -extern "C" { - pub fn taos_subfields_count(tsub: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_fetch_subfields(tsub: *mut ::std::os::raw::c_void) -> *mut TAOS_FIELD; -} extern "C" { pub fn taos_open_stream( taos: *mut ::std::os::raw::c_void, diff --git a/tests/examples/rust/src/subscriber.rs b/tests/examples/rust/src/subscriber.rs index b6812d7b6e5b48016c62c3bb45d11bfb7fd85e0b..78c6f5cd8d036be537da11f34f829d48750d2a73 100644 --- a/tests/examples/rust/src/subscriber.rs +++ b/tests/examples/rust/src/subscriber.rs @@ -37,16 +37,16 @@ impl Subscriber { println!("subscribed to {} user:{}, db:{}, tb:{}, time:{}, mseconds:{}", host, username, db, table, time, mseconds); - let mut fields = taos_fetch_subfields(tsub); + let mut fields = taos_fetch_fields(tsub); if fields.is_null() { taos_unsubscribe(tsub); - return Err("fetch subfields error") + return Err("fetch fields error") } - let fcount = taos_subfields_count(tsub); + let fcount = taos_field_count(tsub); if fcount == 0 { taos_unsubscribe(tsub); - return Err("subfields count is 0") + return Err("fields count is 0") } Ok(Subscriber{tsub, fields, fcount}) @@ -74,4 +74,4 @@ impl Drop for Subscriber { fn drop(&mut self) { unsafe {taos_unsubscribe(self.tsub);} } -} \ No newline at end of file +} diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat new file mode 100755 index 0000000000000000000000000000000000000000..efd8961bb0be2eb6f20e291114b92b00469b984f --- /dev/null +++ b/tests/gotest/batchtest.bat @@ -0,0 +1,20 @@ +@echo off +echo ==== start Go connector test cases test ==== +cd /d %~dp0 + +set severIp=%1 +set serverPort=%2 +if "%severIp%"=="" (set severIp=127.0.0.1) +if "%serverPort%"=="" (set serverPort=6030) + +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct + +cd case001 +case001.bat %severIp% %serverPort% + +rem cd case002 +rem case002.bat + 
+:: cd case002 +:: case002.bat diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh old mode 100644 new mode 100755 index a027dd0d7ce04c599233157bdd618fad3885c809..0fbbf40714b3349651beea9302e66628b31a22ac --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -1,5 +1,21 @@ #!/bin/bash -bash ./case001/case001.sh -#bash ./case002/case002.sh -#bash ./case003/case003.sh +echo "==== start Go connector test cases test ====" + +severIp=$1 +serverPort=$2 + +if [ ! -n "$severIp" ]; then + severIp=127.0.0.1 +fi + +if [ ! -n "$serverPort" ]; then + serverPort=6030 +fi + +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct + +bash ./case001/case001.sh $severIp $serverPort +#bash ./case002/case002.sh $severIp $serverPort +#bash ./case003/case003.sh $severIp $serverPort diff --git a/tests/gotest/case001/case001.bat b/tests/gotest/case001/case001.bat new file mode 100644 index 0000000000000000000000000000000000000000..ebec576e724ccb14319dd380c9783a783ac0db62 --- /dev/null +++ b/tests/gotest/case001/case001.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run cases001.go + +del go.* +go mod init demotest +go build +demotest.exe -h %1 -p %2 +cd .. 
+ diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go index 1d5ede6d21a0bacad34cb807a16b50e0ae643512..fb94f566dd7fa9ef8932bc28326310681998b410 100644 --- a/tests/gotest/case001/case001.go +++ b/tests/gotest/case001/case001.go @@ -16,20 +16,53 @@ package main import ( "database/sql" + "flag" "fmt" _ "github.com/taosdata/driver-go/taosSql" "log" + "strconv" "time" ) +type config struct { + hostName string + serverPort int + user string + password string +} + +var configPara config +var url string + +func init() { + flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.") + flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.") + flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") + flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") + + flag.Parse() +} + +func printAllArgs() { + fmt.Printf("\n============= args parse result: =============\n") + fmt.Printf("hostName: %v\n", configPara.hostName) + fmt.Printf("serverPort: %v\n", configPara.serverPort) + fmt.Printf("usr: %v\n", configPara.user) + fmt.Printf("password: %v\n", configPara.password) + fmt.Printf("================================================\n") +} + func main() { + printAllArgs() taosDriverName := "taosSql" demodb := "demodb" demot := "demot" fmt.Printf("\n======== start demo test ========\n") + + url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" // open connect to taos server - db, err := sql.Open(taosDriverName, "root:taosdata@/tcp(192.168.1.217:7100)/") + db, err := sql.Open(taosDriverName, url) if err != nil { log.Fatalf("Open database error: %s\n", err) } diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 
5a9034c4d18e257eaaf9324c570fbc17b01c548b..831e9f83ac482c0a2c668e2ad0d16c4bf59f19aa 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -1,10 +1,6 @@ #!/bin/bash -################################################## -# -# Do go test -# -################################################## +echo "==== start run cases001.go" set +e #set -x @@ -12,59 +8,14 @@ set +e script_dir="$(dirname $(readlink -f $0))" #echo "pwd: $script_dir, para0: $0" -execName=$0 -execName=`echo ${execName##*/}` -goName=`echo ${execName%.*}` - -###### step 1: start one taosd -scriptDir=$script_dir/../../script/sh -bash $scriptDir/stop_dnodes.sh -bash $scriptDir/deploy.sh -n dnode1 -i 1 -bash $scriptDir/cfg.sh -n dnode1 -c walLevel -v 0 -bash $scriptDir/exec.sh -n dnode1 -s start - -###### step 2: set config item -TAOS_CFG=/etc/taos/taos.cfg -HOSTNAME=`hostname -f` - -if [ ! -f ${TAOS_CFG} ]; then - touch -f $TAOS_CFG -fi - -echo " " > $TAOS_CFG -echo "firstEp ${HOSTNAME}:7100" >> $TAOS_CFG -echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG -echo "serverPort 7100" >> $TAOS_CFG -#echo "dataDir $DATA_DIR" >> $TAOS_CFG -#echo "logDir $LOG_DIR" >> $TAOS_CFG -#echo "scriptDir ${CODE_DIR}/../script" >> $TAOS_CFG -echo "numOfLogLines 100000000" >> $TAOS_CFG -echo "dDebugFlag 135" >> $TAOS_CFG -echo "mDebugFlag 135" >> $TAOS_CFG -echo "sdbDebugFlag 135" >> $TAOS_CFG -echo "rpcDebugFlag 135" >> $TAOS_CFG -echo "tmrDebugFlag 131" >> $TAOS_CFG -echo "cDebugFlag 135" >> $TAOS_CFG -echo "httpDebugFlag 135" >> $TAOS_CFG -echo "monitorDebugFlag 135" >> $TAOS_CFG -echo "udebugFlag 135" >> $TAOS_CFG -echo "tablemetakeeptimer 5" >> $TAOS_CFG -echo "wal 0" >> $TAOS_CFG -echo "asyncLog 0" >> $TAOS_CFG -echo "locale en_US.UTF-8" >> $TAOS_CFG -echo "enableCoreFile 1" >> $TAOS_CFG -echo " " >> $TAOS_CFG - -ulimit -n 600000 -ulimit -c unlimited -# -##sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e -# +#execName=$0 +#execName=`echo ${execName##*/}` +#goName=`echo ${execName%.*}` ###### 
step 3: start build cd $script_dir rm -f go.* -go mod init $goName +go mod init demotest go build -sleep 1s -sudo ./$goName +sleep 1s +./demotest -h $1 -p $2 diff --git a/tests/pytest/cluster/bananceTest.py b/tests/pytest/cluster/bananceTest.py new file mode 100644 index 0000000000000000000000000000000000000000..ef25afa7d2f7ea3b5358f8ba74d6702d28d54c85 --- /dev/null +++ b/tests/pytest/cluster/bananceTest.py @@ -0,0 +1,57 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random +import time + +class ClusterTestcase: + + ## test case 32 ## + def run(self): + + nodes = Nodes() + nodes.addConfigs("maxVgroupsPerDb", "10") + nodes.addConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + dnodes = [] + for i in range(10): + dnodes.append(int(tdSql.getData(i, 4))) + + s = set(dnodes) + if len(s) < 3: + tdLog.exit("cluster is not balanced") + + tdLog.info("cluster is balanced") + + nodes.removeConfigs("maxVgroupsPerDb", "10") + nodes.removeConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/basicTest.py b/tests/pytest/cluster/basicTest.py new file 
mode 100644 index 0000000000000000000000000000000000000000..b990d7fd982a490383939707a32635d37e546b13 --- /dev/null +++ b/tests/pytest/cluster/basicTest.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 1, 33 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + ## Test case 1 ## + tdLog.info("Test case 1 repeat %d times" % ctest.repeat) + for i in range(ctest.repeat): + tdLog.info("Start Round %d" % (i + 1)) + replica = random.randint(1,3) + ctest.createSTable(replica) + ctest.run() + tdLog.sleep(10) + tdSql.query("select count(*) from %s.%s" %(ctest.dbName, ctest.stbName)) + tdSql.checkData(0, 0, ctest.numberOfRecords * ctest.numberOfTables) + tdLog.info("Round %d completed" % (i + 1)) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/changeReplicaTest.py b/tests/pytest/cluster/changeReplicaTest.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa68edbfee2db599076befdf9bed5f4b4be3c83 --- /dev/null +++ b/tests/pytest/cluster/changeReplicaTest.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 7, ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + + tdSql.execute("alter database %s replica 2" % ctest.dbName) + tdLog.sleep(30) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + tdSql.checkData(i, 7, "slave") + + tdSql.execute("alter database %s replica 3" % ctest.dbName) + tdLog.sleep(30) + tdSql.query("show vgroups") + for i in range(10): + tdSql.checkData(i, 5, "master") + tdSql.checkData(i, 7, "slave") + tdSql.checkData(i, 9, "slave") + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py new file mode 100644 index 0000000000000000000000000000000000000000..36af8ac42e56e1b8a7ab2237305a6bf286103552 --- /dev/null +++ b/tests/pytest/cluster/clusterSetup.py @@ -0,0 +1,202 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from fabric import Connection +from util.sql import * +from util.log import * +import taos +import random +import threading +import logging + +class Node: + def __init__(self, index, username, hostIP, hostName, password, homeDir): + self.index = index + self.username = username + self.hostIP = hostIP + self.hostName = hostName + self.homeDir = homeDir + self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)}) + + def startTaosd(self): + try: + self.conn.run("sudo systemctl start taosd") + except Exception as e: + print("Start Taosd error for node %d " % self.index) + logging.exception(e) + + def stopTaosd(self): + try: + self.conn.run("sudo systemctl stop taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + + def restartTaosd(self): + try: + self.conn.run("sudo systemctl restart taosd") + except Exception as e: + print("Stop Taosd error for node %d " % self.index) + logging.exception(e) + + def removeTaosd(self): + try: + self.conn.run("rmtaos") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def installTaosd(self, packagePath): + self.conn.put(packagePath, self.homeDir) + self.conn.cd(self.homeDir) + self.conn.run("tar -zxf $(basename '%s')" % packagePath) + with self.conn.cd("TDengine-enterprise-server"): + self.conn.run("yes|./install.sh") + + def configTaosd(self, taosConfigKey, taosConfigValue): + self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + + def removeTaosConfig(self, taosConfigKey, 
taosConfigValue): + self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + + def configHosts(self, ip, name): + self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + + def removeData(self): + try: + self.conn.run("sudo rm -rf /var/lib/taos/*") + except Exception as e: + print("remove taosd data error for node %d " % self.index) + logging.exception(e) + + def removeLog(self): + try: + self.conn.run("sudo rm -rf /var/log/taos/*") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def removeDataForMnode(self): + try: + self.conn.run("sudo rm -rf /var/lib/taos/*") + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + + def removeDataForVnode(self, id): + try: + self.conn.run("sudo rm -rf /var/lib/taos/vnode%d/*.data" % id) + except Exception as e: + print("remove taosd error for node %d " % self.index) + logging.exception(e) + +class Nodes: + def __init__(self): + self.node1 = Node(1, 'ubuntu', '192.168.1.52', 'node1', 'tbase125!', '/home/ubuntu') + self.node2 = Node(2, 'ubuntu', '192.168.1.53', 'node2', 'tbase125!', '/home/ubuntu') + self.node3 = Node(3, 'ubuntu', '192.168.1.54', 'node3', 'tbase125!', '/home/ubuntu') + + def stopAllTaosd(self): + self.node1.stopTaosd() + self.node2.stopTaosd() + self.node3.stopTaosd() + + def startAllTaosd(self): + self.node1.startTaosd() + self.node2.startTaosd() + self.node3.startTaosd() + + def restartAllTaosd(self): + self.node1.restartTaosd() + self.node2.restartTaosd() + self.node3.restartTaosd() + + def addConfigs(self, configKey, configValue): + self.node1.configTaosd(configKey, configValue) + self.node2.configTaosd(configKey, configValue) + self.node3.configTaosd(configKey, configValue) + + def removeConfigs(self, configKey, configValue): + self.node1.removeTaosConfig(configKey, configValue) + self.node2.removeTaosConfig(configKey, configValue) + 
self.node3.removeTaosConfig(configKey, configValue) + + def removeAllDataFiles(self): + self.node1.removeData() + self.node2.removeData() + self.node3.removeData() + +class ClusterTest: + def __init__(self, hostName): + self.host = hostName + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos" + self.dbName = "mytest" + self.stbName = "meters" + self.numberOfThreads = 20 + self.numberOfTables = 10000 + self.numberOfRecords = 1000 + self.tbPrefix = "t" + self.ts = 1538548685000 + self.repeat = 1 + + def connectDB(self): + self.conn = taos.connect( + host=self.host, + user=self.user, + password=self.password, + config=self.config) + + def createSTable(self, replica): + cursor = self.conn.cursor() + tdLog.info("drop database if exists %s" % self.dbName) + cursor.execute("drop database if exists %s" % self.dbName) + tdLog.info("create database %s replica %d" % (self.dbName, replica)) + cursor.execute("create database %s replica %d" % (self.dbName, replica)) + tdLog.info("use %s" % self.dbName) + cursor.execute("use %s" % self.dbName) + tdLog.info("drop table if exists %s" % self.stbName) + cursor.execute("drop table if exists %s" % self.stbName) + tdLog.info("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) + cursor.execute("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName) + cursor.close() + + def insertData(self, threadID): + print("Thread %d: starting" % threadID) + cursor = self.conn.cursor() + tablesPerThread = int(self.numberOfTables / self.numberOfThreads) + baseTableID = tablesPerThread * threadID + for i in range (tablesPerThread): + cursor.execute("create table %s%d using %s tags(%d)" % (self.tbPrefix, baseTableID + i, self.stbName, baseTableID + i)) + query = "insert into %s%d values" % (self.tbPrefix, baseTableID + i) + base = self.numberOfRecords * i + for j in range(self.numberOfRecords): + query += "(%d, %f, %d, %d)" % (self.ts + 
base + j, random.random(), random.randint(210, 230), random.randint(0, 10)) + cursor.execute(query) + cursor.close() + print("Thread %d: finishing" % threadID) + + def run(self): + threads = [] + tdLog.info("Inserting data") + for i in range(self.numberOfThreads): + thread = threading.Thread(target=self.insertData, args=(i,)) + threads.append(thread) + thread.start() + + for i in range(self.numberOfThreads): + threads[i].join() \ No newline at end of file diff --git a/tests/pytest/cluster/dataFileRecoveryTest.py b/tests/pytest/cluster/dataFileRecoveryTest.py new file mode 100644 index 0000000000000000000000000000000000000000..089d3fffc1499a8d9cafc87a8d94252111fcd604 --- /dev/null +++ b/tests/pytest/cluster/dataFileRecoveryTest.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 20, 21, 22 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(3) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + nodes.node2.stopTaosd() + tdSql.execute("use %s" % ctest.dbName) + tdSql.query("show vgroups") + vnodeID = tdSql.getData(0, 0) + nodes.node2.removeDataForVnode(vnodeID) + nodes.node2.startTaosd() + + # Wait for vnode file to recover + for i in range(10): + tdSql.query("select count(*) from t0") + + tdLog.sleep(10) + + for i in range(10): + tdSql.query("select count(*) from t0") + 
tdSql.checkData(0, 0, 1000) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/fullDnodesTest.py b/tests/pytest/cluster/fullDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4b10d97a24dfbb156122aa0afdbb5d22ce3941 --- /dev/null +++ b/tests/pytest/cluster/fullDnodesTest.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ##Cover test case 5 ## + def run(self): + # cluster environment set up + nodes = Nodes() + nodes.addConfigs("maxVgroupsPerDb", "10") + nodes.addConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + + tdSql.init(ctest.conn.cursor(), False) + tdSql.execute("use %s" % ctest.dbName) + tdSql.error("create table tt1 using %s tags(1)" % ctest.stbName) + + nodes.removeConfigs("maxVgroupsPerDb", "10") + nodes.removeConfigs("maxTablesPerVnode", "1000") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/killAndRestartDnodesTest.py b/tests/pytest/cluster/killAndRestartDnodesTest.py new file mode 100644 index 
0000000000000000000000000000000000000000..be927e862f616c7fbe490e733a18984b6971ef1f --- /dev/null +++ b/tests/pytest/cluster/killAndRestartDnodesTest.py @@ -0,0 +1,75 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 7, 10 ## + def run(self): + # cluster environment set up + tdLog.info("Test case 7, 10") + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + nodes.node1.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "offline") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node1.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node2.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "offline") + tdSql.checkData(2, 4, "ready") + + nodes.node2.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "ready") + + nodes.node3.stopTaosd() + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + tdSql.checkData(2, 4, "offline") + + nodes.node3.startTaosd() + tdSql.checkRows(3) + tdSql.checkData(0, 4, "ready") + tdSql.checkData(1, 4, "ready") + 
tdSql.checkData(2, 4, "ready") + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/offlineThresholdTest.py b/tests/pytest/cluster/offlineThresholdTest.py new file mode 100644 index 0000000000000000000000000000000000000000..8373424f93c8217250907e09620c8523d63071ad --- /dev/null +++ b/tests/pytest/cluster/offlineThresholdTest.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## cover test case 6, 8, 9, 11 ## + def run(self): + # cluster environment set up + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + tdSql.init(ctest.conn.cursor(), False) + + nodes.addConfigs("offlineThreshold", "10") + nodes.removeAllDataFiles() + nodes.restartAllTaosd() + nodes.node3.stopTaosd() + + tdLog.sleep(10) + tdSql.query("show dnodes") + tdSql.checkRows(3) + tdSql.checkData(2, 4, "offline") + + tdLog.sleep(60) + tdSql.checkRows(3) + tdSql.checkData(2, 4, "dropping") + + tdLog.sleep(300) + tdSql.checkRows(2) + + nodes.removeConfigs("offlineThreshold", "10") + nodes.restartAllTaosd() + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() \ No newline at end of file diff --git a/tests/pytest/cluster/oneReplicaOfflineTest.py b/tests/pytest/cluster/oneReplicaOfflineTest.py new file 
mode 100644 index 0000000000000000000000000000000000000000..0223dfe01add9faca7987d7767f5c41a58b8edd2 --- /dev/null +++ b/tests/pytest/cluster/oneReplicaOfflineTest.py @@ -0,0 +1,65 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 28, 29, 30, 31 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(3) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + + nodes.node2.stopTaosd() + for i in range(100): + tdSql.execute("drop table t%d" % i) + + nodes.node2.startTaosd() + tdSql.query("show tables") + tdSql.checkRows(9900) + + nodes.node2.stopTaosd() + for i in range(10): + tdSql.execute("create table a%d using meters tags(2)" % i) + + nodes.node2.startTaosd() + tdSql.query("show tables") + tdSql.checkRows(9910) + + nodes.node2.stopTaosd() + tdSql.execute("alter table meters add col col6 int") + nodes.node2.startTaosd() + + nodes.node2.stopTaosd() + tdSql.execute("drop database %s" % ctest.dbName) + + nodes.node2.startTaosd() + tdSql.query("show databases") + tdSql.checkRows(0) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/queryTimeTest.py b/tests/pytest/cluster/queryTimeTest.py new file mode 100644 index 
0000000000000000000000000000000000000000..74a9081ccf4fd8abc175e2e0c82b0c6feedcbb26 --- /dev/null +++ b/tests/pytest/cluster/queryTimeTest.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random +import time + +class ClusterTestcase: + + ## test case 32 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.execute("use %s" % ctest.dbName) + totalTime = 0 + for i in range(10): + startTime = time.time() + tdSql.query("select * from %s" % ctest.stbName) + totalTime += time.time() - startTime + print("replica 1: avarage query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords,totalTime / 10)) + + tdSql.execute("alter database %s replica 3" % ctest.dbName) + tdLog.sleep(60) + totalTime = 0 + for i in range(10): + startTime = time.time() + tdSql.query("select * from %s" % ctest.stbName) + totalTime += time.time() - startTime + print("replica 3: avarage query time for %d records: %f seconds" % (ctest.numberOfTables * ctest.numberOfRecords,totalTime / 10)) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/stopAllDnodesTest.py b/tests/pytest/cluster/stopAllDnodesTest.py new file mode 100644 index 
0000000000000000000000000000000000000000..a71ae52e3d7a640bb589f3bafe16b2e4d45c7b93 --- /dev/null +++ b/tests/pytest/cluster/stopAllDnodesTest.py @@ -0,0 +1,45 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 19 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + tdSql.init(ctest.conn.cursor(), False) + + tdSql.query("show databases") + count = tdSql.queryRows; + + nodes.stopAllTaosd() + nodes.node1.startTaosd() + tdSql.error("show databases") + + nodes.node2.startTaosd() + tdSql.error("show databases") + + nodes.node3.startTaosd() + tdLog.sleep(10) + tdSql.query("show databases") + tdSql.checkRows(count) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/stopTwoDnodesTest.py b/tests/pytest/cluster/stopTwoDnodesTest.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9958e2d32018b6a89a3e0d08da2c1597151ff2 --- /dev/null +++ b/tests/pytest/cluster/stopTwoDnodesTest.py @@ -0,0 +1,48 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 17, 18 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + tdSql.query("show databases") + count = tdSql.queryRows; + tdSql.execute("use %s" % ctest.dbName) + tdSql.execute("alter database %s replica 3" % ctest.dbName) + nodes.node2.stopTaosd() + nodes.node3.stopTaosd() + tdSql.error("show databases") + + nodes.node2.startTaosd() + tdSql.error("show databases") + + nodes.node3.startTaosd() + tdSql.query("show databases") + tdSql.checkRows(count) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/syncingTest.py b/tests/pytest/cluster/syncingTest.py new file mode 100644 index 0000000000000000000000000000000000000000..96be048d231e35f67e40fc4785d2e19337ed408b --- /dev/null +++ b/tests/pytest/cluster/syncingTest.py @@ -0,0 +1,50 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from clusterSetup import * +from util.sql import tdSql +from util.log import tdLog +import random + +class ClusterTestcase: + + ## test case 24, 25, 26, 27 ## + def run(self): + + nodes = Nodes() + ctest = ClusterTest(nodes.node1.hostName) + ctest.connectDB() + ctest.createSTable(1) + ctest.run() + tdSql.init(ctest.conn.cursor(), False) + + + tdSql.execute("use %s" % ctest.dbName) + tdSql.execute("alter database %s replica 3" % ctest.dbName) + + for i in range(100): + tdSql.execute("drop table t%d" % i) + + for i in range(100): + tdSql.execute("create table a%d using meters tags(1)" % i) + + tdSql.execute("alter table meters add col col5 int") + tdSql.execute("alter table meters drop col col5 int") + tdSql.execute("drop database %s" % ctest.dbName) + + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +ct = ClusterTestcase() +ct.run() diff --git a/tests/pytest/cluster/testcluster.sh b/tests/pytest/cluster/testcluster.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e15a498c0a73db450699fe66d63d07c3b18dbe5 --- /dev/null +++ b/tests/pytest/cluster/testcluster.sh @@ -0,0 +1,12 @@ +python3 basicTest.py +python3 bananceTest.py +python3 changeReplicaTest.py +python3 dataFileRecoveryTest.py +python3 fullDnodesTest.py +python3 killAndRestartDnodesTest.py +python3 offlineThresholdTest.py +python3 oneReplicaOfflineTest.py +python3 queryTimeTest.py +python3 stopAllDnodesTest.py +python3 stopTwoDnodesTest.py +python3 syncingTest.py \ No newline at end of file diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index faefc8a1c296c10e8b4fc18c2e72a6b0ff8fda44..39a4cb48fdc22060f63f443a4ac8142cd6a6903e 
100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -12,88 +12,105 @@ # -*- coding: utf-8 -*- import threading import taos - +import sys import json import time import random # query sql query_sql = [ # first supertable -"select count(*) from test.meters where c1 > 50;", -"select count(*) from test.meters where c2 >= 50 and c2 < 100;", -"select count(*) from test.meters where c3 != 5;", +"select count(*) from test.meters ;", "select count(*) from test.meters where t3 > 2;", "select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'fi%';", -"select count(*) from test.meters where t7 like '_econd';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", "select count(*) from test.meters interval(1n) order by ts desc;", -"select first(*) from test.meters;", -"select last(*) from test.meters;", +#"select max(c0) from test.meters group by tbname", +"select first(ts) from test.meters where t5 >5000 and t5<5100;", +"select last(ts) from test.meters where t5 >5000 and t5<5100;", "select last_row(*) from test.meters;", "select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters;", +"select avg(c1) from test.meters where t5 >5000 and t5<5100;", "select bottom(c1, 2) from test.t1;", "select diff(c1) from test.t1;", "select leastsquares(c1, 1, 1) from test.t1 ;", -"select max(c1) from test.meters;", -"select min(c1) from test.meters;", -"select c1 + c2 * c3 + c1 / c5 + c4 + c2 from test.t1;", +"select max(c1) from test.meters where t5 >5000 and t5<5100;", +"select min(c1) from test.meters where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", "select percentile(c1, 50) from test.t1;", "select spread(c1) from test.t1 ;", "select stddev(c1) from test.t1;", -"select 
sum(c1) from test.meters;", -"select top(c1, 2) from test.meters;" -"select twa(c6) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c6) from test.meters;", -"select bottom(c6, 2) from test.t1;", -"select diff(c6) from test.t1;", -"select leastsquares(c6, 1, 1) from test.t1 ;", -"select max(c6) from test.meters;", -"select min(c6) from test.meters;", -"select c6 + c2 * c3 + c6 / c5 + c4 + c2 from test.t1;", -"select percentile(c6, 50) from test.t1;", -"select spread(c6) from test.t1 ;", -"select stddev(c6) from test.t1;", -"select sum(c6) from test.meters;", -"select top(c6, 2) from test.meters;", +"select sum(c1) from test.meters where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" +"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c4) from test.meters where t5 >5000 and t5<5100;", +"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", +"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", +"select leastsquares(c4, 1, 1) from test.t1 ;", +"select max(c4) from test.meters where t5 >5000 and t5<5100;", +"select min(c4) from test.meters where t5 >5000 and t5<5100;", +"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", +"select percentile(c5, 50) from test.t1;", +"select spread(c5) from test.t1 ;", +"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", +"select sum(c5) from test.meters where t5 >5000 and t5<5100;", +"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", +#all vnode +"select count(*) from test.meters where t5 >5000 and t5<5100", +"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", # second supertable -"select count(*) from test.meters1 where c1 > 50;", -"select count(*) from 
test.meters1 where c2 >= 50 and c2 < 100;", -"select count(*) from test.meters1 where c3 != 5;", "select count(*) from test.meters1 where t3 > 2;", "select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters1 where t7 like 'fi%';", -"select count(*) from test.meters1 where t7 like '_econd';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", "select count(*) from test.meters1 interval(1n) order by ts desc;", -"select first(*) from test.meters1;", -"select last(*) from test.meters1;", -"select last_row(*) from test.meters1;", +#"select max(c0) from test.meters1 group by tbname", +"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last_row(*) from test.meters1 ;", "select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters1;", -"select bottom(c1, 2) from test.m1;", -"select diff(c1) from test.m1;", +"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", +"select diff(c1) from test.m1 ;", "select leastsquares(c1, 1, 1) from test.m1 ;", -"select max(c1) from test.meters1;", -"select min(c1) from test.meters1;", -"select c1 + c2 * c3 + c1 / c5 + c3 + c2 from test.m1;", +"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", "select percentile(c1, 50) from test.m1;", "select spread(c1) from test.m1 ;", "select stddev(c1) from test.m1;", -"select sum(c1) from test.meters1;", -"select top(c1, 2) from test.meters1;", -"select twa(c6) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c6) from test.meters1;", -"select bottom(c6, 2) 
from test.m1;", -"select diff(c6) from test.m1;", -"select leastsquares(c6, 1, 1) from test.m1 ;", -"select max(c6) from test.meters1;", -"select min(c6) from test.meters1;", -"select c6 + c2 * c3 + c6 / c5 + c3 + c2 from test.m1;", -"select percentile(c6, 50) from test.m1;", -"select spread(c6) from test.m1 ;", -"select stddev(c6) from test.m1;", -"select sum(c6) from test.meters1;", -"select top(c6, 2) from test.meters1;" +"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", +"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c5, 2) from test.m1;", +"select diff(c5) from test.m1;", +"select leastsquares(c5, 1, 1) from test.m1 ;", +"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select c5 + c2 + c4 / c5 + c0 from test.m1;", +"select percentile(c4, 50) from test.m1;", +"select spread(c4) from test.m1 ;", +"select stddev(c4) from test.m1;", +"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", +"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +#all vnode +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", +#join +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", +# "select meters.ts,meters1.c2 
from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" ] class ConcurrentInquiry: @@ -101,7 +118,8 @@ class ConcurrentInquiry: self.numOfTherads = 50 self.ts=1500000001000 - + def SetThreadsNum(self,num): + self.numOfTherads=num def query_thread(self,threadID): host = "10.211.55.14" user = "root" @@ -112,6 +130,7 @@ class ConcurrentInquiry: password, ) cl = conn.cursor() + cl.execute("use test;") print("Thread %d: starting" % threadID) @@ -121,12 +140,16 @@ class ConcurrentInquiry: for i in ran_query_sql: print("Thread %d : %s"% (threadID,i)) try: + start = time.time() cl.execute(i) cl.fetchall + end = time.time() + print("time cost :",end-start) except Exception as e: print( "Failure thread%d, sql: %s,exception: %s" % (threadID, str(i),str(e))) + exit(-1) print("Thread %d: finishing" % threadID) @@ -134,9 +157,9 @@ class ConcurrentInquiry: def run(self): - + threads = [] - for i in range(50): + for i in range(self.numOfTherads): thread = threading.Thread(target=self.query_thread, args=(i,)) threads.append(thread) thread.start() diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index 4ffe35fc3c94edbdd194e03171696a1d681387c1..0af09634df5a5c418797ae4bd352c319fcbc74fa 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -54,6 +54,7 @@ export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR # Now we are all let, and let's see if we can find a crash. 
Note we pass all params +CRASH_GEN_EXEC=crash_gen_bootstrap.py if [[ $1 == '--valgrind' ]]; then shift export PYTHONMALLOC=malloc @@ -66,14 +67,16 @@ if [[ $1 == '--valgrind' ]]; then --leak-check=yes \ --suppressions=crash_gen/valgrind_taos.supp \ $PYTHON_EXEC \ - ./crash_gen/crash_gen.py $@ > $VALGRIND_OUT 2> $VALGRIND_ERR + $CRASH_GEN_EXEC $@ > $VALGRIND_OUT 2> $VALGRIND_ERR elif [[ $1 == '--helgrind' ]]; then shift + HELGRIND_OUT=helgrind.out + HELGRIND_ERR=helgrind.err valgrind \ --tool=helgrind \ $PYTHON_EXEC \ - ./crash_gen/crash_gen.py $@ + $CRASH_GEN_EXEC $@ > $HELGRIND_OUT 2> $HELGRIND_ERR else - $PYTHON_EXEC ./crash_gen/crash_gen.py $@ + $PYTHON_EXEC $CRASH_GEN_EXEC $@ fi diff --git a/tests/pytest/crash_gen/README.md b/tests/pytest/crash_gen/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6788ab1a63d0a7c515558695605d1ec8ac5fb7f9 --- /dev/null +++ b/tests/pytest/crash_gen/README.md @@ -0,0 +1,130 @@ +

    User's Guide to the Crash_Gen Tool

    + +# Introduction + +To effectively test and debug our TDengine product, we have developed a simple tool to +exercise various functions of the system in a randomized fashion, hoping to expose +maximum number of problems, hopefully without a pre-determined scenario. + +# Preparation + +To run this tool, please ensure the followed preparation work is done first. + +1. Fetch a copy of the TDengine source code, and build it successfully in the `build/` + directory +1. Ensure that the system has Python3.8 or above properly installed. We use + Ubuntu 20.04LTS as our own development environment, and suggest you also use such + an environment if possible. + +# Simple Execution + +To run the tool with the simplest method, follow the steps below: + +1. Open a terminal window, start the `taosd` service in the `build/` directory + (or however you prefer to start the `taosd` service) +1. Open another terminal window, go into the `tests/pytest/` directory, and + run `./crash_gen.sh -p -t 3 -s 10` (change the two parameters here as you wish) +1. Watch the output to the end and see if you get a `SUCCESS` or `FAILURE` + +That's it! + +# Running Clusters + +This tool also makes it easy to test/verify the clustering capabilities of TDengine. You +can start a cluster quite easily with the following command: + +``` +$ cd tests/pytest/ +$ ./crash_gen.sh -e -o 3 +``` + +The `-e` option above tells the tool to start the service, and do not run any tests, while +the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster. +Obviously you can adjust the the number here. 
+ +## Behind the Scenes + +When the tool runs a cluster, it users a number of directories, each holding the information +for a single DNode, see: + +``` +$ ls build/cluster* +build/cluster_dnode_0: +cfg data log + +build/cluster_dnode_1: +cfg data log + +build/cluster_dnode_2: +cfg data log +``` + +Therefore, when something goes wrong and you want to reset everything with the cluster, simple +erase all the files: + +``` +$ rm -rf build/cluster_dnode_* +``` + +## Addresses and Ports + +The DNodes in the cluster all binds the the `127.0.0.1` IP address (for now anyway), and +uses port 6030 for the first DNode, and 6130 for the 2nd one, and so on. + +## Testing Against a Cluster + +In a separate terminal window, you can invoke the tool in client mode and test against +a cluster, such as: + +``` +$ ./crash_gen.sh -p -t 10 -s 100 -i 3 +``` + +Here the `-i` option tells the tool to always create tables with 3 replicas, and run +all tests against such tables. + +# Additional Features + +The exhaustive features of the tool is available through the `-h` option: + +``` +$ ./crash_gen.sh -h +usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r] + [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x] + +TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) +--------------------------------------------------------------------- +1. You build TDengine in the top level ./build directory, as described in offical docs +2. You run the server there before this script: ./build/bin/taosd -c test/cfg + +optional arguments: + -h, --help show this help message and exit + -a, --auto-start-service + Automatically start/stop the TDengine service (default: false) + -b MAX_DBS, --max-dbs MAX_DBS + Maximum number of DBs to keep, set to disable dropping DB. 
(default: 0) + -c CONNECTOR_TYPE, --connector-type CONNECTOR_TYPE + Connector type to use: native, rest, or mixed (default: 10) + -d, --debug Turn on DEBUG mode for more logging (default: false) + -e, --run-tdengine Run TDengine service in foreground (default: false) + -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS + Ignore error codes, comma separated, 0x supported (default: None) + -i MAX_REPLICAS, --max-replicas MAX_REPLICAS + Maximum number of replicas to use, when testing against clusters. (default: 1) + -l, --larger-data Write larger amount of data during write operations (default: false) + -n, --dynamic-db-table-names + Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false) + -o NUM_DNODES, --num-dnodes NUM_DNODES + Number of Dnodes to initialize, used with -e option. (default: 1) + -p, --per-thread-db-connection + Use a single shared db connection (default: false) + -r, --record-ops Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false) + -s MAX_STEPS, --max-steps MAX_STEPS + Maximum number of steps to run (default: 100) + -t NUM_THREADS, --num-threads NUM_THREADS + Number of threads to run (default: 10) + -v, --verify-data Verify data written in a number of places by reading back (default: false) + -x, --continue-on-exception + Continue execution after encountering unexpected/disallowed errors/exceptions (default: false) +``` + diff --git a/tests/pytest/crash_gen/crash_gen.py b/tests/pytest/crash_gen/crash_gen.py index 48196ab383c974b5c5d3f5ebc54773cd846353e6..8d2b0080bc69f5ca84852448dc5ccb197044b319 100755 --- a/tests/pytest/crash_gen/crash_gen.py +++ b/tests/pytest/crash_gen/crash_gen.py @@ -14,42 +14,36 @@ # For type hinting before definition, ref: # https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel from __future__ import annotations -import taos -from util.sql import * -from 
util.cases import * -from util.dnodes import * -from util.log import * -from queue import Queue, Empty -from typing import IO + from typing import Set from typing import Dict from typing import List -from requests.auth import HTTPBasicAuth +from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none + import textwrap -import datetime -import logging import time +import datetime import random +import logging import threading -import requests import copy import argparse import getopt import sys import os -import io import signal import traceback import resource from guppy import hpy import gc -try: - import psutil -except: - print("Psutil module needed, please install: sudo pip3 install psutil") - sys.exit(-1) +from .service_manager import ServiceManager, TdeInstance +from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress +from .db import DbConn, MyTDSql, DbConnNative, DbManager + +import taos +import requests # Require Python 3 if sys.version_info[0] < 3: @@ -59,41 +53,37 @@ if sys.version_info[0] < 3: # Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace -gConfig = argparse.Namespace() # Dummy value, will be replaced later -gSvcMgr = None # TODO: refactor this hack, use dep injection -logger = None # type: Logger - -def runThread(wt: WorkerThread): - wt.run() +gConfig: argparse.Namespace +gSvcMgr: ServiceManager # TODO: refactor this hack, use dep injection +# logger: logging.Logger +gContainer: Container -class CrashGenError(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self.errno = errno - - def __str__(self): - return self.msg +# def runThread(wt: WorkerThread): +# wt.run() class WorkerThread: - def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator, - # te: TaskExecutor, - ): # note: main thread context! 
+ def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator): + """ + Note: this runs in the main thread context + """ # self._curStep = -1 self._pool = pool self._tid = tid self._tc = tc # type: ThreadCoordinator # self.threadIdent = threading.get_ident() - self._thread = threading.Thread(target=runThread, args=(self,)) + # self._thread = threading.Thread(target=runThread, args=(self,)) + self._thread = threading.Thread(target=self.run) self._stepGate = threading.Event() # Let us have a DB connection of our own if (gConfig.per_thread_db_connection): # type: ignore # print("connector_type = {}".format(gConfig.connector_type)) - if gConfig.connector_type == 'native': - self._dbConn = DbConn.createNative() + tInst = gContainer.defTdeInstance + if gConfig.connector_type == 'native': + self._dbConn = DbConn.createNative(tInst.getDbTarget()) elif gConfig.connector_type == 'rest': - self._dbConn = DbConn.createRest() + self._dbConn = DbConn.createRest(tInst.getDbTarget()) elif gConfig.connector_type == 'mixed': if Dice.throw(2) == 0: # 1/2 chance self._dbConn = DbConn.createNative() @@ -105,10 +95,10 @@ class WorkerThread: # self._dbInUse = False # if "use db" was executed already def logDebug(self, msg): - logger.debug(" TRD[{}] {}".format(self._tid, msg)) + Logging.debug(" TRD[{}] {}".format(self._tid, msg)) def logInfo(self, msg): - logger.info(" TRD[{}] {}".format(self._tid, msg)) + Logging.info(" TRD[{}] {}".format(self._tid, msg)) # def dbInUse(self): # return self._dbInUse @@ -127,10 +117,10 @@ class WorkerThread: def run(self): # initialization after thread starts, in the thread context # self.isSleeping = False - logger.info("Starting to run thread: {}".format(self._tid)) + Logging.info("Starting to run thread: {}".format(self._tid)) if (gConfig.per_thread_db_connection): # type: ignore - logger.debug("Worker thread openning database connection") + Logging.debug("Worker thread openning database connection") self._dbConn.open() self._doTaskLoop() @@ -140,7 
+130,7 @@ class WorkerThread: if self._dbConn.isOpen: #sometimes it is not open self._dbConn.close() else: - logger.warning("Cleaning up worker thread, dbConn already closed") + Logging.warning("Cleaning up worker thread, dbConn already closed") def _doTaskLoop(self): # while self._curStep < self._pool.maxSteps: @@ -151,15 +141,15 @@ class WorkerThread: tc.crossStepBarrier() # shared barrier first, INCLUDING the last one except threading.BrokenBarrierError as err: # main thread timed out print("_bto", end="") - logger.debug("[TRD] Worker thread exiting due to main thread barrier time-out") + Logging.debug("[TRD] Worker thread exiting due to main thread barrier time-out") break - logger.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid)) self.crossStepGate() # then per-thread gate, after being tapped - logger.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) if not self._tc.isRunning(): print("_wts", end="") - logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") + Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) @@ -178,15 +168,15 @@ class WorkerThread: raise # Fetch a task from the Thread Coordinator - logger.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + Logging.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) task = tc.fetchTask() # Execute such a task - logger.debug("[TRD] Worker thread [{}] about to execute task: {}".format( + Logging.debug("[TRD] Worker thread [{}] about to execute task: {}".format( self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) - logger.debug("[TRD] Worker thread 
[{}] finished executing task".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) # self._dbInUse = False # there may be changes between steps # print("_wtd", end=None) # worker thread died @@ -209,7 +199,7 @@ class WorkerThread: self.verifyThreadSelf() # only allowed by ourselves # Wait again at the "gate", waiting to be "tapped" - logger.debug( + Logging.debug( "[TRD] Worker thread {} about to cross the step gate".format( self._tid)) self._stepGate.wait() @@ -222,7 +212,7 @@ class WorkerThread: self.verifyThreadMain() # only allowed for main thread if self._thread.is_alive(): - logger.debug("[TRD] Tapping worker thread {}".format(self._tid)) + Logging.debug("[TRD] Tapping worker thread {}".format(self._tid)) self._stepGate.set() # wake up! time.sleep(0) # let the released thread run a bit else: @@ -253,7 +243,7 @@ class WorkerThread: class ThreadCoordinator: - WORKER_THREAD_TIMEOUT = 60 # one minute + WORKER_THREAD_TIMEOUT = 180 # one minute def __init__(self, pool: ThreadPool, dbManager: DbManager): self._curStep = -1 # first step is 0 @@ -267,7 +257,7 @@ class ThreadCoordinator: self._stepBarrier = threading.Barrier( self._pool.numThreads + 1) # one barrier for all threads self._execStats = ExecutionStats() - self._runStatus = MainExec.STATUS_RUNNING + self._runStatus = Status.STATUS_RUNNING self._initDbs() def getTaskExecutor(self): @@ -280,14 +270,14 @@ class ThreadCoordinator: self._stepBarrier.wait(timeout) def requestToStop(self): - self._runStatus = MainExec.STATUS_STOPPING + self._runStatus = Status.STATUS_STOPPING self._execStats.registerFailure("User Interruption") def _runShouldEnd(self, transitionFailed, hasAbortedTask, workerTimeout): maxSteps = gConfig.max_steps # type: ignore if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 return True - if self._runStatus != MainExec.STATUS_RUNNING: + if self._runStatus != Status.STATUS_RUNNING: return True if transitionFailed: return 
True @@ -308,7 +298,7 @@ class ThreadCoordinator: def _releaseAllWorkerThreads(self, transitionFailed): self._curStep += 1 # we are about to get into next step. TODO: race condition here! # Now not all threads had time to go to sleep - logger.debug( + Logging.debug( "--\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step @@ -316,7 +306,7 @@ class ThreadCoordinator: if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) - logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( + Logging.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( self._curStep)) # Now not all threads had time to go to sleep # Worker threads will wake up at this point, and each execute it's own task self.tapAllThreads() # release all worker thread from their "gates" @@ -325,10 +315,10 @@ class ThreadCoordinator: # Now main thread (that's us) is ready to enter a step # let other threads go past the pool barrier, but wait at the # thread gate - logger.debug("[TRD] Main thread about to cross the barrier") + Logging.debug("[TRD] Main thread about to cross the barrier") self.crossStepBarrier(timeout=self.WORKER_THREAD_TIMEOUT) self._stepBarrier.reset() # Other worker threads should now be at the "gate" - logger.debug("[TRD] Main thread finished crossing the barrier") + Logging.debug("[TRD] Main thread finished crossing the barrier") def _doTransition(self): transitionFailed = False @@ -336,11 +326,11 @@ class ThreadCoordinator: for x in self._dbs: db = x # type: Database sm = db.getStateMachine() - logger.debug("[STT] starting transitions for DB: {}".format(db.getName())) + Logging.debug("[STT] starting transitions for DB: {}".format(db.getName())) # at end of step, transiton the DB state tasksForDb = db.filterTasks(self._executedTasks) sm.transition(tasksForDb, self.getDbManager().getDbConn()) - logger.debug("[STT] transition ended for DB: 
{}".format(db.getName())) + Logging.debug("[STT] transition ended for DB: {}".format(db.getName())) # Due to limitation (or maybe not) of the TD Python library, # we cannot share connections across threads @@ -348,14 +338,14 @@ class ThreadCoordinator: # Moving below to task loop # if sm.hasDatabase(): # for t in self._pool.threadList: - # logger.debug("[DB] use db for all worker threads") + # Logging.debug("[DB] use db for all worker threads") # t.useDb() # t.execSql("use db") # main thread executing "use # db" on behalf of every worker thread except taos.error.ProgrammingError as err: if (err.msg == 'network unavailable'): # broken DB connection - logger.info("DB connection broken, execution failed") + Logging.info("DB connection broken, execution failed") traceback.print_stack() transitionFailed = True self._te = None # Not running any more @@ -368,7 +358,7 @@ class ThreadCoordinator: self.resetExecutedTasks() # clear the tasks after we are done # Get ready for next step - logger.debug("<-- Step {} finished, trasition failed = {}".format(self._curStep, transitionFailed)) + Logging.debug("<-- Step {} finished, trasition failed = {}".format(self._curStep, transitionFailed)) return transitionFailed def run(self): @@ -382,8 +372,9 @@ class ThreadCoordinator: hasAbortedTask = False workerTimeout = False while not self._runShouldEnd(transitionFailed, hasAbortedTask, workerTimeout): - if not gConfig.debug: # print this only if we are not in debug mode - print(".", end="", flush=True) + if not gConfig.debug: # print this only if we are not in debug mode + Progress.emit(Progress.STEP_BOUNDARY) + # print(".", end="", flush=True) # if (self._curStep % 2) == 0: # print memory usage once every 10 steps # memUsage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # print("[m:{}]".format(memUsage), end="", flush=True) # print memory usage @@ -395,8 +386,9 @@ class ThreadCoordinator: try: self._syncAtBarrier() # For now just cross the barrier + 
Progress.emit(Progress.END_THREAD_STEP) except threading.BrokenBarrierError as err: - logger.info("Main loop aborted, caused by worker thread time-out") + Logging.info("Main loop aborted, caused by worker thread time-out") self._execStats.registerFailure("Aborted due to worker thread timeout") print("\n\nWorker Thread time-out detected, important thread info:") ts = ThreadStacks() @@ -409,7 +401,7 @@ class ThreadCoordinator: # threads are QUIET. hasAbortedTask = self._hasAbortedTask() # from previous step if hasAbortedTask: - logger.info("Aborted task encountered, exiting test program") + Logging.info("Aborted task encountered, exiting test program") self._execStats.registerFailure("Aborted Task Encountered") break # do transition only if tasks are error free @@ -420,29 +412,30 @@ class ThreadCoordinator: transitionFailed = True errno2 = Helper.convertErrno(err.errno) # correct error scheme errMsg = "Transition failed: errno=0x{:X}, msg: {}".format(errno2, err) - logger.info(errMsg) + Logging.info(errMsg) traceback.print_exc() self._execStats.registerFailure(errMsg) # Then we move on to the next step + Progress.emit(Progress.BEGIN_THREAD_STEP) self._releaseAllWorkerThreads(transitionFailed) if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate" - logger.debug("Abnormal ending of main thraed") + Logging.debug("Abnormal ending of main thraed") elif workerTimeout: - logger.debug("Abnormal ending of main thread, due to worker timeout") + Logging.debug("Abnormal ending of main thread, due to worker timeout") else: # regular ending, workers waiting at "barrier" - logger.debug("Regular ending, main thread waiting for all worker threads to stop...") + Logging.debug("Regular ending, main thread waiting for all worker threads to stop...") self._syncAtBarrier() self._te = None # No more executor, time to end - logger.debug("Main thread tapping all threads one last time...") + Logging.debug("Main thread tapping all threads one last time...") 
self.tapAllThreads() # Let the threads run one last time - logger.debug("\r\n\n--> Main thread ready to finish up...") - logger.debug("Main thread joining all threads") + Logging.debug("\r\n\n--> Main thread ready to finish up...") + Logging.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - logger.info("\nAll worker threads finished") + Logging.info("\nAll worker threads finished") self._execStats.endExec() def cleanup(self): # free resources @@ -474,7 +467,7 @@ class ThreadCoordinator: wakeSeq.append(i) else: wakeSeq.insert(0, i) - logger.debug( + Logging.debug( "[TRD] Main thread waking up worker threads: {}".format( str(wakeSeq))) # TODO: set dice seed to a deterministic value @@ -492,9 +485,11 @@ class ThreadCoordinator: dbc = self.getDbManager().getDbConn() if gConfig.max_dbs == 0: self._dbs.append(Database(0, dbc)) - else: + else: + baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic + )*333) % 888 if gConfig.dynamic_db_table_names else 0 for i in range(gConfig.max_dbs): - self._dbs.append(Database(i, dbc)) + self._dbs.append(Database(baseDbNumber + i, dbc)) def pickDatabase(self): idxDb = 0 @@ -512,7 +507,7 @@ class ThreadCoordinator: # pick a task type for current state db = self.pickDatabase() - taskType = db.getStateMachine().pickTaskType() # type: Task + taskType = db.getStateMachine().pickTaskType() # dynamic name of class return taskType(self._execStats, db) # create a task from it def resetExecutedTasks(self): @@ -522,13 +517,6 @@ class ThreadCoordinator: with self._lock: self._executedTasks.append(task) -# We define a class to run a number of threads in locking steps. 
- -class Helper: - @classmethod - def convertErrno(cls, errno): - return errno if (errno > 0) else 0x80000000 + errno - class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads @@ -546,7 +534,7 @@ class ThreadPool: def joinAll(self): for workerThread in self.threadList: - logger.debug("Joining thread...") + Logging.debug("Joining thread...") workerThread._thread.join() def cleanup(self): @@ -603,7 +591,7 @@ class LinearQueue(): def allocate(self, i): with self._lock: - # logger.debug("LQ allocating item {}".format(i)) + # Logging.debug("LQ allocating item {}".format(i)) if (i in self.inUse): raise RuntimeError( "Cannot re-use same index in queue: {}".format(i)) @@ -611,7 +599,7 @@ class LinearQueue(): def release(self, i): with self._lock: - # logger.debug("LQ releasing item {}".format(i)) + # Logging.debug("LQ releasing item {}".format(i)) self.inUse.remove(i) # KeyError possible, TODO: why? def size(self): @@ -633,357 +621,6 @@ class LinearQueue(): return ret -class DbConn: - TYPE_NATIVE = "native-c" - TYPE_REST = "rest-api" - TYPE_INVALID = "invalid" - - @classmethod - def create(cls, connType): - if connType == cls.TYPE_NATIVE: - return DbConnNative() - elif connType == cls.TYPE_REST: - return DbConnRest() - else: - raise RuntimeError( - "Unexpected connection type: {}".format(connType)) - - @classmethod - def createNative(cls): - return cls.create(cls.TYPE_NATIVE) - - @classmethod - def createRest(cls): - return cls.create(cls.TYPE_REST) - - def __init__(self): - self.isOpen = False - self._type = self.TYPE_INVALID - self._lastSql = None - - def getLastSql(self): - return self._lastSql - - def open(self): - if (self.isOpen): - raise RuntimeError("Cannot re-open an existing DB connection") - - # below implemented by child classes - self.openByType() - - logger.debug("[DB] data connection opened, type = {}".format(self._type)) - self.isOpen = True - - def queryScalar(self, sql) -> int: - return self._queryAny(sql) - - def 
queryString(self, sql) -> str: - return self._queryAny(sql) - - def _queryAny(self, sql): # actual query result as an int - if (not self.isOpen): - raise RuntimeError("Cannot query database until connection is open") - nRows = self.query(sql) - if nRows != 1: - raise taos.error.ProgrammingError( - "Unexpected result for query: {}, rows = {}".format(sql, nRows), - (0x991 if nRows==0 else 0x992) - ) - if self.getResultRows() != 1 or self.getResultCols() != 1: - raise RuntimeError("Unexpected result set for query: {}".format(sql)) - return self.getQueryResult()[0][0] - - def use(self, dbName): - self.execute("use {}".format(dbName)) - - def existsDatabase(self, dbName: str): - ''' Check if a certain database exists ''' - self.query("show databases") - dbs = [v[0] for v in self.getQueryResult()] # ref: https://stackoverflow.com/questions/643823/python-list-transformation - # ret2 = dbName in dbs - # print("dbs = {}, str = {}, ret2={}, type2={}".format(dbs, dbName,ret2, type(dbName))) - return dbName in dbs # TODO: super weird type mangling seen, once here - - def hasTables(self): - return self.query("show tables") > 0 - - def execute(self, sql): - ''' Return the number of rows affected''' - raise RuntimeError("Unexpected execution, should be overriden") - - def safeExecute(self, sql): - '''Safely execute any SQL query, returning True/False upon success/failure''' - try: - self.execute(sql) - return True # ignore num of results, return success - except taos.error.ProgrammingError as err: - return False # failed, for whatever TAOS reason - # Not possile to reach here, non-TAOS exception would have been thrown - - def query(self, sql) -> int: # return num rows returned - ''' Return the number of rows affected''' - raise RuntimeError("Unexpected execution, should be overriden") - - def openByType(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def getQueryResult(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def 
getResultRows(self): - raise RuntimeError("Unexpected execution, should be overriden") - - def getResultCols(self): - raise RuntimeError("Unexpected execution, should be overriden") - -# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql - - -class DbConnRest(DbConn): - def __init__(self): - super().__init__() - self._type = self.TYPE_REST - self._url = "http://localhost:6041/rest/sql" # fixed for now - self._result = None - - def openByType(self): # Open connection - pass # do nothing, always open - - def close(self): - if (not self.isOpen): - raise RuntimeError("Cannot clean up database until connection is open") - # Do nothing for REST - logger.debug("[DB] REST Database connection closed") - self.isOpen = False - - def _doSql(self, sql): - self._lastSql = sql # remember this, last SQL attempted - try: - r = requests.post(self._url, - data = sql, - auth = HTTPBasicAuth('root', 'taosdata')) - except: - print("REST API Failure (TODO: more info here)") - raise - rj = r.json() - # Sanity check for the "Json Result" - if ('status' not in rj): - raise RuntimeError("No status in REST response") - - if rj['status'] == 'error': # clearly reported error - if ('code' not in rj): # error without code - raise RuntimeError("REST error return without code") - errno = rj['code'] # May need to massage this in the future - # print("Raising programming error with REST return: {}".format(rj)) - raise taos.error.ProgrammingError( - rj['desc'], errno) # todo: check existance of 'desc' - - if rj['status'] != 'succ': # better be this - raise RuntimeError( - "Unexpected REST return status: {}".format( - rj['status'])) - - nRows = rj['rows'] if ('rows' in rj) else 0 - self._result = rj - return nRows - - def execute(self, sql): - if (not self.isOpen): - raise RuntimeError( - "Cannot execute database commands until connection is open") - logger.debug("[SQL-REST] Executing SQL: {}".format(sql)) - nRows = self._doSql(sql) - logger.debug( - "[SQL-REST] Execution Result, 
nRows = {}, SQL = {}".format(nRows, sql)) - return nRows - - def query(self, sql): # return rows affected - return self.execute(sql) - - def getQueryResult(self): - return self._result['data'] - - def getResultRows(self): - print(self._result) - raise RuntimeError("TBD") - # return self._tdSql.queryRows - - def getResultCols(self): - print(self._result) - raise RuntimeError("TBD") - - # Duplicate code from TDMySQL, TODO: merge all this into DbConnNative - - -class MyTDSql: - # Class variables - _clsLock = threading.Lock() # class wide locking - longestQuery = None # type: str - longestQueryTime = 0.0 # seconds - lqStartTime = 0.0 - # lqEndTime = 0.0 # Not needed, as we have the two above already - - def __init__(self, hostAddr, cfgPath): - # Make the DB connection - self._conn = taos.connect(host=hostAddr, config=cfgPath) - self._cursor = self._conn.cursor() - - self.queryRows = 0 - self.queryCols = 0 - self.affectedRows = 0 - - # def init(self, cursor, log=True): - # self.cursor = cursor - # if (log): - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # self.cursor.log(caller.filename + ".sql") - - def close(self): - self._cursor.close() # can we double close? - self._conn.close() # TODO: very important, cursor close does NOT close DB connection! 
- self._cursor.close() - - def _execInternal(self, sql): - startTime = time.time() - ret = self._cursor.execute(sql) - # print("\nSQL success: {}".format(sql)) - queryTime = time.time() - startTime - # Record the query time - cls = self.__class__ - if queryTime > (cls.longestQueryTime + 0.01) : - with cls._clsLock: - cls.longestQuery = sql - cls.longestQueryTime = queryTime - cls.lqStartTime = startTime - return ret - - def query(self, sql): - self.sql = sql - try: - self._execInternal(sql) - self.queryResult = self._cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self._cursor.description) - except Exception as e: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # args = (caller.filename, caller.lineno, sql, repr(e)) - # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) - raise - return self.queryRows - - def execute(self, sql): - self.sql = sql - try: - self.affectedRows = self._execInternal(sql) - except Exception as e: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # args = (caller.filename, caller.lineno, sql, repr(e)) - # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) - raise - return self.affectedRows - - -class DbConnNative(DbConn): - # Class variables - _lock = threading.Lock() - _connInfoDisplayed = False - totalConnections = 0 # Not private - - def __init__(self): - super().__init__() - self._type = self.TYPE_NATIVE - self._conn = None - # self._cursor = None - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("communit")] - else: - projPath = selfPath[:selfPath.find("tests")] - - buildPath = None - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - if buildPath == None: - raise RuntimeError("Failed to determine buildPath, 
selfPath={}, projPath={}" - .format(selfPath, projPath)) - return buildPath - - - def openByType(self): # Open connection - cfgPath = self.getBuildPath() + "/test/cfg" - hostAddr = "127.0.0.1" - - cls = self.__class__ # Get the class, to access class variables - with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!! - if not cls._connInfoDisplayed: - cls._connInfoDisplayed = True # updating CLASS variable - logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath)) - # Make the connection - # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable - # self._cursor = self._conn.cursor() - # Record the count in the class - self._tdSql = MyTDSql(hostAddr, cfgPath) # making DB connection - cls.totalConnections += 1 - - self._tdSql.execute('reset query cache') - # self._cursor.execute('use db') # do this at the beginning of every - - # Open connection - # self._tdSql = MyTDSql() - # self._tdSql.init(self._cursor) - - def close(self): - if (not self.isOpen): - raise RuntimeError("Cannot clean up database until connection is open") - self._tdSql.close() - # Decrement the class wide counter - cls = self.__class__ # Get the class, to access class variables - with cls._lock: - cls.totalConnections -= 1 - - logger.debug("[DB] Database connection closed") - self.isOpen = False - - def execute(self, sql): - if (not self.isOpen): - raise RuntimeError("Cannot execute database commands until connection is open") - logger.debug("[SQL] Executing SQL: {}".format(sql)) - self._lastSql = sql - nRows = self._tdSql.execute(sql) - logger.debug( - "[SQL] Execution Result, nRows = {}, SQL = {}".format( - nRows, sql)) - return nRows - - def query(self, sql): # return rows affected - if (not self.isOpen): - raise RuntimeError( - "Cannot query database until connection is open") - logger.debug("[SQL] Executing SQL: {}".format(sql)) - self._lastSql = sql - nRows = self._tdSql.query(sql) - 
logger.debug( - "[SQL] Query Result, nRows = {}, SQL = {}".format( - nRows, sql)) - return nRows - # results are in: return self._tdSql.queryResult - - def getQueryResult(self): - return self._tdSql.queryResult - - def getResultRows(self): - return self._tdSql.queryRows - - def getResultCols(self): - return self._tdSql.queryCols - - class AnyState: STATE_INVALID = -1 STATE_EMPTY = 0 # nothing there, no even a DB @@ -1232,7 +869,7 @@ class StateMechine: def init(self, dbc: DbConn): # late initailization, don't save the dbConn self._curState = self._findCurrentState(dbc) # starting state - logger.debug("Found Starting State: {}".format(self._curState)) + Logging.debug("Found Starting State: {}".format(self._curState)) # TODO: seems no lnoger used, remove? def getCurrentState(self): @@ -1270,7 +907,7 @@ class StateMechine: raise RuntimeError( "No suitable task types found for state: {}".format( self._curState)) - logger.debug( + Logging.debug( "[OPS] Tasks found for state {}: {}".format( self._curState, typesToStrings(taskTypes))) @@ -1280,27 +917,27 @@ class StateMechine: ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state dbName =self._db.getName() if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! 
- logger.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) + Logging.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) return StateEmpty() # did not do this when openning connection, and this is NOT the worker # thread, which does this on their own dbc.use(dbName) if not dbc.hasTables(): # no tables - logger.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) return StateDbOnly() sTable = self._db.getFixedSuperTable() if sTable.hasRegTables(dbc, dbName): # no regular tables - logger.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) return StateSuperTableOnly() else: # has actual tables - logger.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) + Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) return StateHasData() # We transition the system to a new state by examining the current state itself def transition(self, tasks, dbc: DbConn): if (len(tasks) == 0): # before 1st step, or otherwise empty - logger.debug("[STT] Starting State: {}".format(self._curState)) + Logging.debug("[STT] Starting State: {}".format(self._curState)) return # do nothing # this should show up in the server log, separating steps @@ -1336,7 +973,7 @@ class StateMechine: # Nothing for sure newState = self._findCurrentState(dbc) - logger.debug("[STT] New DB state determined: {}".format(newState)) + Logging.debug("[STT] New DB state determined: {}".format(newState)) # can old state move to new state through the tasks? 
self._curState.verifyTasksToState(tasks, newState) self._curState = newState @@ -1354,7 +991,7 @@ class StateMechine: # read data task, default to 10: TODO: change to a constant weights.append(10) i = self._weighted_choice_sub(weights) - # logger.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) + # Logging.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes))) return taskTypes[i] # ref: @@ -1372,6 +1009,8 @@ class Database: possibly in a cluster environment. For now we use it to manage state transitions in that database + + TODO: consider moving, but keep in mind it contains "StateMachine" ''' _clsLock = threading.Lock() # class wide lock _lastInt = 101 # next one is initial integer @@ -1433,7 +1072,7 @@ class Database: t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above - logger.info("Setting up TICKS to start from: {}".format(t4)) + Logging.info("Setting up TICKS to start from: {}".format(t4)) return t4 @classmethod @@ -1468,64 +1107,6 @@ class Database: return ret -class DbManager(): - ''' This is a wrapper around DbConn(), to make it easier to use. - - TODO: rename this to DbConnManager - ''' - def __init__(self): - self.tableNumQueue = LinearQueue() # TODO: delete? 
- # self.openDbServerConnection() - self._dbConn = DbConn.createNative() if ( - gConfig.connector_type == 'native') else DbConn.createRest() - try: - self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected - except taos.error.ProgrammingError as err: - # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) - if (err.msg == 'client disconnected'): # cannot open DB connection - print( - "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") - sys.exit(2) - else: - print("Failed to connect to DB, errno = {}, msg: {}" - .format(Helper.convertErrno(err.errno), err.msg)) - raise - except BaseException: - print("[=] Unexpected exception") - raise - - # Do this after dbConn is in proper shape - # Moved to Database() - # self._stateMachine = StateMechine(self._dbConn) - - def getDbConn(self): - return self._dbConn - - # TODO: not used any more, to delete - def pickAndAllocateTable(self): # pick any table, and "use" it - return self.tableNumQueue.pickAndAllocate() - - # TODO: Not used any more, to delete - def addTable(self): - with self._lock: - tIndex = self.tableNumQueue.push() - return tIndex - - # Not used any more, to delete - def releaseTable(self, i): # return the table back, so others can use it - self.tableNumQueue.release(i) - - # TODO: not used any more, delete - def getTableNameToDelete(self): - tblNum = self.tableNumQueue.pop() # TODO: race condition! 
- if (not tblNum): # maybe false - return False - - return "table_{}".format(tblNum) - - def cleanUp(self): - self._dbConn.close() - class TaskExecutor(): class BoundedList: def __init__(self, size=10): @@ -1584,10 +1165,10 @@ class TaskExecutor(): self._boundedList.add(n) # def logInfo(self, msg): - # logger.info(" T[{}.x]: ".format(self._curStep) + msg) + # Logging.info(" T[{}.x]: ".format(self._curStep) + msg) # def logDebug(self, msg): - # logger.debug(" T[{}.x]: ".format(self._curStep) + msg) + # Logging.debug(" T[{}.x]: ".format(self._curStep) + msg) class Task(): @@ -1600,19 +1181,19 @@ class Task(): @classmethod def allocTaskNum(cls): Task.taskSn += 1 # IMPORTANT: cannot use cls.taskSn, since each sub class will have a copy - # logger.debug("Allocating taskSN: {}".format(Task.taskSn)) + # Logging.debug("Allocating taskSN: {}".format(Task.taskSn)) return Task.taskSn def __init__(self, execStats: ExecutionStats, db: Database): self._workerThread = None - self._err = None # type: Exception + self._err: Optional[Exception] = None self._aborted = False self._curStep = None self._numRows = None # Number of rows affected # Assign an incremental task serial number self._taskNum = self.allocTaskNum() - # logger.debug("Creating new task {}...".format(self._taskNum)) + # Logging.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats self._db = db # A task is always associated/for a specific DB @@ -1645,15 +1226,22 @@ class Task(): "To be implemeted by child classes, class name: {}".format( self.__class__.__name__)) + def _isServiceStable(self): + if not gSvcMgr: + return True # we don't run service, so let's assume it's stable + return gSvcMgr.isStable() # otherwise let's examine the service + def _isErrAcceptable(self, errno, msg): if errno in [ 0x05, # TSDB_CODE_RPC_NOT_READY 0x0B, # Unable to establish connection, more details in TD-1648 - # 0x200, # invalid SQL, TODO: re-examine with TD-934 + 0x200, # invalid SQL, TODO: re-examine with 
TD-934 + 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 0x217, # "db not selected", client side defined error code - 0x218, # "Table does not exist" client side defined error code - 0x360, 0x362, - 0x369, # tag already exists + # 0x218, # "Table does not exist" client side defined error code + 0x360, # Table already exists + 0x362, + # 0x369, # tag already exists 0x36A, 0x36B, 0x36D, 0x381, 0x380, # "db not selected" @@ -1662,12 +1250,17 @@ class Task(): 0x503, 0x510, # vnode not in ready state 0x14, # db not ready, errno changed - 0x600, + 0x600, # Invalid table ID, why? 1000 # REST catch-all error ]: return True # These are the ALWAYS-ACCEPTABLE ones - elif (errno in [ 0x0B ]) and gConfig.auto_start_service: - return True # We may get "network unavilable" when restarting service + # This case handled below already. + # elif (errno in [ 0x0B ]) and gConfig.auto_start_service: + # return True # We may get "network unavilable" when restarting service + elif gConfig.ignore_errors: # something is specified on command line + moreErrnos = [int(v, 0) for v in gConfig.ignore_errors.split(',')] + if errno in moreErrnos: + return True elif errno == 0x200 : # invalid SQL, we need to div in a bit more if msg.find("invalid column name") != -1: return True @@ -1675,8 +1268,8 @@ class Task(): return True elif msg.find("duplicated column names") != -1: # also alter table tag issues return True - elif (gSvcMgr!=None) and gSvcMgr.isRestarting(): - logger.info("Ignoring error when service is restarting: errno = {}, msg = {}".format(errno, msg)) + elif not self._isServiceStable(): # We are managing service, and ... + Logging.info("Ignoring error when service starting/stopping: errno = {}, msg = {}".format(errno, msg)) return True return False # Not an acceptable error @@ -1735,10 +1328,11 @@ class Task(): self._aborted = True traceback.print_exc() except BaseException: # TODO: what is this again??!! 
- self.logDebug( - "[=] Unexpected exception, SQL: {}".format( - wt.getDbConn().getLastSql())) - raise + raise RuntimeError("Punt") + # self.logDebug( + # "[=] Unexpected exception, SQL: {}".format( + # wt.getDbConn().getLastSql())) + # raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) self.logDebug("[X] task execution completed, {}, status: {}".format( @@ -1817,14 +1411,14 @@ class ExecutionStats: self._failureReason = reason def printStats(self): - logger.info( + Logging.info( "----------------------------------------------------------------------") - logger.info( + Logging.info( "| Crash_Gen test {}, with the following stats:". format( "FAILED (reason: {})".format( self._failureReason) if self._failed else "SUCCEEDED")) - logger.info("| Task Execution Times (success/total):") - execTimesAny = 0 + Logging.info("| Task Execution Times (success/total):") + execTimesAny = 0.0 for k, n in self._execTimes.items(): execTimesAny += n[0] errStr = None @@ -1834,28 +1428,28 @@ class ExecutionStats: errStrs = ["0x{:X}:{}".format(eno, n) for (eno, n) in errors.items()] # print("error strings = {}".format(errStrs)) errStr = ", ".join(errStrs) - logger.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr)) + Logging.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr)) - logger.info( + Logging.info( "| Total Tasks Executed (success or not): {} ".format(execTimesAny)) - logger.info( + Logging.info( "| Total Tasks In Progress at End: {}".format( self._tasksInProgress)) - logger.info( + Logging.info( "| Total Task Busy Time (elapsed time when any task is in progress): {:.3f} seconds".format( self._accRunTime)) - logger.info( + Logging.info( "| Average Per-Task Execution Time: {:.3f} seconds".format(self._accRunTime / execTimesAny)) - logger.info( + Logging.info( "| Total Elapsed Time (from wall clock): {:.3f} seconds".format( self._elapsedTime)) - logger.info("| Top numbers written: 
{}".format(TaskExecutor.getBoundedList())) - logger.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) - logger.info("| Longest native query time: {:.3f} seconds, started: {}". + Logging.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) + Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) + Logging.info("| Longest native query time: {:.3f} seconds, started: {}". format(MyTDSql.longestQueryTime, time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))) ) - logger.info("| Longest native query: {}".format(MyTDSql.longestQuery)) - logger.info( + Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery)) + Logging.info( "----------------------------------------------------------------------") @@ -1865,11 +1459,14 @@ class StateTransitionTask(Task): LARGE_NUMBER_OF_RECORDS = 50 SMALL_NUMBER_OF_RECORDS = 3 + _baseTableNumber = None + + _endState = None + @classmethod def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - - _endState = None + @classmethod def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") @@ -1889,7 +1486,10 @@ class StateTransitionTask(Task): @classmethod def getRegTableName(cls, i): - return "reg_table_{}".format(i) + if ( StateTransitionTask._baseTableNumber is None): + StateTransitionTask._baseTableNumber = Dice.throw( + 999) if gConfig.dynamic_db_table_names else 0 + return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) def execute(self, wt: WorkerThread): super().execute(wt) @@ -1909,7 +1509,8 @@ class TaskCreateDb(StateTransitionTask): # was: self.execWtSql(wt, "create database db") repStr = "" if gConfig.max_replicas != 1: - numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N + # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... 
N + numReplica = gConfig.max_replicas # fixed, always repStr = "replica {}".format(numReplica) self.execWtSql(wt, "create database {} {}" .format(self._db.getName(), repStr) ) @@ -1925,7 +1526,7 @@ class TaskDropDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): self.execWtSql(wt, "drop database {}".format(self._db.getName())) - logger.debug("[OPS] database dropped at {}".format(time.time())) + Logging.debug("[OPS] database dropped at {}".format(time.time())) class TaskCreateSuperTable(StateTransitionTask): @classmethod @@ -1938,7 +1539,7 @@ class TaskCreateSuperTable(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): if not self._db.exists(wt.getDbConn()): - logger.debug("Skipping task, no DB yet") + Logging.debug("Skipping task, no DB yet") return sTable = self._db.getFixedSuperTable() # type: TdSuperTable @@ -1973,7 +1574,7 @@ class TdSuperTable: dbc.query("select TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) - logger.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) + Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) raise qr = dbc.getQueryResult() @@ -2045,15 +1646,39 @@ class TaskReadData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canReadData() + # def _canRestartService(self): + # if not gSvcMgr: + # return True # always + # return gSvcMgr.isActive() # only if it's running TODO: race condition here + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): sTable = self._db.getFixedSuperTable() - # 1 in 5 chance, simulate a broken connection. 
- if random.randrange(5) == 0: # TODO: break connection in all situations - wt.getDbConn().close() - wt.getDbConn().open() - print("_r", end="", flush=True) - + # 1 in 5 chance, simulate a broken connection, only if service stable (not restarting) + if random.randrange(20)==0: # and self._canRestartService(): # TODO: break connection in all situations + # Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG + Progress.emit(Progress.SERVICE_RECONNECT_START) + try: + wt.getDbConn().close() + wt.getDbConn().open() + except ConnectionError as err: # may fail + if not gSvcMgr: + Logging.error("Failed to reconnect in client-only mode") + raise # Not OK if we are running in client-only mode + if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to + Logging.error("Failed to reconnect when managed server is running") + raise # Not OK if we are running normally + + Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) + # Logging.info("Ignoring DB reconnect error") + + # print("_r", end="", flush=True) + Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) + # The above might have taken a lot of time, service might be running + # by now, causing error below to be incorrectly handled due to timing issue + return # TODO: fix server restart status race condtion + + dbc = wt.getDbConn() dbName = self._db.getName() for rTbName in sTable.getRegTables(dbc, dbName): # regular tables @@ -2088,7 +1713,7 @@ class TaskReadData(StateTransitionTask): dbc.execute("select {} from {}.{}".format(aggExpr, dbName, sTable.getName())) except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) - logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) + Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) raise class TaskDropSuperTable(StateTransitionTask): @@ -2119,7 +1744,7 @@ class TaskDropSuperTable(StateTransitionTask): errno2 = 
Helper.convertErrno(err.errno) if (errno2 in [0x362]): # mnode invalid table name isSuccess = False - logger.debug("[DB] Acceptable error when dropping a table") + Logging.debug("[DB] Acceptable error when dropping a table") continue # try to delete next regular table if (not tickOutput): @@ -2199,20 +1824,19 @@ class TaskAddData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() - # We use these two files to record operations to DB, useful for power-off - # tests - fAddLogReady = None - fAddLogDone = None + # We use these two files to record operations to DB, useful for power-off tests + fAddLogReady = None # type: TextIOWrapper + fAddLogDone = None # type: TextIOWrapper @classmethod def prepToRecordOps(cls): if gConfig.record_ops: if (cls.fAddLogReady is None): - logger.info( + Logging.info( "Recording in a file operations to be performed...") cls.fAddLogReady = open("add_log_ready.txt", "w") if (cls.fAddLogDone is None): - logger.info("Recording in a file operations completed...") + Logging.info("Recording in a file operations completed...") cls.fAddLogDone = open("add_log_done.txt", "w") @classmethod @@ -2288,490 +1912,8 @@ class TaskAddData(StateTransitionTask): self.activeTable.discard(i) # not raising an error, unlike remove -# Deterministic random number generator -class Dice(): - seeded = False # static, uninitialized - - @classmethod - def seed(cls, s): # static - if (cls.seeded): - raise RuntimeError( - "Cannot seed the random generator more than once") - cls.verifyRNG() - random.seed(s) - cls.seeded = True # TODO: protect against multi-threading - - @classmethod - def verifyRNG(cls): # Verify that the RNG is determinstic - random.seed(0) - x1 = random.randrange(0, 1000) - x2 = random.randrange(0, 1000) - x3 = random.randrange(0, 1000) - if (x1 != 864 or x2 != 394 or x3 != 776): - raise RuntimeError("System RNG is not deterministic") - - @classmethod - def throw(cls, stop): # get 0 to stop-1 - return 
cls.throwRange(0, stop) - - @classmethod - def throwRange(cls, start, stop): # up to stop-1 - if (not cls.seeded): - raise RuntimeError("Cannot throw dice before seeding it") - return random.randrange(start, stop) - - @classmethod - def choice(cls, cList): - return random.choice(cList) - - -class LoggingFilter(logging.Filter): - def filter(self, record: logging.LogRecord): - if (record.levelno >= logging.INFO): - return True # info or above always log - - # Commenting out below to adjust... - - # if msg.startswith("[TRD]"): - # return False - return True - - -class MyLoggingAdapter(logging.LoggerAdapter): - def process(self, msg, kwargs): - return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs - # return '[%s] %s' % (self.extra['connid'], msg), kwargs - - -class SvcManager: - def __init__(self): - print("Starting TDengine Service Manager") - # signal.signal(signal.SIGTERM, self.sigIntHandler) # Moved to MainExec - # signal.signal(signal.SIGINT, self.sigIntHandler) - # signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! 
- self.inSigHandler = False - # self._status = MainExec.STATUS_RUNNING # set inside - # _startTaosService() - self.svcMgrThread = None # type: ServiceManagerThread - self._lock = threading.Lock() - self._isRestarting = False - def _doMenu(self): - choice = "" - while True: - print("\nInterrupting Service Program, Choose an Action: ") - print("1: Resume") - print("2: Terminate") - print("3: Restart") - # Remember to update the if range below - # print("Enter Choice: ", end="", flush=True) - while choice == "": - choice = input("Enter Choice: ") - if choice != "": - break # done with reading repeated input - if choice in ["1", "2", "3"]: - break # we are done with whole method - print("Invalid choice, please try again.") - choice = "" # reset - return choice - - def sigUsrHandler(self, signalNumber, frame): - print("Interrupting main thread execution upon SIGUSR1") - if self.inSigHandler: # already - print("Ignoring repeated SIG...") - return # do nothing if it's already not running - self.inSigHandler = True - - choice = self._doMenu() - if choice == "1": - # TODO: can the sub-process be blocked due to us not reading from - # queue? - self.sigHandlerResume() - elif choice == "2": - self.stopTaosService() - elif choice == "3": # Restart - self.restart() - else: - raise RuntimeError("Invalid menu choice: {}".format(choice)) - - self.inSigHandler = False - - def sigIntHandler(self, signalNumber, frame): - print("SvcManager: INT Signal Handler starting...") - if self.inSigHandler: - print("Ignoring repeated SIG_INT...") - return - self.inSigHandler = True - - self.stopTaosService() - print("SvcManager: INT Signal Handler returning...") - self.inSigHandler = False - - def sigHandlerResume(self): - print("Resuming TDengine service manager thread (main thread)...\n\n") - - def _checkServiceManagerThread(self): - if self.svcMgrThread: # valid svc mgr thread - if self.svcMgrThread.isStopped(): # done? - self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? 
- self.svcMgrThread = None # no more - - def _procIpcAll(self): - while self.isRunning() or self.isRestarting() : # for as long as the svc mgr thread is still here - if self.isRunning(): - self.svcMgrThread.procIpcBatch() # regular processing, - self._checkServiceManagerThread() - elif self.isRetarting(): - print("Service restarting...") - time.sleep(0.5) # pause, before next round - print( - "Service Manager Thread (with subprocess) has ended, main thread now exiting...") - - def startTaosService(self): - with self._lock: - if self.svcMgrThread: - raise RuntimeError("Cannot start TAOS service when one may already be running") - - # Find if there's already a taosd service, and then kill it - for proc in psutil.process_iter(): - if proc.name() == 'taosd': - print("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupe") - time.sleep(2.0) - proc.kill() - # print("Process: {}".format(proc.name())) - - - self.svcMgrThread = ServiceManagerThread() # create the object - print("Attempting to start TAOS service started, printing out output...") - self.svcMgrThread.start() - self.svcMgrThread.procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines - print("TAOS service started") - - def stopTaosService(self, outputLines=20): - with self._lock: - if not self.isRunning(): - logger.warning("Cannot stop TAOS service, not running") - return - - print("Terminating Service Manager Thread (SMT) execution...") - self.svcMgrThread.stop() - if self.svcMgrThread.isStopped(): - self.svcMgrThread.procIpcBatch(outputLines) # one last time - self.svcMgrThread = None - print("End of TDengine Service Output") - print("----- TDengine Service (managed by SMT) is now terminated -----\n") - else: - print("WARNING: SMT did not terminate as expected") - - def run(self): - self.startTaosService() - self._procIpcAll() # pump/process all the messages, may encounter SIG + restart - if self.isRunning(): # if sig handler hasn't destroyed it by now - 
self.stopTaosService() # should have started already - - def restart(self): - if self._isRestarting: - logger.warning("Cannot restart service when it's already restarting") - return - - self._isRestarting = True - if self.isRunning(): - self.stopTaosService() - else: - logger.warning("Service not running when restart requested") - - self.startTaosService() - self._isRestarting = False - - def isRunning(self): - return self.svcMgrThread != None - - def isRestarting(self): - return self._isRestarting - -class ServiceManagerThread: - MAX_QUEUE_SIZE = 10000 - - def __init__(self): - self._tdeSubProcess = None # type: TdeSubProcess - self._thread = None - self._status = None - - def getStatus(self): - return self._status - - def isRunning(self): - # return self._thread and self._thread.is_alive() - return self._status == MainExec.STATUS_RUNNING - - def isStopping(self): - return self._status == MainExec.STATUS_STOPPING - - def isStopped(self): - return self._status == MainExec.STATUS_STOPPED - - # Start the thread (with sub process), and wait for the sub service - # to become fully operational - def start(self): - if self._thread: - raise RuntimeError("Unexpected _thread") - if self._tdeSubProcess: - raise RuntimeError("TDengine sub process already created/running") - - self._status = MainExec.STATUS_STARTING - - self._tdeSubProcess = TdeSubProcess() - self._tdeSubProcess.start() - - self._ipcQueue = Queue() - self._thread = threading.Thread( # First thread captures server OUTPUT - target=self.svcOutputReader, - args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) - self._thread.daemon = True # thread dies with the program - self._thread.start() - - self._thread2 = threading.Thread( # 2nd thread captures server ERRORs - target=self.svcErrorReader, - args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) - self._thread2.daemon = True # thread dies with the program - self._thread2.start() - - # wait for service to start - for i in range(0, 100): - time.sleep(1.0) - # 
self.procIpcBatch() # don't pump message during start up - print("_zz_", end="", flush=True) - if self._status == MainExec.STATUS_RUNNING: - logger.info("[] TDengine service READY to process requests") - return # now we've started - # TODO: handle this better? - self.procIpcBatch(100, True) # display output before cronking out, trim to last 20 msgs, force output - raise RuntimeError("TDengine service did not start successfully") - - def stop(self): - # can be called from both main thread or signal handler - print("Terminating TDengine service running as the sub process...") - if self.isStopped(): - print("Service already stopped") - return - if self.isStopping(): - print("Service is already being stopped") - return - # Linux will send Control-C generated SIGINT to the TDengine process - # already, ref: - # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes - if not self._tdeSubProcess: - raise RuntimeError("sub process object missing") - - self._status = MainExec.STATUS_STOPPING - retCode = self._tdeSubProcess.stop() - print("Attempted to stop sub process, got return code: {}".format(retCode)) - if (retCode==-11): # SGV - logger.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") - - if self._tdeSubProcess.isRunning(): # still running - print("FAILED to stop sub process, it is still running... pid = {}".format( - self._tdeSubProcess.getPid())) - else: - self._tdeSubProcess = None # not running any more - self.join() # stop the thread, change the status, etc. 
- - def join(self): - # TODO: sanity check - if not self.isStopping(): - raise RuntimeError( - "Unexpected status when ending svc mgr thread: {}".format( - self._status)) - - if self._thread: - self._thread.join() - self._thread = None - self._status = MainExec.STATUS_STOPPED - # STD ERR thread - self._thread2.join() - self._thread2 = None - else: - print("Joining empty thread, doing nothing") - - def _trimQueue(self, targetSize): - if targetSize <= 0: - return # do nothing - q = self._ipcQueue - if (q.qsize() <= targetSize): # no need to trim - return - - logger.debug("Triming IPC queue to target size: {}".format(targetSize)) - itemsToTrim = q.qsize() - targetSize - for i in range(0, itemsToTrim): - try: - q.get_nowait() - except Empty: - break # break out of for loop, no more trimming - - TD_READY_MSG = "TDengine is initialized successfully" - - def procIpcBatch(self, trimToTarget=0, forceOutput=False): - self._trimQueue(trimToTarget) # trim if necessary - # Process all the output generated by the underlying sub process, - # managed by IO thread - print("<", end="", flush=True) - while True: - try: - line = self._ipcQueue.get_nowait() # getting output at fast speed - self._printProgress("_o") - except Empty: - # time.sleep(2.3) # wait only if there's no output - # no more output - print(".>", end="", flush=True) - return # we are done with THIS BATCH - else: # got line, printing out - if forceOutput: - logger.info(line) - else: - logger.debug(line) - print(">", end="", flush=True) - - _ProgressBars = ["--", "//", "||", "\\\\"] - - def _printProgress(self, msg): # TODO: assuming 2 chars - print(msg, end="", flush=True) - pBar = self._ProgressBars[Dice.throw(4)] - print(pBar, end="", flush=True) - print('\b\b\b\b', end="", flush=True) - - def svcOutputReader(self, out: IO, queue): - # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python - # print("This is the svcOutput Reader...") - # for line in out : - 
for line in iter(out.readline, b''): - # print("Finished reading a line: {}".format(line)) - # print("Adding item to queue...") - try: - line = line.decode("utf-8").rstrip() - except UnicodeError: - print("\nNon-UTF8 server output: {}\n".format(line)) - - # This might block, and then causing "out" buffer to block - queue.put(line) - self._printProgress("_i") - - if self._status == MainExec.STATUS_STARTING: # we are starting, let's see if we have started - if line.find(self.TD_READY_MSG) != -1: # found - logger.info("Waiting for the service to become FULLY READY") - time.sleep(1.0) # wait for the server to truly start. TODO: remove this - logger.info("Service is now FULLY READY") - self._status = MainExec.STATUS_RUNNING - - # Trim the queue if necessary: TODO: try this 1 out of 10 times - self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size - - if self.isStopping(): # TODO: use thread status instead - # WAITING for stopping sub process to finish its outptu - print("_w", end="", flush=True) - - # queue.put(line) - # meaning sub process must have died - print("\nNo more output from IO thread managing TDengine service") - out.close() - - def svcErrorReader(self, err: IO, queue): - for line in iter(err.readline, b''): - print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line)) - - -class TdeSubProcess: - def __init__(self): - self.subProcess = None - - def getStdOut(self): - return self.subProcess.stdout - - def getStdErr(self): - return self.subProcess.stderr - - def isRunning(self): - return self.subProcess is not None - - def getPid(self): - return self.subProcess.pid - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("communit")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in 
rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - return buildPath - - def start(self): - ON_POSIX = 'posix' in sys.builtin_module_names - - taosdPath = self.getBuildPath() + "/build/bin/taosd" - cfgPath = self.getBuildPath() + "/test/cfg" - - # Delete the log files - logPath = self.getBuildPath() + "/test/log" - # ref: https://stackoverflow.com/questions/1995373/deleting-all-files-in-a-directory-with-python/1995397 - # filelist = [ f for f in os.listdir(logPath) ] # if f.endswith(".bak") ] - # for f in filelist: - # filePath = os.path.join(logPath, f) - # print("Removing log file: {}".format(filePath)) - # os.remove(filePath) - if os.path.exists(logPath): - logPathSaved = logPath + "_" + time.strftime('%Y-%m-%d-%H-%M-%S') - logger.info("Saving old log files to: {}".format(logPathSaved)) - os.rename(logPath, logPathSaved) - # os.mkdir(logPath) # recreate, no need actually, TDengine will auto-create with proper perms - - svcCmd = [taosdPath, '-c', cfgPath] - # svcCmdSingle = "{} -c {}".format(taosdPath, cfgPath) - # svcCmd = ['vmstat', '1'] - if self.subProcess: # already there - raise RuntimeError("Corrupt process state") - - # print("Starting service: {}".format(svcCmd)) - self.subProcess = subprocess.Popen( - svcCmd, shell=False, - # svcCmdSingle, shell=True, # capture core dump? 
- stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # bufsize=1, # not supported in binary mode - close_fds=ON_POSIX - ) # had text=True, which interferred with reading EOF - - def stop(self): - if not self.subProcess: - print("Sub process already stopped") - return -1 - - retCode = self.subProcess.poll() # contains real sub process return code - if retCode: # valid return code, process ended - self.subProcess = None - else: # process still alive, let's interrupt it - print( - "Sub process is running, sending SIG_INT and waiting for it to terminate...") - # sub process should end, then IPC queue should end, causing IO - # thread to end - self.subProcess.send_signal(signal.SIGINT) - try: - self.subProcess.wait(10) - retCode = self.subProcess.returncode - except subprocess.TimeoutExpired as err: - print("Time out waiting for TDengine service process to exit") - retCode = -3 - else: - print("TDengine service process terminated successfully from SIG_INT") - retCode = -4 - self.subProcess = None - return retCode class ThreadStacks: # stack info for all threads def __init__(self): @@ -2808,17 +1950,17 @@ class ClientManager: # signal.signal(signal.SIGTERM, self.sigIntHandler) # signal.signal(signal.SIGINT, self.sigIntHandler) - self._status = MainExec.STATUS_RUNNING + self._status = Status.STATUS_RUNNING self.tc = None self.inSigHandler = False def sigIntHandler(self, signalNumber, frame): - if self._status != MainExec.STATUS_RUNNING: + if self._status != Status.STATUS_RUNNING: print("Repeated SIGINT received, forced exit...") # return # do nothing if it's already not running sys.exit(-1) - self._status = MainExec.STATUS_STOPPING # immediately set our status + self._status = Status.STATUS_STOPPING # immediately set our status print("ClientManager: Terminating program...") self.tc.requestToStop() @@ -2898,15 +2040,20 @@ class ClientManager: # self._printLastNumbers() global gConfig - dbManager = DbManager() # Regular function + # Prepare Tde Instance + global gContainer 
+ tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" + + dbManager = DbManager(gConfig.connector_type, tInst.getDbTarget()) # Regular function thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) + print("Starting client instance to: {}".format(tInst)) self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) if svcMgr: # gConfig.auto_start_service: - svcMgr.stopTaosService() + svcMgr.stopTaosServices() svcMgr = None # Print exec status, etc., AFTER showing messages from the server self.conclude() @@ -2936,18 +2083,10 @@ class ClientManager: # self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections self.tc.printStats() - - - class MainExec: - STATUS_STARTING = 1 - STATUS_RUNNING = 2 - STATUS_STOPPING = 3 - STATUS_STOPPED = 4 - def __init__(self): self._clientMgr = None - self._svcMgr = None + self._svcMgr = None # type: ServiceManager signal.signal(signal.SIGTERM, self.sigIntHandler) signal.signal(signal.SIGINT, self.sigIntHandler) @@ -2960,219 +2099,185 @@ class MainExec: self._svcMgr.sigUsrHandler(signalNumber, frame) def sigIntHandler(self, signalNumber, frame): - if self._svcMgr: + if self._svcMgr: self._svcMgr.sigIntHandler(signalNumber, frame) - if self._clientMgr: + if self._clientMgr: self._clientMgr.sigIntHandler(signalNumber, frame) def runClient(self): global gSvcMgr if gConfig.auto_start_service: - self._svcMgr = SvcManager() - gSvcMgr = self._svcMgr # hack alert - self._svcMgr.startTaosService() # we start, don't run + gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert + gSvcMgr.startTaosServices() # we start, don't run self._clientMgr = ClientManager() ret = None try: ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside except requests.exceptions.ConnectionError as err: - logger.warning("Failed to open REST connection to DB: {}".format(err.getMessage())) 
+ Logging.warning("Failed to open REST connection to DB: {}".format(err.getMessage())) # don't raise return ret def runService(self): global gSvcMgr - self._svcMgr = SvcManager() - gSvcMgr = self._svcMgr # save it in a global variable TODO: hack alert - - self._svcMgr.run() # run to some end state - self._svcMgr = None - gSvcMgr = None - - def runTemp(self): # for debugging purposes - # # Hack to exercise reading from disk, imcreasing coverage. TODO: fix - # dbc = dbState.getDbConn() - # sTbName = dbState.getFixedSuperTableName() - # dbc.execute("create database if not exists db") - # if not dbState.getState().equals(StateEmpty()): - # dbc.execute("use db") - - # rTables = None - # try: # the super table may not exist - # sql = "select TBNAME from db.{}".format(sTbName) - # logger.info("Finding out tables in super table: {}".format(sql)) - # dbc.query(sql) # TODO: analyze result set later - # logger.info("Fetching result") - # rTables = dbc.getQueryResult() - # logger.info("Result: {}".format(rTables)) - # except taos.error.ProgrammingError as err: - # logger.info("Initial Super table OPS error: {}".format(err)) - - # # sys.exit() - # if ( not rTables == None): - # # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0]))) - # try: - # for rTbName in rTables : # regular tables - # ds = dbState - # logger.info("Inserting into table: {}".format(rTbName[0])) - # sql = "insert into db.{} values ('{}', {});".format( - # rTbName[0], - # ds.getNextTick(), ds.getNextInt()) - # dbc.execute(sql) - # for rTbName in rTables : # regular tables - # dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure - # logger.info("Initial READING operation is successful") - # except taos.error.ProgrammingError as err: - # logger.info("Initial WRITE/READ error: {}".format(err)) - - # Sandbox testing code - # dbc = dbState.getDbConn() - # while True: - # rows = dbc.query("show databases") - # print("Rows: {}, time={}".format(rows, time.time())) - 
return - - -def main(): - # Super cool Python argument library: - # https://docs.python.org/3/library/argparse.html - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description=textwrap.dedent('''\ - TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) - --------------------------------------------------------------------- - 1. You build TDengine in the top level ./build directory, as described in offical docs - 2. You run the server there before this script: ./build/bin/taosd -c test/cfg - - ''')) - - # parser.add_argument('-a', '--auto-start-service', action='store_true', - # help='Automatically start/stop the TDengine service (default: false)') - # parser.add_argument('-c', '--connector-type', action='store', default='native', type=str, - # help='Connector type to use: native, rest, or mixed (default: 10)') - # parser.add_argument('-d', '--debug', action='store_true', - # help='Turn on DEBUG mode for more logging (default: false)') - # parser.add_argument('-e', '--run-tdengine', action='store_true', - # help='Run TDengine service in foreground (default: false)') - # parser.add_argument('-l', '--larger-data', action='store_true', - # help='Write larger amount of data during write operations (default: false)') - # parser.add_argument('-p', '--per-thread-db-connection', action='store_true', - # help='Use a single shared db connection (default: false)') - # parser.add_argument('-r', '--record-ops', action='store_true', - # help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - # parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int, - # help='Maximum number of steps to run (default: 100)') - # parser.add_argument('-t', '--num-threads', action='store', default=5, type=int, - # help='Number of threads to run (default: 10)') - # parser.add_argument('-x', '--continue-on-exception', action='store_true', - # 
help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') - - parser.add_argument( - '-a', - '--auto-start-service', - action='store_true', - help='Automatically start/stop the TDengine service (default: false)') - parser.add_argument( - '-b', - '--max-dbs', - action='store', - default=0, - type=int, - help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)') - parser.add_argument( - '-c', - '--connector-type', - action='store', - default='native', - type=str, - help='Connector type to use: native, rest, or mixed (default: 10)') - parser.add_argument( - '-d', - '--debug', - action='store_true', - help='Turn on DEBUG mode for more logging (default: false)') - parser.add_argument( - '-e', - '--run-tdengine', - action='store_true', - help='Run TDengine service in foreground (default: false)') - parser.add_argument( - '-i', - '--max-replicas', - action='store', - default=1, - type=int, - help='Maximum number of replicas to use, when testing against clusters. 
(default: 1)') - parser.add_argument( - '-l', - '--larger-data', - action='store_true', - help='Write larger amount of data during write operations (default: false)') - parser.add_argument( - '-p', - '--per-thread-db-connection', - action='store_true', - help='Use a single shared db connection (default: false)') - parser.add_argument( - '-r', - '--record-ops', - action='store_true', - help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') - parser.add_argument( - '-s', - '--max-steps', - action='store', - default=1000, - type=int, - help='Maximum number of steps to run (default: 100)') - parser.add_argument( - '-t', - '--num-threads', - action='store', - default=5, - type=int, - help='Number of threads to run (default: 10)') - parser.add_argument( - '-v', - '--verify-data', - action='store_true', - help='Verify data written in a number of places by reading back (default: false)') - parser.add_argument( - '-x', - '--continue-on-exception', - action='store_true', - help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') - - global gConfig - gConfig = parser.parse_args() - - # Logging Stuff - global logger - _logger = logging.getLogger('CrashGen') # real logger - _logger.addFilter(LoggingFilter()) - ch = logging.StreamHandler() - _logger.addHandler(ch) - - # Logging adapter, to be used as a logger - logger = MyLoggingAdapter(_logger, []) - - if (gConfig.debug): - logger.setLevel(logging.DEBUG) # default seems to be INFO - else: - logger.setLevel(logging.INFO) - - Dice.seed(0) # initial seeding of dice - - # Run server or client - mExec = MainExec() - if gConfig.run_tdengine: # run server - mExec.runService() - else: - return mExec.runClient() - - -if __name__ == "__main__": - exitCode = main() - # print("Exiting with code: {}".format(exitCode)) - sys.exit(exitCode) + gSvcMgr = self._svcMgr = ServiceManager(gConfig.num_dnodes) # save it in a global 
variable TODO: hack alert + + gSvcMgr.run() # run to some end state + gSvcMgr = self._svcMgr = None + + def init(self): # TODO: refactor + global gContainer + gContainer = Container() # micky-mouse DI + + global gSvcMgr # TODO: refactor away + gSvcMgr = None + + # Super cool Python argument library: + # https://docs.python.org/3/library/argparse.html + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=textwrap.dedent('''\ + TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) + --------------------------------------------------------------------- + 1. You build TDengine in the top level ./build directory, as described in offical docs + 2. You run the server there before this script: ./build/bin/taosd -c test/cfg + + ''')) + + parser.add_argument( + '-a', + '--auto-start-service', + action='store_true', + help='Automatically start/stop the TDengine service (default: false)') + parser.add_argument( + '-b', + '--max-dbs', + action='store', + default=0, + type=int, + help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)') + parser.add_argument( + '-c', + '--connector-type', + action='store', + default='native', + type=str, + help='Connector type to use: native, rest, or mixed (default: 10)') + parser.add_argument( + '-d', + '--debug', + action='store_true', + help='Turn on DEBUG mode for more logging (default: false)') + parser.add_argument( + '-e', + '--run-tdengine', + action='store_true', + help='Run TDengine service in foreground (default: false)') + parser.add_argument( + '-g', + '--ignore-errors', + action='store', + default=None, + type=str, + help='Ignore error codes, comma separated, 0x supported (default: None)') + parser.add_argument( + '-i', + '--max-replicas', + action='store', + default=1, + type=int, + help='Maximum number of replicas to use, when testing against clusters. 
(default: 1)') + parser.add_argument( + '-l', + '--larger-data', + action='store_true', + help='Write larger amount of data during write operations (default: false)') + parser.add_argument( + '-n', + '--dynamic-db-table-names', + action='store_true', + help='Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false)') + parser.add_argument( + '-o', + '--num-dnodes', + action='store', + default=1, + type=int, + help='Number of Dnodes to initialize, used with -e option. (default: 1)') + parser.add_argument( + '-p', + '--per-thread-db-connection', + action='store_true', + help='Use a single shared db connection (default: false)') + parser.add_argument( + '-r', + '--record-ops', + action='store_true', + help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)') + parser.add_argument( + '-s', + '--max-steps', + action='store', + default=1000, + type=int, + help='Maximum number of steps to run (default: 100)') + parser.add_argument( + '-t', + '--num-threads', + action='store', + default=5, + type=int, + help='Number of threads to run (default: 10)') + parser.add_argument( + '-v', + '--verify-data', + action='store_true', + help='Verify data written in a number of places by reading back (default: false)') + parser.add_argument( + '-x', + '--continue-on-exception', + action='store_true', + help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)') + + global gConfig + gConfig = parser.parse_args() + + Logging.clsInit(gConfig) + + Dice.seed(0) # initial seeding of dice + + def run(self): + if gConfig.run_tdengine: # run server + try: + self.runService() + return 0 # success + except ConnectionError as err: + Logging.error("Failed to make DB connection, please check DB instance manually") + return -1 # failure + else: + return self.runClient() + + +class Container(): + _propertyList = {'defTdeInstance'} + + def __init__(self): + 
self._cargo = {} # No cargo at the beginning + + def _verifyValidProperty(self, name): + if not name in self._propertyList: + raise CrashGenError("Invalid container property: {}".format(name)) + + # Called for an attribute, when other mechanisms fail (compare to __getattribute__) + def __getattr__(self, name): + self._verifyValidProperty(name) + return self._cargo[name] # just a simple lookup + + def __setattr__(self, name, value): + if name == '_cargo' : # reserved vars + super().__setattr__(name, value) + return + self._verifyValidProperty(name) + self._cargo[name] = value + diff --git a/tests/pytest/crash_gen/db.py b/tests/pytest/crash_gen/db.py new file mode 100644 index 0000000000000000000000000000000000000000..43c855647c03d1de3e55393eb85c77250a00a602 --- /dev/null +++ b/tests/pytest/crash_gen/db.py @@ -0,0 +1,435 @@ +from __future__ import annotations + +import sys +import time +import threading +import requests +from requests.auth import HTTPBasicAuth + +import taos +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.log import * + +from .misc import Logging, CrashGenError, Helper, Dice +import os +import datetime +# from .service_manager import TdeInstance + +class DbConn: + TYPE_NATIVE = "native-c" + TYPE_REST = "rest-api" + TYPE_INVALID = "invalid" + + @classmethod + def create(cls, connType, dbTarget): + if connType == cls.TYPE_NATIVE: + return DbConnNative(dbTarget) + elif connType == cls.TYPE_REST: + return DbConnRest(dbTarget) + else: + raise RuntimeError( + "Unexpected connection type: {}".format(connType)) + + @classmethod + def createNative(cls, dbTarget) -> DbConn: + return cls.create(cls.TYPE_NATIVE, dbTarget) + + @classmethod + def createRest(cls, dbTarget) -> DbConn: + return cls.create(cls.TYPE_REST, dbTarget) + + def __init__(self, dbTarget): + self.isOpen = False + self._type = self.TYPE_INVALID + self._lastSql = None + self._dbTarget = dbTarget + + def __repr__(self): + return "[DbConn: type={}, 
target={}]".format(self._type, self._dbTarget) + + def getLastSql(self): + return self._lastSql + + def open(self): + if (self.isOpen): + raise RuntimeError("Cannot re-open an existing DB connection") + + # below implemented by child classes + self.openByType() + + Logging.debug("[DB] data connection opened: {}".format(self)) + self.isOpen = True + + def close(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def queryScalar(self, sql) -> int: + return self._queryAny(sql) + + def queryString(self, sql) -> str: + return self._queryAny(sql) + + def _queryAny(self, sql): # actual query result as an int + if (not self.isOpen): + raise RuntimeError("Cannot query database until connection is open") + nRows = self.query(sql) + if nRows != 1: + raise taos.error.ProgrammingError( + "Unexpected result for query: {}, rows = {}".format(sql, nRows), + (0x991 if nRows==0 else 0x992) + ) + if self.getResultRows() != 1 or self.getResultCols() != 1: + raise RuntimeError("Unexpected result set for query: {}".format(sql)) + return self.getQueryResult()[0][0] + + def use(self, dbName): + self.execute("use {}".format(dbName)) + + def existsDatabase(self, dbName: str): + ''' Check if a certain database exists ''' + self.query("show databases") + dbs = [v[0] for v in self.getQueryResult()] # ref: https://stackoverflow.com/questions/643823/python-list-transformation + # ret2 = dbName in dbs + # print("dbs = {}, str = {}, ret2={}, type2={}".format(dbs, dbName,ret2, type(dbName))) + return dbName in dbs # TODO: super weird type mangling seen, once here + + def hasTables(self): + return self.query("show tables") > 0 + + def execute(self, sql): + ''' Return the number of rows affected''' + raise RuntimeError("Unexpected execution, should be overriden") + + def safeExecute(self, sql): + '''Safely execute any SQL query, returning True/False upon success/failure''' + try: + self.execute(sql) + return True # ignore num of results, return success + except 
taos.error.ProgrammingError as err: + return False # failed, for whatever TAOS reason + # Not possile to reach here, non-TAOS exception would have been thrown + + def query(self, sql) -> int: # return num rows returned + ''' Return the number of rows affected''' + raise RuntimeError("Unexpected execution, should be overriden") + + def openByType(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getQueryResult(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultRows(self): + raise RuntimeError("Unexpected execution, should be overriden") + + def getResultCols(self): + raise RuntimeError("Unexpected execution, should be overriden") + +# Sample: curl -u root:taosdata -d "show databases" localhost:6020/rest/sql + + +class DbConnRest(DbConn): + REST_PORT_INCREMENT = 11 + + def __init__(self, dbTarget: DbTarget): + super().__init__(dbTarget) + self._type = self.TYPE_REST + restPort = dbTarget.port + 11 + self._url = "http://{}:{}/rest/sql".format( + dbTarget.hostAddr, dbTarget.port + self.REST_PORT_INCREMENT) + self._result = None + + def openByType(self): # Open connection + pass # do nothing, always open + + def close(self): + if (not self.isOpen): + raise RuntimeError("Cannot clean up database until connection is open") + # Do nothing for REST + Logging.debug("[DB] REST Database connection closed") + self.isOpen = False + + def _doSql(self, sql): + self._lastSql = sql # remember this, last SQL attempted + try: + r = requests.post(self._url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + except: + print("REST API Failure (TODO: more info here)") + raise + rj = r.json() + # Sanity check for the "Json Result" + if ('status' not in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in 
the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existance of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + self._result = rj + return nRows + + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError( + "Cannot execute database commands until connection is open") + Logging.debug("[SQL-REST] Executing SQL: {}".format(sql)) + nRows = self._doSql(sql) + Logging.debug( + "[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql)) + return nRows + + def query(self, sql): # return rows affected + return self.execute(sql) + + def getQueryResult(self): + return self._result['data'] + + def getResultRows(self): + print(self._result) + raise RuntimeError("TBD") # TODO: finish here to support -v under -c rest + # return self._tdSql.queryRows + + def getResultCols(self): + print(self._result) + raise RuntimeError("TBD") + + # Duplicate code from TDMySQL, TODO: merge all this into DbConnNative + + +class MyTDSql: + # Class variables + _clsLock = threading.Lock() # class wide locking + longestQuery = None # type: str + longestQueryTime = 0.0 # seconds + lqStartTime = 0.0 + # lqEndTime = 0.0 # Not needed, as we have the two above already + + def __init__(self, hostAddr, cfgPath): + # Make the DB connection + self._conn = taos.connect(host=hostAddr, config=cfgPath) + self._cursor = self._conn.cursor() + + self.queryRows = 0 + self.queryCols = 0 + self.affectedRows = 0 + + # def init(self, cursor, log=True): + # self.cursor = cursor + # if (log): + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # self.cursor.log(caller.filename + ".sql") + + def close(self): + self._cursor.close() # can we double close? + self._conn.close() # TODO: very important, cursor close does NOT close DB connection! 
+ self._cursor.close() + + def _execInternal(self, sql): + startTime = time.time() + ret = self._cursor.execute(sql) + # print("\nSQL success: {}".format(sql)) + queryTime = time.time() - startTime + # Record the query time + cls = self.__class__ + if queryTime > (cls.longestQueryTime + 0.01) : + with cls._clsLock: + cls.longestQuery = sql + cls.longestQueryTime = queryTime + cls.lqStartTime = startTime + return ret + + def query(self, sql): + self.sql = sql + try: + self._execInternal(sql) + self.queryResult = self._cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self._cursor.description) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.queryRows + + def execute(self, sql): + self.sql = sql + try: + self.affectedRows = self._execInternal(sql) + except Exception as e: + # caller = inspect.getframeinfo(inspect.stack()[1][0]) + # args = (caller.filename, caller.lineno, sql, repr(e)) + # tdLog.exit("%s(%d) failed: sql:%s, %s" % args) + raise + return self.affectedRows + +class DbTarget: + def __init__(self, cfgPath, hostAddr, port): + self.cfgPath = cfgPath + self.hostAddr = hostAddr + self.port = port + + def __repr__(self): + return "[DbTarget: cfgPath={}, host={}:{}]".format( + Helper.getFriendlyPath(self.cfgPath), self.hostAddr, self.port) + + def getEp(self): + return "{}:{}".format(self.hostAddr, self.port) + +class DbConnNative(DbConn): + # Class variables + _lock = threading.Lock() + # _connInfoDisplayed = False # TODO: find another way to display this + totalConnections = 0 # Not private + + def __init__(self, dbTarget): + super().__init__(dbTarget) + self._type = self.TYPE_NATIVE + self._conn = None + # self._cursor = None + + def openByType(self): # Open connection + # global gContainer + # tInst = tInst or gContainer.defTdeInstance # set up in ClientManager, 
type: TdeInstance + # cfgPath = self.getBuildPath() + "/test/cfg" + # cfgPath = tInst.getCfgDir() + # hostAddr = tInst.getHostAddr() + + cls = self.__class__ # Get the class, to access class variables + with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!! + dbTarget = self._dbTarget + # if not cls._connInfoDisplayed: + # cls._connInfoDisplayed = True # updating CLASS variable + Logging.debug("Initiating TAOS native connection to {}".format(dbTarget)) + # Make the connection + # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable + # self._cursor = self._conn.cursor() + # Record the count in the class + self._tdSql = MyTDSql(dbTarget.hostAddr, dbTarget.cfgPath) # making DB connection + cls.totalConnections += 1 + + self._tdSql.execute('reset query cache') + # self._cursor.execute('use db') # do this at the beginning of every + + # Open connection + # self._tdSql = MyTDSql() + # self._tdSql.init(self._cursor) + + def close(self): + if (not self.isOpen): + raise RuntimeError("Cannot clean up database until connection is open") + self._tdSql.close() + # Decrement the class wide counter + cls = self.__class__ # Get the class, to access class variables + with cls._lock: + cls.totalConnections -= 1 + + Logging.debug("[DB] Database connection closed") + self.isOpen = False + + def execute(self, sql): + if (not self.isOpen): + raise RuntimeError("Cannot execute database commands until connection is open") + Logging.debug("[SQL] Executing SQL: {}".format(sql)) + self._lastSql = sql + nRows = self._tdSql.execute(sql) + Logging.debug( + "[SQL] Execution Result, nRows = {}, SQL = {}".format( + nRows, sql)) + return nRows + + def query(self, sql): # return rows affected + if (not self.isOpen): + raise RuntimeError( + "Cannot query database until connection is open") + Logging.debug("[SQL] Executing SQL: {}".format(sql)) + self._lastSql = sql + nRows = self._tdSql.query(sql) + Logging.debug( + "[SQL] Query Result, 
nRows = {}, SQL = {}".format( + nRows, sql)) + return nRows + # results are in: return self._tdSql.queryResult + + def getQueryResult(self): + return self._tdSql.queryResult + + def getResultRows(self): + return self._tdSql.queryRows + + def getResultCols(self): + return self._tdSql.queryCols + + +class DbManager(): + ''' This is a wrapper around DbConn(), to make it easier to use. + + TODO: rename this to DbConnManager + ''' + def __init__(self, cType, dbTarget): + # self.tableNumQueue = LinearQueue() # TODO: delete? + # self.openDbServerConnection() + self._dbConn = DbConn.createNative(dbTarget) if ( + cType == 'native') else DbConn.createRest(dbTarget) + try: + self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected + except taos.error.ProgrammingError as err: + # print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err)) + if (err.msg == 'client disconnected'): # cannot open DB connection + print( + "Cannot establish DB connection, please re-run script without parameter, and follow the instructions.") + sys.exit(2) + else: + print("Failed to connect to DB, errno = {}, msg: {}" + .format(Helper.convertErrno(err.errno), err.msg)) + raise + except BaseException: + print("[=] Unexpected exception") + raise + + # Do this after dbConn is in proper shape + # Moved to Database() + # self._stateMachine = StateMechine(self._dbConn) + + def getDbConn(self): + return self._dbConn + + # TODO: not used any more, to delete + def pickAndAllocateTable(self): # pick any table, and "use" it + return self.tableNumQueue.pickAndAllocate() + + # TODO: Not used any more, to delete + def addTable(self): + with self._lock: + tIndex = self.tableNumQueue.push() + return tIndex + + # Not used any more, to delete + def releaseTable(self, i): # return the table back, so others can use it + self.tableNumQueue.release(i) + + # TODO: not used any more, delete + def getTableNameToDelete(self): + tblNum = self.tableNumQueue.pop() # TODO: race condition! 
import threading
import random
import logging
import os


class CrashGenError(Exception):
    """Project-specific exception carrying an optional message and errno."""

    def __init__(self, msg=None, errno=None):
        self.msg = msg      # human-readable description (may be None)
        self.errno = errno  # numeric error code (may be None)

    def __str__(self):
        # BUG FIX: was 'return self.msg', which raises TypeError when msg is
        # None (or any non-str); str() keeps error reporting paths safe.
        return str(self.msg)


class LoggingFilter(logging.Filter):
    """Filter for the CrashGen logger: INFO and above always pass; DEBUG
    records currently pass too (prefix-based suppression is parked below)."""

    def filter(self, record: logging.LogRecord):
        if record.levelno >= logging.INFO:
            return True  # info or above always logged

        # Placeholder for future suppression of noisy DEBUG prefixes, e.g.:
        # if record.getMessage().startswith("[TRD]"):
        #     return False
        return True


class MyLoggingAdapter(logging.LoggerAdapter):
    """Adapter that prefixes every message with a short (mod-10000) thread
    ident, so interleaved multi-threaded output can be attributed."""

    def process(self, msg, kwargs):
        return "[{}] {}".format(threading.get_ident() % 10000, msg), kwargs


class Logging:
    """Class-level facade over a single shared 'CrashGen' logger.

    Call clsInit() once before using info()/debug()/warning()/error().
    """

    logger = None  # the MyLoggingAdapter instance, set lazily by clsInit()

    @classmethod
    def getLogger(cls):
        # BUG FIX: was 'return logger' — an undefined module-level name,
        # which raised NameError on every call. The adapter lives on the class.
        return cls.logger

    @classmethod
    def clsInit(cls, gConfig):  # TODO: refactor away gConfig
        """Idempotently build the 'CrashGen' logger + adapter; level is DEBUG
        when gConfig.debug is truthy, INFO otherwise."""
        if cls.logger:
            return  # already initialized

        _logger = logging.getLogger('CrashGen')  # real underlying logger
        _logger.addFilter(LoggingFilter())
        ch = logging.StreamHandler()
        _logger.addHandler(ch)

        # The adapter is what the rest of the code uses as "the" logger
        print("setting logger variable")
        cls.logger = MyLoggingAdapter(_logger, [])

        if gConfig.debug:
            cls.logger.setLevel(logging.DEBUG)  # default seems to be INFO
        else:
            cls.logger.setLevel(logging.INFO)

    @classmethod
    def info(cls, msg):
        cls.logger.info(msg)

    @classmethod
    def debug(cls, msg):
        cls.logger.debug(msg)

    @classmethod
    def warning(cls, msg):
        cls.logger.warning(msg)

    @classmethod
    def error(cls, msg):
        cls.logger.error(msg)


class Status:
    """Tiny state holder for a service/thread life cycle."""

    STATUS_STARTING = 1
    STATUS_RUNNING = 2
    STATUS_STOPPING = 3
    STATUS_STOPPED = 4

    def __init__(self, status):
        self.set(status)

    def __repr__(self):
        return "[Status: v={}]".format(self._status)

    def set(self, status):
        self._status = status

    def get(self):
        return self._status

    def isStarting(self):
        return self._status == Status.STATUS_STARTING

    def isRunning(self):
        return self._status == Status.STATUS_RUNNING

    def isStopping(self):
        return self._status == Status.STATUS_STOPPING

    def isStopped(self):
        return self._status == Status.STATUS_STOPPED

    def isStable(self):
        # "Stable" = not in a transition (starting/stopping)
        return self.isRunning() or self.isStopped()


class Dice():
    """Deterministic random-number helper: must be seeded exactly once so
    that a whole test run can be reproduced from its seed."""

    seeded = False  # class-level: has seed() been called?

    @classmethod
    def seed(cls, s):
        if cls.seeded:
            raise RuntimeError(
                "Cannot seed the random generator more than once")
        cls.verifyRNG()
        random.seed(s)
        cls.seeded = True  # TODO: protect against multi-threading

    @classmethod
    def verifyRNG(cls):
        # Known first three randrange(0,1000) values for seed 0 on CPython's
        # Mersenne Twister; guards against a non-reproducible RNG.
        random.seed(0)
        x1 = random.randrange(0, 1000)
        x2 = random.randrange(0, 1000)
        x3 = random.randrange(0, 1000)
        if x1 != 864 or x2 != 394 or x3 != 776:
            raise RuntimeError("System RNG is not deterministic")

    @classmethod
    def throw(cls, stop):
        """Return an int in [0, stop)."""
        return cls.throwRange(0, stop)

    @classmethod
    def throwRange(cls, start, stop):
        """Return an int in [start, stop); requires seed() to have run."""
        if not cls.seeded:
            raise RuntimeError("Cannot throw dice before seeding it")
        return random.randrange(start, stop)

    @classmethod
    def choice(cls, cList):
        # NOTE(review): unlike throwRange(), this does not require seeding —
        # presumably intentional, but worth confirming.
        return random.choice(cList)


class Helper:
    """Small stateless utilities shared across the crash_gen package."""

    @classmethod
    def convertErrno(cls, errno):
        # Non-positive errnos from the client library are mapped into the
        # TAOS 0x8000_0000 error space. NOTE(review): errno == 0 is also
        # mapped (condition is '> 0') — confirm that is intended.
        return errno if errno > 0 else 0x80000000 + errno

    @classmethod
    def getFriendlyPath(cls, path):
        """Return only the last two path components, as '.../xxx/yyy'."""
        ht1 = os.path.split(path)
        ht2 = os.path.split(ht1[0])
        return ".../" + ht2[1] + '/' + ht1[1]
class Progress:
    """Console progress tokens streamed (without newlines) while the
    crash-gen service/test machinery runs, giving a live activity trace."""

    STEP_BOUNDARY = 0
    BEGIN_THREAD_STEP = 1
    END_THREAD_STEP = 2
    SERVICE_HEART_BEAT = 3
    SERVICE_RECONNECT_START = 4
    SERVICE_RECONNECT_SUCCESS = 5
    SERVICE_RECONNECT_FAILURE = 6

    # One display token per progress code.
    # BUG FIX: SERVICE_RECONNECT_SUCCESS had no entry at all, so
    # emit(SERVICE_RECONNECT_SUCCESS) raised KeyError; the START token was
    # empty (looks like markup-stripping mangling) and is restored to a
    # visible marker consistent with the FAILURE token '.xr>'.
    tokens = {
        STEP_BOUNDARY: '.',
        BEGIN_THREAD_STEP: '[',
        END_THREAD_STEP: '] ',
        SERVICE_HEART_BEAT: '.Y.',
        SERVICE_RECONNECT_START: '<r>',
        SERVICE_RECONNECT_SUCCESS: '.r>',
        SERVICE_RECONNECT_FAILURE: '.xr>',
    }

    @classmethod
    def emit(cls, token):
        # Newline-free, flushed output so tokens appear in real time
        print(cls.tokens[token], end="", flush=True)
+ """ + + @classmethod + def _getBuildPath(cls): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("communit")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = None + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + if buildPath == None: + raise RuntimeError("Failed to determine buildPath, selfPath={}, projPath={}" + .format(selfPath, projPath)) + return buildPath + + def __init__(self, subdir='test', tInstNum=0, port=6030, fepPort=6030): + self._buildDir = self._getBuildPath() + self._subdir = '/' + subdir # TODO: tolerate "/" + self._port = port # TODO: support different IP address too + self._fepPort = fepPort + + self._tInstNum = tInstNum + self._smThread = ServiceManagerThread() + + def getDbTarget(self): + return DbTarget(self.getCfgDir(), self.getHostAddr(), self._port) + + def getPort(self): + return self._port + + def __repr__(self): + return "[TdeInstance: {}, subdir={}]".format( + self._buildDir, Helper.getFriendlyPath(self._subdir)) + + def generateCfgFile(self): + # print("Logger = {}".format(logger)) + # buildPath = self.getBuildPath() + # taosdPath = self._buildPath + "/build/bin/taosd" + + cfgDir = self.getCfgDir() + cfgFile = cfgDir + "/taos.cfg" # TODO: inquire if this is fixed + if os.path.exists(cfgFile): + if os.path.isfile(cfgFile): + Logging.warning("Config file exists already, skip creation: {}".format(cfgFile)) + return # cfg file already exists, nothing to do + else: + raise CrashGenError("Invalid config file: {}".format(cfgFile)) + # Now that the cfg file doesn't exist + if os.path.exists(cfgDir): + if not os.path.isdir(cfgDir): + raise CrashGenError("Invalid config dir: {}".format(cfgDir)) + # else: good path + else: + os.makedirs(cfgDir, exist_ok=True) # like 
"mkdir -p" + # Now we have a good cfg dir + cfgValues = { + 'runDir': self.getRunDir(), + 'ip': '127.0.0.1', # TODO: change to a network addressable ip + 'port': self._port, + 'fepPort': self._fepPort, + } + cfgTemplate = """ +dataDir {runDir}/data +logDir {runDir}/log + +charset UTF-8 + +firstEp {ip}:{fepPort} +fqdn {ip} +serverPort {port} + +# was all 135 below +dDebugFlag 135 +cDebugFlag 135 +rpcDebugFlag 135 +qDebugFlag 135 +# httpDebugFlag 143 +# asyncLog 0 +# tables 10 +maxtablesPerVnode 10 +rpcMaxTime 101 +# cache 2 +keep 36500 +# walLevel 2 +walLevel 1 +# +# maxConnections 100 +""" + cfgContent = cfgTemplate.format_map(cfgValues) + f = open(cfgFile, "w") + f.write(cfgContent) + f.close() + + def rotateLogs(self): + logPath = self.getLogDir() + # ref: https://stackoverflow.com/questions/1995373/deleting-all-files-in-a-directory-with-python/1995397 + if os.path.exists(logPath): + logPathSaved = logPath + "_" + time.strftime('%Y-%m-%d-%H-%M-%S') + Logging.info("Saving old log files to: {}".format(logPathSaved)) + os.rename(logPath, logPathSaved) + # os.mkdir(logPath) # recreate, no need actually, TDengine will auto-create with proper perms + + + def getExecFile(self): # .../taosd + return self._buildDir + "/build/bin/taosd" + + def getRunDir(self): # TODO: rename to "root dir" ?! 
+ return self._buildDir + self._subdir + + def getCfgDir(self): # path, not file + return self.getRunDir() + "/cfg" + + def getLogDir(self): + return self.getRunDir() + "/log" + + def getHostAddr(self): + return "127.0.0.1" + + def getServiceCmdLine(self): # to start the instance + return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + + def _getDnodes(self, dbc): + dbc.query("show dnodes") + cols = dbc.getQueryResult() # id,end_point,vnodes,cores,status,role,create_time,offline reason + return {c[1]:c[4] for c in cols} # {'xxx:6030':'ready', 'xxx:6130':'ready'} + + def createDnode(self, dbt: DbTarget): + """ + With a connection to the "first" EP, let's create a dnode for someone else who + wants to join. + """ + dbc = DbConn.createNative(self.getDbTarget()) + dbc.open() + + if dbt.getEp() in self._getDnodes(dbc): + Logging.info("Skipping DNode creation for: {}".format(dbt)) + dbc.close() + return + + sql = "CREATE DNODE \"{}\"".format(dbt.getEp()) + dbc.execute(sql) + dbc.close() + + def getStatus(self): + return self._smThread.getStatus() + + def getSmThread(self): + return self._smThread + + def start(self): + if not self.getStatus().isStopped(): + raise CrashGenError("Cannot start instance from status: {}".format(self.getStatus())) + + Logging.info("Starting TDengine instance: {}".format(self)) + self.generateCfgFile() # service side generates config file, client does not + self.rotateLogs() + + self._smThread.start(self.getServiceCmdLine()) + + def stop(self): + self._smThread.stop() + + def isFirst(self): + return self._tInstNum == 0 + + +class TdeSubProcess: + """ + A class to to represent the actual sub process that is the run-time + of a TDengine instance. + + It takes a TdeInstance object as its parameter, with the rationale being + "a sub process runs an instance". 
+ """ + + # RET_ALREADY_STOPPED = -1 + # RET_TIME_OUT = -3 + # RET_SUCCESS = -4 + + def __init__(self): + self.subProcess = None + # if tInst is None: + # raise CrashGenError("Empty instance not allowed in TdeSubProcess") + # self._tInst = tInst # Default create at ServiceManagerThread + + def getStdOut(self): + return self.subProcess.stdout + + def getStdErr(self): + return self.subProcess.stderr + + def isRunning(self): + return self.subProcess is not None + + def getPid(self): + return self.subProcess.pid + + def start(self, cmdLine): + ON_POSIX = 'posix' in sys.builtin_module_names + + # Sanity check + if self.subProcess: # already there + raise RuntimeError("Corrupt process state") + + self.subProcess = subprocess.Popen( + cmdLine, + shell=False, + # svcCmdSingle, shell=True, # capture core dump? + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + # bufsize=1, # not supported in binary mode + close_fds=ON_POSIX + ) # had text=True, which interferred with reading EOF + + def stop(self): + """ + Stop a sub process, and try to return a meaningful return code. 
+ + Common POSIX signal values (from man -7 signal): + SIGHUP 1 + SIGINT 2 + SIGQUIT 3 + SIGILL 4 + SIGTRAP 5 + SIGABRT 6 + SIGIOT 6 + SIGBUS 7 + SIGEMT - + SIGFPE 8 + SIGKILL 9 + SIGUSR1 10 + SIGSEGV 11 + SIGUSR2 12 + """ + if not self.subProcess: + print("Sub process already stopped") + return # -1 + + retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) + if retCode: # valid return code, process ended + retCode = -retCode # only if valid + Logging.warning("TSP.stop(): process ended itself") + self.subProcess = None + return retCode + + # process still alive, let's interrupt it + print("Terminate running process, send SIG_INT and wait...") + # sub process should end, then IPC queue should end, causing IO thread to end + # sig = signal.SIGINT + sig = signal.SIGKILL + self.subProcess.send_signal(sig) # SIGNINT or SIGKILL + self.subProcess.wait(20) + retCode = self.subProcess.returncode # should always be there + # May throw subprocess.TimeoutExpired exception above, therefore + # The process is guranteed to have ended by now + self.subProcess = None + if retCode != 0: # != (- signal.SIGINT): + Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format(sig, retCode)) + else: + Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(sig)) + return - retCode + +class ServiceManager: + PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process + + def __init__(self, numDnodes): # >1 when we run a cluster + Logging.info("TDengine Service Manager (TSM) created") + self._numDnodes = numDnodes # >1 means we have a cluster + self._lock = threading.Lock() + # signal.signal(signal.SIGTERM, self.sigIntHandler) # Moved to MainExec + # signal.signal(signal.SIGINT, self.sigIntHandler) + # signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! 
+ + self.inSigHandler = False + # self._status = MainExec.STATUS_RUNNING # set inside + # _startTaosService() + self._runCluster = (numDnodes > 1) + self._tInsts : List[TdeInstance] = [] + for i in range(0, numDnodes): + ti = self._createTdeInstance(i) # construct tInst + self._tInsts.append(ti) + + # self.svcMgrThreads : List[ServiceManagerThread] = [] + # for i in range(0, numDnodes): + # thread = self._createThread(i) # construct tInst + # self.svcMgrThreads.append(thread) + + def _createTdeInstance(self, dnIndex): + if not self._runCluster: # single instance + subdir = 'test' + else: # Create all threads in a cluster + subdir = 'cluster_dnode_{}'.format(dnIndex) + fepPort= 6030 # firstEP Port + port = fepPort + dnIndex * 100 + return TdeInstance(subdir, dnIndex, port, fepPort) + # return ServiceManagerThread(dnIndex, ti) + + def _doMenu(self): + choice = "" + while True: + print("\nInterrupting Service Program, Choose an Action: ") + print("1: Resume") + print("2: Terminate") + print("3: Restart") + # Remember to update the if range below + # print("Enter Choice: ", end="", flush=True) + while choice == "": + choice = input("Enter Choice: ") + if choice != "": + break # done with reading repeated input + if choice in ["1", "2", "3"]: + break # we are done with whole method + print("Invalid choice, please try again.") + choice = "" # reset + return choice + + def sigUsrHandler(self, signalNumber, frame): + print("Interrupting main thread execution upon SIGUSR1") + if self.inSigHandler: # already + print("Ignoring repeated SIG...") + return # do nothing if it's already not running + self.inSigHandler = True + + choice = self._doMenu() + if choice == "1": + self.sigHandlerResume() # TODO: can the sub-process be blocked due to us not reading from queue? 
+ elif choice == "2": + self.stopTaosServices() + elif choice == "3": # Restart + self.restart() + else: + raise RuntimeError("Invalid menu choice: {}".format(choice)) + + self.inSigHandler = False + + def sigIntHandler(self, signalNumber, frame): + print("ServiceManager: INT Signal Handler starting...") + if self.inSigHandler: + print("Ignoring repeated SIG_INT...") + return + self.inSigHandler = True + + self.stopTaosServices() + print("ServiceManager: INT Signal Handler returning...") + self.inSigHandler = False + + def sigHandlerResume(self): + print("Resuming TDengine service manager (main thread)...\n\n") + + # def _updateThreadStatus(self): + # if self.svcMgrThread: # valid svc mgr thread + # if self.svcMgrThread.isStopped(): # done? + # self.svcMgrThread.procIpcBatch() # one last time. TODO: appropriate? + # self.svcMgrThread = None # no more + + def isActive(self): + """ + Determine if the service/cluster is active at all, i.e. at least + one thread is not "stopped". + """ + for ti in self._tInsts: + if not ti.getStatus().isStopped(): + return True + return False + + def isRunning(self): + for ti in self._tInsts: + if not ti.getStatus().isRunning(): + return False + return True + + + # def isRestarting(self): + # """ + # Determine if the service/cluster is being "restarted", i.e., at least + # one thread is in "restarting" status + # """ + # for thread in self.svcMgrThreads: + # if thread.isRestarting(): + # return True + # return False + + def isStable(self): + """ + Determine if the service/cluster is "stable", i.e. all of the + threads are in "stable" status. 
+ """ + for ti in self._tInsts: + if not ti.getStatus().isStable(): + return False + return True + + def _procIpcAll(self): + while self.isActive(): + Progress.emit(Progress.SERVICE_HEART_BEAT) + for ti in self._tInsts: # all thread objects should always be valid + # while self.isRunning() or self.isRestarting() : # for as long as the svc mgr thread is still here + status = ti.getStatus() + if status.isRunning(): + th = ti.getSmThread() + th.procIpcBatch() # regular processing, + if status.isStopped(): + th.procIpcBatch() # one last time? + # self._updateThreadStatus() + + time.sleep(self.PAUSE_BETWEEN_IPC_CHECK) # pause, before next round + # raise CrashGenError("dummy") + print("Service Manager Thread (with subprocess) ended, main thread exiting...") + + def _getFirstInstance(self): + return self._tInsts[0] + + def startTaosServices(self): + with self._lock: + if self.isActive(): + raise RuntimeError("Cannot start TAOS service(s) when one/some may already be running") + + # Find if there's already a taosd service, and then kill it + for proc in psutil.process_iter(): + if proc.name() == 'taosd': + print("Killing an existing TAOSD process in 2 seconds... 
press CTRL-C to interrupt") + time.sleep(2.0) + proc.kill() + # print("Process: {}".format(proc.name())) + + # self.svcMgrThread = ServiceManagerThread() # create the object + + for ti in self._tInsts: + ti.start() + if not ti.isFirst(): + tFirst = self._getFirstInstance() + tFirst.createDnode(ti.getDbTarget()) + ti.getSmThread().procIpcBatch(trimToTarget=10, forceOutput=True) # for printing 10 lines + + def stopTaosServices(self): + with self._lock: + if not self.isActive(): + Logging.warning("Cannot stop TAOS service(s), already not active") + return + + for ti in self._tInsts: + ti.stop() + + def run(self): + self.startTaosServices() + self._procIpcAll() # pump/process all the messages, may encounter SIG + restart + if self.isActive(): # if sig handler hasn't destroyed it by now + self.stopTaosServices() # should have started already + + def restart(self): + if not self.isStable(): + Logging.warning("Cannot restart service/cluster, when not stable") + return + + # self._isRestarting = True + if self.isActive(): + self.stopTaosServices() + else: + Logging.warning("Service not active when restart requested") + + self.startTaosServices() + # self._isRestarting = False + + # def isRunning(self): + # return self.svcMgrThread != None + + # def isRestarting(self): + # return self._isRestarting + +class ServiceManagerThread: + """ + A class representing a dedicated thread which manages the "sub process" + of the TDengine service, interacting with its STDOUT/ERR. 
+ + It takes a TdeInstance parameter at creation time, or create a default + """ + MAX_QUEUE_SIZE = 10000 + + def __init__(self): + # Set the sub process + self._tdeSubProcess = None # type: TdeSubProcess + + # Arrange the TDengine instance + # self._tInstNum = tInstNum # instance serial number in cluster, ZERO based + # self._tInst = tInst or TdeInstance() # Need an instance + + self._thread = None # The actual thread, # type: threading.Thread + self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually. + + def __repr__(self): + return "[SvcMgrThread: status={}, subProc={}]".format( + self.getStatus(), self._tdeSubProcess) + + def getStatus(self): + return self._status + + # Start the thread (with sub process), and wait for the sub service + # to become fully operational + def start(self, cmdLine): + if self._thread: + raise RuntimeError("Unexpected _thread") + if self._tdeSubProcess: + raise RuntimeError("TDengine sub process already created/running") + + Logging.info("Attempting to start TAOS service: {}".format(self)) + + self._status.set(Status.STATUS_STARTING) + self._tdeSubProcess = TdeSubProcess() + self._tdeSubProcess.start(cmdLine) + + self._ipcQueue = Queue() + self._thread = threading.Thread( # First thread captures server OUTPUT + target=self.svcOutputReader, + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + self._thread.daemon = True # thread dies with the program + self._thread.start() + + self._thread2 = threading.Thread( # 2nd thread captures server ERRORs + target=self.svcErrorReader, + args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) + self._thread2.daemon = True # thread dies with the program + self._thread2.start() + + # wait for service to start + for i in range(0, 100): + time.sleep(1.0) + # self.procIpcBatch() # don't pump message during start up + print("_zz_", end="", flush=True) + if self._status.isRunning(): + Logging.info("[] TDengine service READY to process requests") + 
Logging.info("[] TAOS service started: {}".format(self)) + # self._verifyDnode(self._tInst) # query and ensure dnode is ready + # Logging.debug("[] TAOS Dnode verified: {}".format(self)) + return # now we've started + # TODO: handle failure-to-start better? + self.procIpcBatch(100, True) # display output before cronking out, trim to last 20 msgs, force output + raise RuntimeError("TDengine service did not start successfully: {}".format(self)) + + def _verifyDnode(self, tInst: TdeInstance): + dbc = DbConn.createNative(tInst.getDbTarget()) + dbc.open() + dbc.query("show dnodes") + # dbc.query("DESCRIBE {}.{}".format(dbName, self._stName)) + cols = dbc.getQueryResult() # id,end_point,vnodes,cores,status,role,create_time,offline reason + # ret = {row[0]:row[1] for row in stCols if row[3]=='TAG'} # name:type + isValid = False + for col in cols: + # print("col = {}".format(col)) + ep = col[1].split(':') # 10.1.30.2:6030 + print("Found ep={}".format(ep)) + if tInst.getPort() == int(ep[1]): # That's us + # print("Valid Dnode matched!") + isValid = True # now we are valid + break + if not isValid: + print("Failed to start dnode, sleep for a while") + time.sleep(600) + raise RuntimeError("Failed to start Dnode, expected port not found: {}". 
+ format(tInst.getPort())) + dbc.close() + + def stop(self): + # can be called from both main thread or signal handler + print("Terminating TDengine service running as the sub process...") + if self.getStatus().isStopped(): + print("Service already stopped") + return + if self.getStatus().isStopping(): + print("Service is already being stopped") + return + # Linux will send Control-C generated SIGINT to the TDengine process + # already, ref: + # https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes + if not self._tdeSubProcess: + raise RuntimeError("sub process object missing") + + self._status.set(Status.STATUS_STOPPING) + # retCode = self._tdeSubProcess.stop() + try: + retCode = self._tdeSubProcess.stop() + # print("Attempted to stop sub process, got return code: {}".format(retCode)) + if retCode == signal.SIGSEGV : # SGV + Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") + except subprocess.TimeoutExpired as err: + print("Time out waiting for TDengine service process to exit") + else: + if self._tdeSubProcess.isRunning(): # still running, should now never happen + print("FAILED to stop sub process, it is still running... pid = {}".format( + self._tdeSubProcess.getPid())) + else: + self._tdeSubProcess = None # not running any more + self.join() # stop the thread, change the status, etc. 
+ + # Check if it's really stopped + outputLines = 10 # for last output + if self.getStatus().isStopped(): + self.procIpcBatch(outputLines) # one last time + Logging.debug("End of TDengine Service Output: {}".format(self)) + Logging.info("----- TDengine Service (managed by SMT) is now terminated -----\n") + else: + print("WARNING: SMT did not terminate as expected: {}".format(self)) + + def join(self): + # TODO: sanity check + if not self.getStatus().isStopping(): + raise RuntimeError( + "SMT.Join(): Unexpected status: {}".format(self._status)) + + if self._thread: + self._thread.join() + self._thread = None + self._status.set(Status.STATUS_STOPPED) + # STD ERR thread + self._thread2.join() + self._thread2 = None + else: + print("Joining empty thread, doing nothing") + + def _trimQueue(self, targetSize): + if targetSize <= 0: + return # do nothing + q = self._ipcQueue + if (q.qsize() <= targetSize): # no need to trim + return + + Logging.debug("Triming IPC queue to target size: {}".format(targetSize)) + itemsToTrim = q.qsize() - targetSize + for i in range(0, itemsToTrim): + try: + q.get_nowait() + except Empty: + break # break out of for loop, no more trimming + + TD_READY_MSG = "TDengine is initialized successfully" + + def procIpcBatch(self, trimToTarget=0, forceOutput=False): + self._trimQueue(trimToTarget) # trim if necessary + # Process all the output generated by the underlying sub process, + # managed by IO thread + print("<", end="", flush=True) + while True: + try: + line = self._ipcQueue.get_nowait() # getting output at fast speed + self._printProgress("_o") + except Empty: + # time.sleep(2.3) # wait only if there's no output + # no more output + print(".>", end="", flush=True) + return # we are done with THIS BATCH + else: # got line, printing out + if forceOutput: + Logging.info(line) + else: + Logging.debug(line) + print(">", end="", flush=True) + + _ProgressBars = ["--", "//", "||", "\\\\"] + + def _printProgress(self, msg): # TODO: assuming 2 chars 
+ print(msg, end="", flush=True) + pBar = self._ProgressBars[Dice.throw(4)] + print(pBar, end="", flush=True) + print('\b\b\b\b', end="", flush=True) + + def svcOutputReader(self, out: IO, queue): + # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python + # print("This is the svcOutput Reader...") + # for line in out : + for line in iter(out.readline, b''): + # print("Finished reading a line: {}".format(line)) + # print("Adding item to queue...") + try: + line = line.decode("utf-8").rstrip() + except UnicodeError: + print("\nNon-UTF8 server output: {}\n".format(line)) + + # This might block, and then causing "out" buffer to block + queue.put(line) + self._printProgress("_i") + + if self._status.isStarting(): # we are starting, let's see if we have started + if line.find(self.TD_READY_MSG) != -1: # found + Logging.info("Waiting for the service to become FULLY READY") + time.sleep(1.0) # wait for the server to truly start. TODO: remove this + Logging.info("Service is now FULLY READY") # TODO: more ID info here? 
+ self._status.set(Status.STATUS_RUNNING) + + # Trim the queue if necessary: TODO: try this 1 out of 10 times + self._trimQueue(self.MAX_QUEUE_SIZE * 9 // 10) # trim to 90% size + + if self._status.isStopping(): # TODO: use thread status instead + # WAITING for stopping sub process to finish its outptu + print("_w", end="", flush=True) + + # queue.put(line) + # meaning sub process must have died + Logging.info("\nEnd of stream detected for TDengine STDOUT: {}".format(self)) + out.close() + + def svcErrorReader(self, err: IO, queue): + for line in iter(err.readline, b''): + print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line)) + Logging.info("\nEnd of stream detected for TDengine STDERR: {}".format(self)) + err.close() \ No newline at end of file diff --git a/tests/pytest/crash_gen_bootstrap.py b/tests/pytest/crash_gen_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..a3417d21a85ec5ea26c7ebc22ffe398fc436eebe --- /dev/null +++ b/tests/pytest/crash_gen_bootstrap.py @@ -0,0 +1,23 @@ +# -----!/usr/bin/python3.7 +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +import sys +from crash_gen.crash_gen import MainExec + +if __name__ == "__main__": + + mExec = MainExec() + mExec.init() + exitCode = mExec.run() + + print("Exiting with code: {}".format(exitCode)) + sys.exit(exitCode) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index fd973b8b7604c9c6b7148a336ca1ac082cd1b548..294bc52a945e02fd24196c2d0088d7553a7839d9 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -18,10 +18,13 @@ python3 ./test.py -f insert/multi.py python3 ./test.py -f insert/randomNullCommit.py python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py +python3 ./test.py -f insert/insertIntoTwoTables.py +python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py +python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py # tag @@ -148,7 +151,9 @@ python3 ./test.py -f query/select_last_crash.py python3 ./test.py -f query/queryNullValueTest.py python3 ./test.py -f query/queryInsertValue.py python3 ./test.py -f query/queryConnection.py +python3 ./test.py -f query/queryCountCSVData.py python3 ./test.py -f query/natualInterval.py +python3 ./test.py -f query/bug1471.py #stream python3 ./test.py -f stream/metric_1.py @@ -183,7 +188,7 @@ python3 ./test.py -f functions/function_leastsquares.py -r 1 python3 ./test.py -f functions/function_max.py -r 1 python3 ./test.py -f functions/function_min.py -r 1 python3 ./test.py -f functions/function_operations.py -r 1 -python3 ./test.py -f functions/function_percentile.py +python3 ./test.py -f functions/function_percentile.py -r 1 python3 ./test.py -f 
functions/function_spread.py -r 1 python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh new file mode 100755 index 0000000000000000000000000000000000000000..1a4c12a16c7d9bc4e8b2dd327765251745d44223 --- /dev/null +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log + +for memError in `grep 'ERROR SUMMARY' crash_gen_mem_err.log | awk '{print $4}'` +do +if [ -n "$memError" ]; then + if [ "$memError" -gt 12 ]; then + echo -e "${RED} ## Memory errors number valgrind reports is $memError.\ + More than our threshold! ## ${NC}" + fi +fi +done + +grep 'start to execute\|definitely lost:' valgrind.err|grep -v 'grep'|uniq|tee crash_gen-definitely-lost-out.log +for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | awk '{print $7}'` +do + +if [ -n "$defiMemError" ]; then + if [ "$defiMemError" -gt 3 ]; then + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + exit 8 + fi +fi +done \ No newline at end of file diff --git a/tests/pytest/insert/before_1970.py b/tests/pytest/insert/before_1970.py new file mode 100644 index 0000000000000000000000000000000000000000..cb17b657aad1a3bfdb915c9661bad291b75d6f04 --- /dev/null +++ b/tests/pytest/insert/before_1970.py @@ -0,0 +1,80 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + + +class TDTestCase: + """ + add test data before 1970s + """ + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database if not exists demo keep 36500;"); + print("==============create db demo keep 365000 days") + tdSql.execute("use demo;") + tdSql.execute("CREATE table if not exists test (ts timestamp, f1 int);") + print("==============create table test") + + print("==============step2") + #TODO : should add more testcases + tdSql.execute("insert into test values('1930-12-12 01:19:20.345', 1);") + tdSql.execute("insert into test values('1969-12-30 23:59:59.999', 2);") + tdSql.execute("insert into test values(-3600, 3);") + tdSql.execute("insert into test values('2020-10-20 14:02:53.770', 4);") + print("==============insert data") + + # tdSql.query("select * from test;") + # + # tdSql.checkRows(3) + # + # tdSql.checkData(0,0,'1969-12-12 01:19:20.345000') + # tdSql.checkData(1,0,'1970-01-01 07:00:00.000000') + # tdSql.checkData(2,0,'2020-10-20 14:02:53.770000') + print("==============step3") + tdDnodes.stopAll() + tdDnodes.start(1) + print("==============restart taosd") + + + print("==============step4") + tdSql.execute("use demo;") + tdSql.query("select * from test;") + # print(tdSql.queryResult) + tdSql.checkRows(4) + tdSql.checkData(0,0,'1930-12-12 01:19:20.345000') + tdSql.checkData(1,0,'1969-12-30 23:59:59.999000') + tdSql.checkData(2,0,'1970-01-01 07:00:00.000000') + 
tdSql.checkData(3,0,'2020-10-20 14:02:53.770000') + print("==============check data") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/insertIntoTwoTables.py b/tests/pytest/insert/insertIntoTwoTables.py new file mode 100644 index 0000000000000000000000000000000000000000..8b4f423c3dae38a0d8218f7b113f2784259300b4 --- /dev/null +++ b/tests/pytest/insert/insertIntoTwoTables.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + ### test case for TD-1758 ### + print("==============step1") + tdSql.execute( + "create table t0(ts timestamp, c int)") + tdSql.execute( + 'create table t1(ts timestamp, c binary(1))') + tdSql.execute( + "insert into t0 values(now,1) t1 values(now,'0')(now+1a,'1')(now+2a,'2')(now+3a,'3')(now+4a,'4')") + + print("==============step2") + + tdSql.query("select * from t0") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + tdSql.query("select * from t1") + tdSql.checkRows(5) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) 
+tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/bug1471.py b/tests/pytest/query/bug1471.py new file mode 100644 index 0000000000000000000000000000000000000000..f1cb0bdcdfbb085410d0001606208e11373687a1 --- /dev/null +++ b/tests/pytest/query/bug1471.py @@ -0,0 +1,73 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import time +import threading + + +class myThread(threading.Thread): + def __init__(self, conn): + threading.Thread.__init__(self) + self.event = threading.Event() + self.conn = taos.connect(conn._host, port=conn._port, config=conn._config) + + def run(self): + cur = self.conn.cursor() + self.event.wait() + cur.execute("drop database db") + cur.close() + self.conn.close() + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def run(self): + for i in range(50): + print("round", i) + thread = myThread(tdSql.cursor._connection) + thread.start() + + tdSql.execute('reset query cache') + tdSql.execute('drop database if exists db') + tdSql.execute('create database db') + tdSql.execute('use db') + tdSql.execute("create table car (ts timestamp, s int)") + tdSql.execute("insert into car values('2020-10-19 17:00:00', 123)") + + thread.event.set() + try: + tdSql.query("select s from car where ts = '2020-10-19 17:00:00'") + except Exception as e: + pass + else: + tdSql.checkData(0, 0, 123) + + thread.join() + 
time.sleep(0.2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryCountCSVData.py b/tests/pytest/query/queryCountCSVData.py new file mode 100644 index 0000000000000000000000000000000000000000..6c73425faec24afeed0c8a5a168f575ec182c771 --- /dev/null +++ b/tests/pytest/query/queryCountCSVData.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + + +class TDTestCase: + """ + create table and insert data from disordered.csv which timestamp is disordered and + ordered.csv which timestamp is ordered. 
+ then execute 'select count(*) from table xx;' + """ + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database if not exists demo;"); + tdSql.execute("use demo;") + tdSql.execute("CREATE TABLE IF NOT EXISTS test1 (ts TIMESTAMP, ValueID int, " + "VariantValue float, Quality int, Flags int);") + tdSql.execute("CREATE TABLE IF NOT EXISTS test2 (ts TIMESTAMP, ValueID int, " + "VariantValue float, Quality int, Flags int);") + ordered_csv = __file__.split('query')[0] + 'test_data/ordered.csv' + disordered_csv = __file__.split('query')[0] + 'test_data/disordered.csv' + + tdSql.execute(" insert into test1 file '{file}';".format(file=ordered_csv)) + tdSql.execute(" insert into test2 file '{file}';".format(file=disordered_csv)) + print("==============insert into test1 and test2 form test file") + + + print("==============step2") + tdSql.query('select * from test1;') + with open(ordered_csv) as f1: + num1 = len(f1.readlines()) + tdSql.checkRows(num1) + + + tdSql.query('select * from test2;') + with open(disordered_csv) as f2: + num2 = len(f2.readlines()) + tdSql.checkRows(num2) + print("=============execute select count(*) from xxx") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py index 17027cf498ff9e87b558866cd4d1e6a8c865afc0..a5e3ab21b3d7b639faf6e8b082a4ae58c5c430a6 100644 --- a/tests/pytest/query/queryJoin.py +++ b/tests/pytest/query/queryJoin.py @@ -95,14 +95,16 @@ class TDTestCase: tdSql.error( "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id group by stb_t.id") tdSql.error( - "select 
stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.name;") - tdSql.error( - "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.location = stb_t.name") + "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.name;") tdSql.execute("alter table stb_t add tag pid int") tdSql.execute("alter table tb_t1 set tag pid=2") tdSql.execute("alter table tb_t2 set tag pid=1") + tdSql.query( + "select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.location = stb_t.name") + tdSql.checkRows(0) + tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.pid") tdSql.checkRows(3) diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index a7fc08c5a39d7b9a59068463f521d83106865c2d..e09900acc4cde66b1f2505c0fb532889d2db81e4 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -11,6 +11,7 @@ # -*- coding: utf-8 -*- + import sys import os import taos @@ -32,17 +33,23 @@ class taosdemoQueryPerformace: def query(self): cursor = self.conn.cursor() - cursor.execute("use test") + cursor.execute("use test") totalTime = 0 - for i in range(100): - startTime = time.time() + for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() cursor.execute("select count(*) from test.meters") totalTime += time.time() - startTime print("query time for: select count(*) from test.meters %f seconds" % (totalTime / 100)) totalTime = 0 for i 
in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.meters") totalTime += time.time() - startTime @@ -50,6 +57,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select count(*) from test.meters where loc='beijing'") totalTime += time.time() - startTime @@ -57,6 +67,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.meters where areaid=10") totalTime += time.time() - startTime @@ -64,6 +77,9 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select avg(f1), max(f2), min(f3) from test.t10 interval(10s)") totalTime += time.time() - startTime @@ -71,11 +87,34 @@ class taosdemoQueryPerformace: totalTime = 0 for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") startTime = time.time() cursor.execute("select last_row(*) from meters") totalTime += time.time() - startTime print("query time for: select last_row(*) from meters %f seconds" % (totalTime / 100)) + totalTime = 0 + for i in range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() + cursor.execute("select * from meters") + totalTime += time.time() - startTime + print("query time for: select * from meters %f seconds" % (totalTime / 100)) + + totalTime = 0 + for i in 
range(100): + if(sys.argv[1] == '1'): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + startTime = time.time() + cursor.execute("select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'") + totalTime += time.time() - startTime + print("query time for: select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000' %f seconds" % (totalTime / 100)) + if __name__ == '__main__': perftest = taosdemoQueryPerformace() perftest.initConnection() diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py index e5d3c8ce1f4eb9c1d2003bd659771562c9ea14e5..649e0dc1cb3191ba08b3f2da0a5edee3afc66575 100644 --- a/tests/pytest/query/querySort.py +++ b/tests/pytest/query/querySort.py @@ -96,6 +96,12 @@ class TDTestCase: tdSql.query("select * from st order by ts desc") self.checkColumnSorted(0, "desc") + print("======= step 2: verify order for special column =========") + + tdSql.query("select tbcol1 from st order by ts desc") + + tdSql.query("select tbcol6 from st order by ts desc") + for i in range(1, 10): tdSql.error("select * from st order by tbcol%d" % i) tdSql.error("select * from st order by tbcol%d asc" % i) diff --git a/tests/pytest/query/removeDBAndSTable.py b/tests/pytest/query/removeDBAndSTable.py new file mode 100644 index 0000000000000000000000000000000000000000..4616c7e378326af633a89905d746c7d51ce92139 --- /dev/null +++ b/tests/pytest/query/removeDBAndSTable.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database db_vplu"); + tdSql.execute("use db_vplu") + tdSql.execute("CREATE table if not exists st (ts timestamp, speed int) tags(id int)") + tdSql.execute("CREATE table if not exists st_vplu (ts timestamp, speed int) tags(id int)") + + print("==============step2") + + tdSql.execute("drop table st") + + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "st_vplu") + + tdDnodes.stopAll() + tdDnodes.start(1) + + tdSql.execute("use db_vplu") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "st_vplu") + + tdSql.execute("drop database db") + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "db_vplu") + + tdDnodes.stopAll() + tdDnodes.start(1) + + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "db_vplu") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/new.py b/tests/pytest/stream/new.py index eac93dc0e649f5d48481079d75851a27be270567..12ec6d4507710869632eac77d719217b3b0ed7b3 100644 --- a/tests/pytest/stream/new.py +++ b/tests/pytest/stream/new.py @@ -26,7 +26,6 @@ class TDTestCase: def run(self): rowNum = 200 - totalNum = 200 tdSql.prepare() 
tdLog.info("=============== step1") @@ -42,7 +41,9 @@ class TDTestCase: tdSql.execute("create table st as select count(*), count(tbcol), count(tbcol2) from mt interval(10s)") tdLog.info("=============== step3") + start = time.time() tdSql.waitedQuery("select * from st", 1, 120) + delay = int(time.time() - start) + 20 v = tdSql.getData(0, 3) if v >= 51: tdLog.exit("value is %d, which is larger than 51" % v) @@ -54,11 +55,18 @@ class TDTestCase: tdSql.execute("insert into tb%d values(now + %ds, %d, %d)" % (i, j, j, j)) tdLog.info("=============== step5") - tdLog.sleep(40) - tdSql.waitedQuery("select * from st order by ts desc", 1, 120) - v = tdSql.getData(0, 3) - if v <= 51: - tdLog.exit("value is %d, which is smaller than 51" % v) + maxValue = 0 + for i in range(delay): + time.sleep(1) + tdSql.query("select * from st order by ts desc") + v = tdSql.getData(0, 3) + if v > maxValue: + maxValue = v + if v > 51: + break + + if maxValue <= 51: + tdLog.exit("value is %d, which is smaller than 51" % maxValue) def stop(self): tdSql.close() diff --git a/tests/pytest/table/alter_wal0.py b/tests/pytest/table/alter_wal0.py new file mode 100644 index 0000000000000000000000000000000000000000..15ad69998f450b8e385cbf58052d246d9de27380 --- /dev/null +++ b/tests/pytest/table/alter_wal0.py @@ -0,0 +1,75 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + + +class TDTestCase: + """ + remove last tow bytes of file 'wal0',then restart taosd and create new tables. + """ + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database if not exists demo;"); + tdSql.execute("use demo;") + tdSql.execute("create table if not exists meters(ts timestamp, f1 int) tags(t1 int);"); + for i in range(1,11): + tdSql.execute("CREATE table if not exists test{num} using meters tags({num});".format(num=i)) + print("==============insert 10 tables") + + tdSql.query('show tables;') + tdSql.checkRows(10) + + print("==============step2") + tdDnodes.stopAll() + filename = '/var/lib/taos/mnode/wal/wal0' + + with open(filename, 'rb') as f1: + temp = f1.read() + + with open(filename, 'wb') as f2: + f2.write(temp[:-2]) + + tdDnodes.start(1) + print("==============remove last tow bytes of file 'wal0' and restart taosd") + + print("==============step3") + tdSql.execute("use demo;") + tdSql.query('show tables;') + tdSql.checkRows(10) + for i in range(11,21): + tdSql.execute("CREATE table if not exists test{num} using meters tags({num});".format(num=i)) + + tdSql.query('show tables;') + tdSql.checkRows(20) + print("==============check table numbers and create 10 tables") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/table/create_sensitive.py b/tests/pytest/table/create_sensitive.py new file mode 100644 index 0000000000000000000000000000000000000000..1934b662c7a57c13c2a1b8e8dfd65ab6ddbe13a4 --- /dev/null +++ b/tests/pytest/table/create_sensitive.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +import sys +import string +import random +import subprocess +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('=============== step1') + tdLog.info('create table TestSensitiveT(ts timestamp, i int)') + tdSql.execute('create table TestSensitiveT(ts timestamp, i int)') + tdLog.info('create table TestSensitiveSt(ts timestamp,i int) tags(j int)') + tdSql.execute('create table TestSensitiveSt(ts timestamp,i int) tags(j int)') + tdLog.info('create table Abcde using TestSensitiveSt tags(1)') + tdSql.execute('create table AbcdeFgh using TestSensitiveSt tags(1)') + tdLog.info('=============== step2') + tdLog.info('test normal table ') + tdSql.error('create table testsensitivet(ts timestamp, i int)') + tdSql.error('create table testsensitivet(ts timestamp, j int)') + tdSql.error('create table testsensItivet(ts timestamp, j int)') + tdSql.error('create table TESTSENSITIVET(ts timestamp, i int)') + tdLog.info('=============== step3') + tdLog.info('test super table ') + tdSql.error('create table testsensitivest(ts timestamp,i int) tags(j int)') + tdSql.error('create table testsensitivest(ts timestamp,i int) tags(k int)') + tdSql.error('create table TESTSENSITIVEST(ts timestamp,i int) tags(j int)') + tdSql.error('create table Testsensitivest(ts timestamp,i int) tags(j int)') + tdLog.info('=============== step4') + tdLog.info('test subtable ') + tdSql.error('create table abcdefgh using TestSensitiveSt tags(1)') + tdSql.error('create table ABCDEFGH using TestSensitiveSt tags(1)') 
+ tdSql.error('create table Abcdefgh using TestSensitiveSt tags(1)') + tdSql.error('create table abcdeFgh using TestSensitiveSt tags(1)') + tdSql.error('insert into table abcdefgh using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table ABCDEFGH using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table Abcdefgh using TestSensitiveSt tags(1) values(now,1)') + tdSql.error('insert into table abcdeFgH using TestSensitiveSt tags(1) values(now,1)') + tdSql.query('show tables') + tdLog.info('tdSql.checkRow(0)') + tdSql.checkRows(2) + + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tag_lite/datatype.py b/tests/pytest/tag_lite/datatype.py index f7fa9fa3a2dddaa3f352d02f74872ba42bd32f44..40f0ed7a090683faf149e56620ff63db7a52acb3 100644 --- a/tests/pytest/tag_lite/datatype.py +++ b/tests/pytest/tag_lite/datatype.py @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute('alter table stb add tag tnc nchar(10)') for tid in range(1, self.ntables + 1): tdSql.execute('alter table tb%d set tag tnc=\"%s\"' % - (tid, str(tid * 1.2))) + (tid, str(tid + 1000000000))) tdLog.info("insert %d data in to each %d tables" % (2, self.ntables)) for rid in range(self.rowsPerTable + 1, self.rowsPerTable + 3): sqlcmd = ['insert into'] diff --git a/tests/pytest/test_data/__init__.py b/tests/pytest/test_data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd7af5b9db25bf22f960bfca6bf18b1518cc86f --- /dev/null +++ b/tests/pytest/test_data/__init__.py @@ -0,0 +1,15 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + + +""" +this directory contains test data files +""" \ No newline at end of file diff --git a/tests/pytest/test_data/disordered.csv b/tests/pytest/test_data/disordered.csv new file mode 100644 index 0000000000000000000000000000000000000000..0e6fd75e5b1a43da360541f57377236e0984093a --- /dev/null +++ b/tests/pytest/test_data/disordered.csv @@ -0,0 +1,500 @@ +"2020-03-01 20:01:49.493","130","7.595","128","8392704" +"2020-03-01 20:01:50.493","130","7.598","128","8392704" +"2020-03-01 20:01:51.493","130","7.602","128","8392704" +"2020-03-01 20:01:52.493","130","7.604","128","8392704" +"2020-03-01 20:01:53.493","130","7.604","128","8392704" +"2020-03-01 20:01:54.493","130","7.606","128","8392704" +"2020-03-01 20:01:55.493","130","7.607","128","8392704" +"2020-03-01 20:01:56.493","130","7.607","128","8392704" +"2020-03-01 20:01:57.493","130","7.607","128","8392704" +"2020-03-01 20:01:58.493","130","7.607","128","8392704" +"2020-03-01 20:01:59.493","130","7.606","128","8392704" +"2020-03-01 20:02:00.493","130","7.606","128","8392704" +"2020-03-01 20:02:01.493","130","7.606","128","8392704" +"2020-03-01 20:02:02.493","130","7.607","128","8392704" +"2020-03-01 20:02:03.493","130","7.608","128","8392704" +"2020-03-01 20:02:04.493","130","7.609","128","8392704" +"2020-03-01 20:02:05.493","130","7.609","128","8392704" +"2020-03-01 20:02:06.493","130","7.608","128","8392704" +"2020-03-01 20:02:07.493","130","7.606","128","8392704" +"2020-03-01 20:02:08.493","130","7.606","128","8392704" +"2020-03-01 20:02:09.493","130","7.607","128","8392704" +"2020-03-01 20:02:10.493","130","7.609","128","8392704" +"2020-03-01 20:02:11.493","130","7.61","128","8392704" +"2020-03-01 20:02:12.493","130","7.611","128","8392704" 
+"2020-03-01 20:02:13.493","130","7.61","128","8392704" +"2020-03-01 20:02:14.493","130","7.607","128","8392704" +"2020-03-01 20:02:15.493","130","7.605","128","8392704" +"2020-03-01 20:02:16.493","130","7.604","128","8392704" +"2020-03-01 20:02:17.493","130","7.603","128","8392704" +"2020-03-01 20:02:18.493","130","7.604","128","8392704" +"2020-03-01 20:02:19.493","130","7.604","128","8392704" +"2020-03-01 20:02:20.493","130","7.603","128","8392704" +"2020-03-01 20:02:21.493","130","7.601","128","8392704" +"2020-03-01 20:02:22.493","130","7.598","128","8392704" +"2020-03-01 20:02:23.493","130","7.595","128","8392704" +"2020-03-01 20:02:24.493","130","7.594","128","8392704" +"2020-03-01 20:02:25.493","130","7.594","128","8392704" +"2020-03-01 20:02:26.493","130","7.595","128","8392704" +"2020-03-01 20:02:27.493","130","7.596","128","8392704" +"2020-03-01 20:02:28.493","130","7.596","128","8392704" +"2020-03-01 20:02:29.493","130","7.598","128","8392704" +"2020-03-01 20:02:30.493","130","7.6","128","8392704" +"2020-03-01 20:02:31.493","130","7.6","128","8392704" +"2020-03-01 20:02:32.493","130","7.6","128","8392704" +"2020-03-01 20:02:33.493","130","7.601","128","8392704" +"2020-03-01 20:02:34.493","130","7.603","128","8392704" +"2020-03-01 20:02:35.493","130","7.604","128","8392704" +"2020-03-01 20:02:36.493","130","7.605","128","8392704" +"2020-03-01 20:02:37.493","130","7.606","128","8392704" +"2020-03-01 20:02:38.493","130","7.61","128","8392704" +"2020-03-01 20:02:39.493","130","7.611","128","8392704" +"2020-03-01 20:02:40.493","130","7.61","128","8392704" +"2020-03-01 20:02:41.493","130","7.609","128","8392704" +"2020-03-01 20:02:42.493","130","7.611","128","8392704" +"2020-03-01 20:02:43.493","130","7.61","128","8392704" +"2020-03-01 20:02:44.493","130","7.607","128","8392704" +"2020-03-01 20:02:45.493","130","7.605","128","8392704" +"2020-03-01 20:02:46.493","130","7.606","128","8392704" +"2020-03-01 20:02:47.493","130","7.604","128","8392704" +"2020-03-01 
20:02:48.493","130","7.599","128","8392704" +"2020-03-01 20:02:49.493","130","7.595","128","8392704" +"2020-03-01 06:41:17.493","130","6.742","128","8392704" +"2020-03-01 06:41:18.493","130","6.741","128","8392704" +"2020-03-01 06:41:19.493","130","6.737","128","8392704" +"2020-03-01 06:41:20.493","130","6.734","128","8392704" +"2020-03-01 06:41:21.493","130","6.734","128","8392704" +"2020-03-01 06:41:22.493","130","6.733","128","8392704" +"2020-03-01 06:41:23.493","130","6.736","128","8392704" +"2020-03-01 06:41:24.493","130","6.739","128","8392704" +"2020-03-01 06:41:25.493","130","6.738","128","8392704" +"2020-03-01 06:41:26.493","130","6.74","128","8392704" +"2020-03-01 06:41:27.493","130","6.745","128","8392704" +"2020-03-01 06:41:28.493","130","6.749","128","8392704" +"2020-03-01 06:41:29.493","130","6.753","128","8392704" +"2020-03-01 06:41:30.493","130","6.753","128","8392704" +"2020-03-01 06:41:31.493","130","6.757","128","8392704" +"2020-03-01 06:41:32.493","130","6.763","128","8392704" +"2020-03-01 06:41:33.493","130","6.765","128","8392704" +"2020-03-01 06:41:34.493","130","6.764","128","8392704" +"2020-03-01 06:41:35.493","130","6.762","128","8392704" +"2020-03-01 06:41:36.493","130","6.758","128","8392704" +"2020-03-01 06:41:37.493","130","6.756","128","8392704" +"2020-03-01 06:41:38.493","130","6.755","128","8392704" +"2020-03-01 06:41:39.493","130","6.754","128","8392704" +"2020-03-01 06:41:40.493","130","6.755","128","8392704" +"2020-03-01 06:41:41.493","130","6.756","128","8392704" +"2020-03-01 06:41:42.493","130","6.757","128","8392704" +"2020-03-01 06:41:43.493","130","6.756","128","8392704" +"2020-03-01 06:41:44.493","130","6.756","128","8392704" +"2020-03-01 06:41:45.493","130","6.756","128","8392704" +"2020-03-01 06:41:46.493","130","6.759","128","8392704" +"2020-03-01 06:41:47.493","130","6.759","128","8392704" +"2020-03-01 06:41:48.493","130","6.758","128","8392704" +"2020-03-01 06:41:49.493","130","6.758","128","8392704" +"2020-03-01 
06:41:50.493","130","6.756","128","8392704" +"2020-03-01 06:41:51.493","130","6.755","128","8392704" +"2020-03-01 06:41:52.493","130","6.755","128","8392704" +"2020-03-01 06:41:53.493","130","6.754","128","8392704" +"2020-03-01 06:41:54.493","130","6.751","128","8392704" +"2020-03-01 06:41:55.493","130","6.752","128","8392704" +"2020-03-01 06:41:56.493","130","6.753","128","8392704" +"2020-03-01 06:41:57.493","130","6.753","128","8392704" +"2020-03-01 06:41:58.493","130","6.753","128","8392704" +"2020-03-01 06:41:59.493","130","6.755","128","8392704" +"2020-03-01 06:42:00.493","130","6.752","128","8392704" +"2020-03-01 06:42:01.493","130","6.75","128","8392704" +"2020-03-01 06:42:02.493","130","6.75","128","8392704" +"2020-03-01 06:42:03.493","130","6.753","128","8392704" +"2020-03-01 06:42:04.493","130","6.755","128","8392704" +"2020-03-01 06:42:05.493","130","6.754","128","8392704" +"2020-03-01 06:42:06.493","130","6.754","128","8392704" +"2020-03-01 06:42:07.493","130","6.752","128","8392704" +"2020-03-01 06:42:08.493","130","6.748","128","8392704" +"2020-03-01 06:42:09.493","130","6.747","128","8392704" +"2020-03-01 06:42:10.493","130","6.747","128","8392704" +"2020-03-01 06:42:11.493","130","6.748","128","8392704" +"2020-03-01 06:42:12.493","130","6.748","128","8392704" +"2020-03-01 06:42:13.493","130","6.75","128","8392704" +"2020-03-01 06:42:14.493","130","6.754","128","8392704" +"2020-03-01 06:42:15.493","130","6.754","128","8392704" +"2020-03-01 06:42:16.493","130","6.756","128","8392704" +"2020-03-01 06:42:17.493","130","6.756","128","8392704" +"2020-03-01 06:42:18.493","130","6.757","128","8392704" +"2020-03-01 06:42:19.493","130","6.757","128","8392704" +"2020-03-01 06:42:20.493","130","6.76","128","8392704" +"2020-03-01 06:42:21.493","130","6.761","128","8392704" +"2020-03-01 06:42:22.493","130","6.76","128","8392704" +"2020-03-01 06:42:23.493","130","6.76","128","8392704" +"2020-03-01 06:42:24.493","130","6.76","128","8392704" +"2020-03-01 
06:42:25.493","130","6.76","128","8392704" +"2020-03-01 06:42:26.493","130","6.758","128","8392704" +"2020-03-01 06:42:27.493","130","6.757","128","8392704" +"2020-03-01 06:42:28.493","130","6.752","128","8392704" +"2020-03-01 06:42:29.493","130","6.746","128","8392704" +"2020-03-01 06:42:30.493","130","6.742","128","8392704" +"2020-03-01 06:42:31.493","130","6.741","128","8392704" +"2020-03-01 06:42:32.493","130","6.739","128","8392704" +"2020-03-01 06:42:33.493","130","6.739","128","8392704" +"2020-03-01 06:42:34.493","130","6.737","128","8392704" +"2020-03-01 06:42:35.493","130","6.737","128","8392704" +"2020-03-01 06:42:36.493","130","6.738","128","8392704" +"2020-03-01 06:42:37.493","130","6.739","128","8392704" +"2020-03-01 06:42:38.493","130","6.743","128","8392704" +"2020-03-01 06:42:39.493","130","6.747","128","8392704" +"2020-03-01 06:42:40.493","130","6.748","128","8392704" +"2020-03-01 06:42:41.493","130","6.746","128","8392704" +"2020-03-01 06:42:42.493","130","6.746","128","8392704" +"2020-03-01 06:42:43.493","130","6.745","128","8392704" +"2020-03-01 06:42:44.493","130","6.742","128","8392704" +"2020-03-01 06:42:45.493","130","6.741","128","8392704" +"2020-03-01 06:42:46.493","130","6.74","128","8392704" +"2020-03-01 06:42:47.493","130","6.742","128","8392704" +"2020-03-01 06:42:48.493","130","6.743","128","8392704" +"2020-03-01 06:42:49.493","130","6.742","128","8392704" +"2020-03-01 06:42:50.493","130","6.742","128","8392704" +"2020-03-01 06:42:51.493","130","6.741","128","8392704" +"2020-03-01 06:42:52.493","130","6.741","128","8392704" +"2020-03-01 06:42:53.493","130","6.742","128","8392704" +"2020-03-01 06:42:54.493","130","6.742","128","8392704" +"2020-03-01 06:42:55.493","130","6.745","128","8392704" +"2020-03-01 06:42:56.493","130","6.747","128","8392704" +"2020-03-01 06:42:57.493","130","6.748","128","8392704" +"2020-03-01 06:42:58.493","130","6.75","128","8392704" +"2020-03-01 06:42:59.493","130","6.75","128","8392704" +"2020-03-01 
06:43:00.493","130","6.748","128","8392704" +"2020-03-01 06:43:01.493","130","6.748","128","8392704" +"2020-03-01 06:43:02.493","130","6.746","128","8392704" +"2020-03-01 06:43:03.493","130","6.745","128","8392704" +"2020-03-01 06:43:04.493","130","6.745","128","8392704" +"2020-03-01 06:43:05.493","130","6.745","128","8392704" +"2020-03-01 06:43:06.493","130","6.744","128","8392704" +"2020-03-01 06:43:07.493","130","6.749","128","8392704" +"2020-03-01 06:43:08.493","130","6.756","128","8392704" +"2020-03-01 06:43:09.493","130","6.759","128","8392704" +"2020-03-01 06:43:10.493","130","6.759","128","8392704" +"2020-03-01 06:43:11.493","130","6.758","128","8392704" +"2020-03-01 06:43:12.493","130","6.758","128","8392704" +"2020-03-01 06:43:13.493","130","6.759","128","8392704" +"2020-03-01 06:43:14.493","130","6.759","128","8392704" +"2020-03-01 06:43:15.493","130","6.753","128","8392704" +"2020-03-01 06:43:16.493","130","6.751","128","8392704" +"2020-03-01 20:02:50.493","130","7.596","128","8392704" +"2020-03-01 20:02:51.493","130","7.597","128","8392704" +"2020-03-01 20:02:52.493","130","7.599","128","8392704" +"2020-03-01 20:02:53.493","130","7.6","128","8392704" +"2020-03-01 20:02:54.493","130","7.601","128","8392704" +"2020-03-01 20:02:55.493","130","7.603","128","8392704" +"2020-03-01 20:02:56.493","130","7.602","128","8392704" +"2020-03-01 20:02:57.493","130","7.601","128","8392704" +"2020-03-01 20:02:58.493","130","7.601","128","8392704" +"2020-03-01 20:02:59.493","130","7.6","128","8392704" +"2020-03-01 20:03:00.493","130","7.599","128","8392704" +"2020-03-01 20:03:01.493","130","7.599","128","8392704" +"2020-03-01 20:03:02.493","130","7.6","128","8392704" +"2020-03-01 20:03:03.493","130","7.601","128","8392704" +"2020-03-01 20:03:04.493","130","7.601","128","8392704" +"2020-03-01 20:03:05.493","130","7.601","128","8392704" +"2020-03-01 20:03:06.493","130","7.6","128","8392704" +"2020-03-01 20:03:07.493","130","7.602","128","8392704" +"2020-03-01 
20:03:08.493","130","7.606","128","8392704" +"2020-03-01 20:03:09.493","130","7.609","128","8392704" +"2020-03-01 20:03:10.493","130","7.612","128","8392704" +"2020-03-01 20:03:11.493","130","7.614","128","8392704" +"2020-03-01 20:03:12.493","130","7.615","128","8392704" +"2020-03-01 20:03:13.493","130","7.614","128","8392704" +"2020-03-01 20:03:14.493","130","7.613","128","8392704" +"2020-03-01 20:03:15.493","130","7.614","128","8392704" +"2020-03-01 20:03:16.493","130","7.612","128","8392704" +"2020-03-01 20:03:17.493","130","7.609","128","8392704" +"2020-03-01 20:03:18.493","130","7.606","128","8392704" +"2020-03-01 20:03:19.493","130","7.604","128","8392704" +"2020-03-01 20:03:20.493","130","7.604","128","8392704" +"2020-03-01 20:03:21.493","130","7.605","128","8392704" +"2020-03-01 20:03:22.493","130","7.605","128","8392704" +"2020-03-01 20:03:23.493","130","7.605","128","8392704" +"2020-03-01 20:03:24.493","130","7.605","128","8392704" +"2020-03-01 20:03:25.493","130","7.604","128","8392704" +"2020-03-01 20:03:26.493","130","7.603","128","8392704" +"2020-03-01 20:03:27.493","130","7.604","128","8392704" +"2020-03-01 20:03:28.493","130","7.605","128","8392704" +"2020-03-01 20:03:29.493","130","7.607","128","8392704" +"2020-03-01 20:03:30.493","130","7.609","128","8392704" +"2020-03-01 20:03:31.493","130","7.609","128","8392704" +"2020-03-01 20:03:32.493","130","7.607","128","8392704" +"2020-03-01 20:03:33.493","130","7.606","128","8392704" +"2020-03-01 20:03:34.493","130","7.607","128","8392704" +"2020-03-01 20:03:35.493","130","7.608","128","8392704" +"2020-03-01 20:03:36.493","130","7.609","128","8392704" +"2020-03-01 20:03:37.493","130","7.609","128","8392704" +"2020-03-01 20:03:38.493","130","7.607","128","8392704" +"2020-03-01 20:03:39.493","130","7.602","128","8392704" +"2020-03-01 20:03:40.493","130","7.599","128","8392704" +"2020-03-01 20:03:41.493","130","7.598","128","8392704" +"2020-03-01 20:03:42.493","130","7.596","128","8392704" +"2020-03-01 
20:03:43.493","130","7.595","128","8392704" +"2020-03-01 20:03:44.493","130","7.594","128","8392704" +"2020-03-01 20:03:45.493","130","7.595","128","8392704" +"2020-03-01 20:03:46.493","130","7.597","128","8392704" +"2020-03-01 20:03:47.493","130","7.596","128","8392704" +"2020-03-01 20:03:48.493","130","7.595","128","8392704" +"2020-03-01 20:03:49.493","130","7.596","128","8392704" +"2020-03-01 20:03:50.493","130","7.596","128","8392704" +"2020-03-01 20:03:51.493","130","7.595","128","8392704" +"2020-03-01 20:03:52.493","130","7.596","128","8392704" +"2020-03-01 20:03:53.493","130","7.597","128","8392704" +"2020-03-01 20:03:54.493","130","7.598","128","8392704" +"2020-03-01 20:03:55.493","130","7.596","128","8392704" +"2020-03-01 20:03:56.493","130","7.596","128","8392704" +"2020-03-01 20:03:57.493","130","7.599","128","8392704" +"2020-03-01 20:03:58.493","130","7.602","128","8392704" +"2020-03-01 20:03:59.493","130","7.603","128","8392704" +"2020-03-01 20:04:00.493","130","7.602","128","8392704" +"2020-03-01 20:04:01.493","130","7.6","128","8392704" +"2020-03-01 20:04:02.493","130","7.598","128","8392704" +"2020-03-01 20:04:03.493","130","7.595","128","8392704" +"2020-03-01 20:04:04.493","130","7.593","128","8392704" +"2020-03-01 20:04:05.493","130","7.592","128","8392704" +"2020-03-01 20:04:06.493","130","7.591","128","8392704" +"2020-03-01 20:04:07.493","130","7.591","128","8392704" +"2020-03-01 20:04:08.493","130","7.591","128","8392704" +"2020-03-01 20:04:09.493","130","7.592","128","8392704" +"2020-03-01 20:04:10.493","130","7.59","128","8392704" +"2020-03-01 20:04:11.493","130","7.587","128","8392704" +"2020-03-01 20:04:12.493","130","7.584","128","8392704" +"2020-03-01 20:04:13.493","130","7.583","128","8392704" +"2020-03-01 20:04:14.493","130","7.581","128","8392704" +"2020-03-01 20:04:15.493","130","7.578","128","8392704" +"2020-03-01 20:04:16.493","130","7.576","128","8392704" +"2020-03-01 20:04:17.493","130","7.577","128","8392704" +"2020-03-01 
20:04:18.493","130","7.579","128","8392704" +"2020-03-01 20:04:19.493","130","7.583","128","8392704" +"2020-03-01 20:04:20.493","130","7.587","128","8392704" +"2020-03-01 20:04:21.493","130","7.588","128","8392704" +"2020-03-01 20:04:22.493","130","7.589","128","8392704" +"2020-03-01 20:04:23.493","130","7.59","128","8392704" +"2020-03-01 20:04:24.493","130","7.593","128","8392704" +"2020-03-01 20:04:25.493","130","7.597","128","8392704" +"2020-03-01 20:04:26.493","130","7.6","128","8392704" +"2020-03-01 20:04:27.493","130","7.603","128","8392704" +"2020-03-01 20:04:28.493","130","7.606","128","8392704" +"2020-03-01 20:04:29.493","130","7.608","128","8392704" +"2020-03-01 20:04:30.493","130","7.609","128","8392704" +"2020-03-01 20:04:31.493","130","7.607","128","8392704" +"2020-03-01 20:04:32.493","130","7.607","128","8392704" +"2020-03-01 20:04:33.493","130","7.607","128","8392704" +"2020-03-01 20:04:34.493","130","7.602","128","8392704" +"2020-03-01 20:04:35.493","130","7.599","128","8392704" +"2020-03-01 20:04:36.493","130","7.599","128","8392704" +"2020-03-01 20:04:37.493","130","7.599","128","8392704" +"2020-03-01 20:04:38.493","130","7.598","128","8392704" +"2020-03-01 20:04:39.493","130","7.596","128","8392704" +"2020-03-01 20:04:40.493","130","7.595","128","8392704" +"2020-03-01 20:04:41.493","130","7.592","128","8392704" +"2020-03-01 20:04:42.493","130","7.586","128","8392704" +"2020-03-01 20:04:43.493","130","7.582","128","8392704" +"2020-03-01 20:04:44.493","130","7.582","128","8392704" +"2020-03-01 20:04:45.493","130","7.584","128","8392704" +"2020-03-01 20:04:46.493","130","7.583","128","8392704" +"2020-03-01 20:04:47.493","130","7.582","128","8392704" +"2020-03-01 20:04:48.493","130","7.582","128","8392704" +"2020-03-01 20:04:49.493","130","7.585","128","8392704" +"2020-03-01 06:43:17.493","130","6.751","128","8392704" +"2020-03-01 06:43:18.493","130","6.75","128","8392704" +"2020-03-01 06:43:19.493","130","6.748","128","8392704" +"2020-03-01 
06:43:20.493","130","6.751","128","8392704" +"2020-03-01 06:43:21.493","130","6.752","128","8392704" +"2020-03-01 06:43:22.493","130","6.751","128","8392704" +"2020-03-01 06:43:23.493","130","6.746","128","8392704" +"2020-03-01 06:43:24.493","130","6.739","128","8392704" +"2020-03-01 06:43:25.493","130","6.737","128","8392704" +"2020-03-01 06:43:26.493","130","6.735","128","8392704" +"2020-03-01 06:43:27.493","130","6.735","128","8392704" +"2020-03-01 06:43:28.493","130","6.734","128","8392704" +"2020-03-01 06:43:29.493","130","6.731","128","8392704" +"2020-03-01 06:43:30.493","130","6.729","128","8392704" +"2020-03-01 06:43:31.493","130","6.73","128","8392704" +"2020-03-01 06:43:32.493","130","6.736","128","8392704" +"2020-03-01 06:43:33.493","130","6.74","128","8392704" +"2020-03-01 06:43:34.493","130","6.741","128","8392704" +"2020-03-01 06:43:35.493","130","6.743","128","8392704" +"2020-03-01 06:43:36.493","130","6.743","128","8392704" +"2020-03-01 06:43:37.493","130","6.745","128","8392704" +"2020-03-01 06:43:38.493","130","6.747","128","8392704" +"2020-03-01 06:43:39.493","130","6.747","128","8392704" +"2020-03-01 06:43:40.493","130","6.746","128","8392704" +"2020-03-01 06:43:41.493","130","6.745","128","8392704" +"2020-03-01 06:43:42.493","130","6.743","128","8392704" +"2020-03-01 06:43:43.493","130","6.741","128","8392704" +"2020-03-01 06:43:44.493","130","6.737","128","8392704" +"2020-03-01 06:43:45.493","130","6.737","128","8392704" +"2020-03-01 06:43:46.493","130","6.74","128","8392704" +"2020-03-01 06:43:47.493","130","6.744","128","8392704" +"2020-03-01 06:43:48.493","130","6.746","128","8392704" +"2020-03-01 06:43:49.493","130","6.745","128","8392704" +"2020-03-01 06:43:50.493","130","6.743","128","8392704" +"2020-03-01 06:43:51.493","130","6.745","128","8392704" +"2020-03-01 06:43:52.493","130","6.747","128","8392704" +"2020-03-01 06:43:53.493","130","6.748","128","8392704" +"2020-03-01 06:43:54.493","130","6.748","128","8392704" +"2020-03-01 
06:43:55.493","130","6.747","128","8392704" +"2020-03-01 06:43:56.493","130","6.746","128","8392704" +"2020-03-01 06:43:57.493","130","6.744","128","8392704" +"2020-03-01 06:43:58.493","130","6.742","128","8392704" +"2020-03-01 06:43:59.493","130","6.74","128","8392704" +"2020-03-01 06:44:00.493","130","6.739","128","8392704" +"2020-03-01 06:44:01.493","130","6.739","128","8392704" +"2020-03-01 06:44:02.493","130","6.742","128","8392704" +"2020-03-01 06:44:03.493","130","6.742","128","8392704" +"2020-03-01 06:44:04.493","130","6.756","128","8392704" +"2020-03-01 06:44:05.493","130","6.757","128","8392704" +"2020-03-01 06:44:06.493","130","6.757","128","8392704" +"2020-03-01 06:44:07.493","130","6.757","128","8392704" +"2020-03-01 06:44:08.493","130","6.759","128","8392704" +"2020-03-01 06:44:09.493","130","6.759","128","8392704" +"2020-03-01 06:44:10.493","130","6.75","128","8392704" +"2020-03-01 06:44:11.493","130","6.744","128","8392704" +"2020-03-01 06:44:12.493","130","6.739","128","8392704" +"2020-03-01 06:44:13.493","130","6.739","128","8392704" +"2020-03-01 06:44:14.493","130","6.736","128","8392704" +"2020-03-01 06:44:15.493","130","6.734","128","8392704" +"2020-03-01 06:44:16.493","130","6.735","128","8392704" +"2020-03-01 06:44:17.493","130","6.734","128","8392704" +"2020-03-01 06:44:18.493","130","6.736","128","8392704" +"2020-03-01 06:44:19.493","130","6.741","128","8392704" +"2020-03-01 06:44:20.493","130","6.744","128","8392704" +"2020-03-01 06:44:21.493","130","6.746","128","8392704" +"2020-03-01 06:44:22.493","130","6.746","128","8392704" +"2020-03-01 06:44:23.493","130","6.748","128","8392704" +"2020-03-01 06:44:24.493","130","6.751","128","8392704" +"2020-03-01 06:44:25.493","130","6.752","128","8392704" +"2020-03-01 06:44:26.493","130","6.752","128","8392704" +"2020-03-01 06:44:27.493","130","6.752","128","8392704" +"2020-03-01 06:44:28.493","130","6.753","128","8392704" +"2020-03-01 06:44:29.493","130","6.751","128","8392704" +"2020-03-01 
06:44:30.493","130","6.751","128","8392704" +"2020-03-01 06:44:31.493","130","6.749","128","8392704" +"2020-03-01 06:44:32.493","130","6.747","128","8392704" +"2020-03-01 06:44:33.493","130","6.748","128","8392704" +"2020-03-01 06:44:34.493","130","6.749","128","8392704" +"2020-03-01 06:44:35.493","130","6.746","128","8392704" +"2020-03-01 06:44:36.493","130","6.742","128","8392704" +"2020-03-01 06:44:37.493","130","6.742","128","8392704" +"2020-03-01 06:44:38.493","130","6.743","128","8392704" +"2020-03-01 06:44:39.493","130","6.743","128","8392704" +"2020-03-01 06:44:40.493","130","6.743","128","8392704" +"2020-03-01 06:44:41.493","130","6.741","128","8392704" +"2020-03-01 06:44:42.493","130","6.741","128","8392704" +"2020-03-01 06:44:43.493","130","6.741","128","8392704" +"2020-03-01 06:44:44.493","130","6.74","128","8392704" +"2020-03-01 06:44:45.493","130","6.74","128","8392704" +"2020-03-01 06:44:46.493","130","6.739","128","8392704" +"2020-03-01 06:44:47.493","130","6.738","128","8392704" +"2020-03-01 06:44:48.493","130","6.738","128","8392704" +"2020-03-01 06:44:49.493","130","6.741","128","8392704" +"2020-03-01 06:44:50.493","130","6.749","128","8392704" +"2020-03-01 06:44:51.493","130","6.756","128","8392704" +"2020-03-01 06:44:52.493","130","6.763","128","8392704" +"2020-03-01 06:44:53.493","130","6.768","128","8392704" +"2020-03-01 06:44:54.493","130","6.771","128","8392704" +"2020-03-01 06:44:55.493","130","6.774","128","8392704" +"2020-03-01 06:44:56.493","130","6.774","128","8392704" +"2020-03-01 06:44:57.493","130","6.774","128","8392704" +"2020-03-01 06:44:58.493","130","6.765","128","8392704" +"2020-03-01 06:44:59.493","130","6.763","128","8392704" +"2020-03-01 06:45:00.493","130","6.761","128","8392704" +"2020-03-01 06:45:01.493","130","6.758","128","8392704" +"2020-03-01 06:45:02.493","130","6.756","128","8392704" +"2020-03-01 06:45:03.493","130","6.756","128","8392704" +"2020-03-01 06:45:04.493","130","6.756","128","8392704" +"2020-03-01 
06:45:05.493","130","6.763","128","8392704" +"2020-03-01 06:45:06.493","130","6.763","128","8392704" +"2020-03-01 06:45:07.493","130","6.764","128","8392704" +"2020-03-01 06:45:08.493","130","6.762","128","8392704" +"2020-03-01 06:45:09.493","130","6.763","128","8392704" +"2020-03-01 06:45:10.493","130","6.764","128","8392704" +"2020-03-01 06:45:11.493","130","6.763","128","8392704" +"2020-03-01 06:45:12.493","130","6.76","128","8392704" +"2020-03-01 06:45:13.493","130","6.759","128","8392704" +"2020-03-01 06:45:14.493","130","6.758","128","8392704" +"2020-03-01 06:45:15.493","130","6.758","128","8392704" +"2020-03-01 06:45:16.493","130","6.755","128","8392704" +"2020-03-01 20:04:50.493","130","7.59","128","8392704" +"2020-03-01 20:04:51.493","130","7.592","128","8392704" +"2020-03-01 20:04:52.493","130","7.592","128","8392704" +"2020-03-01 20:04:53.493","130","7.593","128","8392704" +"2020-03-01 20:04:54.493","130","7.592","128","8392704" +"2020-03-01 20:04:55.493","130","7.592","128","8392704" +"2020-03-01 20:04:56.493","130","7.593","128","8392704" +"2020-03-01 20:04:57.493","130","7.593","128","8392704" +"2020-03-01 20:04:58.493","130","7.593","128","8392704" +"2020-03-01 20:04:59.493","130","7.594","128","8392704" +"2020-03-01 20:05:00.493","130","7.595","128","8392704" +"2020-03-01 20:05:01.493","130","7.596","128","8392704" +"2020-03-01 20:05:02.493","130","7.595","128","8392704" +"2020-03-01 20:05:03.493","130","7.595","128","8392704" +"2020-03-01 20:05:04.493","130","7.594","128","8392704" +"2020-03-01 20:05:05.493","130","7.595","128","8392704" +"2020-03-01 20:05:06.493","130","7.598","128","8392704" +"2020-03-01 20:05:07.493","130","7.597","128","8392704" +"2020-03-01 20:05:08.493","130","7.595","128","8392704" +"2020-03-01 20:05:09.493","130","7.597","128","8392704" +"2020-03-01 20:05:10.493","130","7.598","128","8392704" +"2020-03-01 20:05:11.493","130","7.598","128","8392704" +"2020-03-01 20:05:12.493","130","7.597","128","8392704" +"2020-03-01 
20:05:13.493","130","7.595","128","8392704" +"2020-03-01 20:05:14.493","130","7.591","128","8392704" +"2020-03-01 20:05:15.493","130","7.589","128","8392704" +"2020-03-01 20:05:16.493","130","7.588","128","8392704" +"2020-03-01 20:05:17.493","130","7.589","128","8392704" +"2020-03-01 20:05:18.493","130","7.589","128","8392704" +"2020-03-01 20:05:19.493","130","7.589","128","8392704" +"2020-03-01 20:05:20.493","130","7.587","128","8392704" +"2020-03-01 20:05:21.493","130","7.584","128","8392704" +"2020-03-01 20:05:22.493","130","7.583","128","8392704" +"2020-03-01 20:05:23.493","130","7.585","128","8392704" +"2020-03-01 20:05:24.493","130","7.586","128","8392704" +"2020-03-01 20:05:25.493","130","7.586","128","8392704" +"2020-03-01 20:05:26.493","130","7.586","128","8392704" +"2020-03-01 20:05:27.493","130","7.586","128","8392704" +"2020-03-01 20:05:28.493","130","7.587","128","8392704" +"2020-03-01 20:05:29.493","130","7.585","128","8392704" +"2020-03-01 20:05:30.493","130","7.584","128","8392704" +"2020-03-01 20:05:31.493","130","7.586","128","8392704" +"2020-03-01 20:05:32.493","130","7.589","128","8392704" +"2020-03-01 20:05:33.493","130","7.59","128","8392704" +"2020-03-01 20:05:34.493","130","7.591","128","8392704" +"2020-03-01 20:05:35.493","130","7.591","128","8392704" +"2020-03-01 20:05:36.493","130","7.594","128","8392704" +"2020-03-01 20:05:37.493","130","7.599","128","8392704" +"2020-03-01 20:05:38.493","130","7.602","128","8392704" +"2020-03-01 20:05:39.493","130","7.604","128","8392704" +"2020-03-01 20:05:40.493","130","7.605","128","8392704" +"2020-03-01 20:05:41.493","130","7.607","128","8392704" +"2020-03-01 20:05:42.493","130","7.607","128","8392704" +"2020-03-01 20:05:43.493","130","7.604","128","8392704" +"2020-03-01 20:05:44.493","130","7.597","128","8392704" +"2020-03-01 20:05:45.493","130","7.592","128","8392704" +"2020-03-01 20:05:46.493","130","7.59","128","8392704" +"2020-03-01 20:05:47.493","130","7.59","128","8392704" +"2020-03-01 
20:05:48.493","130","7.591","128","8392704" +"2020-03-01 20:05:49.493","130","7.591","128","8392704" +"2020-03-01 20:05:50.493","130","7.591","128","8392704" +"2020-03-01 20:05:51.493","130","7.594","128","8392704" +"2020-03-01 20:05:52.493","130","7.599","128","8392704" +"2020-03-01 20:05:53.493","130","7.601","128","8392704" +"2020-03-01 20:05:54.493","130","7.602","128","8392704" +"2020-03-01 20:05:55.493","130","7.602","128","8392704" +"2020-03-01 20:05:56.493","130","7.602","128","8392704" +"2020-03-01 20:05:57.493","130","7.603","128","8392704" +"2020-03-01 20:05:58.493","130","7.604","128","8392704" +"2020-03-01 20:05:59.493","130","7.604","128","8392704" +"2020-03-01 20:06:00.493","130","7.605","128","8392704" +"2020-03-01 20:06:01.493","130","7.606","128","8392704" +"2020-03-01 20:06:02.493","130","7.607","128","8392704" +"2020-03-01 20:06:03.493","130","7.605","128","8392704" +"2020-03-01 20:06:04.493","130","7.604","128","8392704" +"2020-03-01 20:06:05.493","130","7.603","128","8392704" +"2020-03-01 20:06:06.493","130","7.602","128","8392704" +"2020-03-01 20:06:07.493","130","7.603","128","8392704" +"2020-03-01 20:06:08.493","130","7.604","128","8392704" \ No newline at end of file diff --git a/tests/pytest/test_data/ordered.csv b/tests/pytest/test_data/ordered.csv new file mode 100644 index 0000000000000000000000000000000000000000..14da572d75e3c9bef32d6de7696ce65485b06d23 --- /dev/null +++ b/tests/pytest/test_data/ordered.csv @@ -0,0 +1,500 @@ +"2020-03-01 19:46:50.493","130","7.617","128","8392704" +"2020-03-01 19:46:51.493","130","7.615","128","8392704" +"2020-03-01 19:46:52.493","130","7.613","128","8392704" +"2020-03-01 19:46:53.493","130","7.612","128","8392704" +"2020-03-01 19:46:54.493","130","7.611","128","8392704" +"2020-03-01 19:46:55.493","130","7.612","128","8392704" +"2020-03-01 19:46:56.493","130","7.611","128","8392704" +"2020-03-01 19:46:57.493","130","7.61","128","8392704" +"2020-03-01 19:46:58.493","130","7.61","128","8392704" 
+"2020-03-01 19:46:59.493","130","7.613","128","8392704" +"2020-03-01 19:47:00.493","130","7.617","128","8392704" +"2020-03-01 19:47:01.493","130","7.618","128","8392704" +"2020-03-01 19:47:02.493","130","7.619","128","8392704" +"2020-03-01 19:47:03.493","130","7.62","128","8392704" +"2020-03-01 19:47:04.493","130","7.619","128","8392704" +"2020-03-01 19:47:05.493","130","7.62","128","8392704" +"2020-03-01 19:47:06.493","130","7.62","128","8392704" +"2020-03-01 19:47:07.493","130","7.618","128","8392704" +"2020-03-01 19:47:08.493","130","7.618","128","8392704" +"2020-03-01 19:47:09.493","130","7.616","128","8392704" +"2020-03-01 19:47:10.493","130","7.615","128","8392704" +"2020-03-01 19:47:11.493","130","7.614","128","8392704" +"2020-03-01 19:47:12.493","130","7.614","128","8392704" +"2020-03-01 19:47:13.493","130","7.615","128","8392704" +"2020-03-01 19:47:14.493","130","7.617","128","8392704" +"2020-03-01 19:47:15.493","130","7.617","128","8392704" +"2020-03-01 19:47:16.493","130","7.612","128","8392704" +"2020-03-01 19:47:17.493","130","7.609","128","8392704" +"2020-03-01 19:47:18.493","130","7.609","128","8392704" +"2020-03-01 19:47:19.493","130","7.609","128","8392704" +"2020-03-01 19:47:20.493","130","7.611","128","8392704" +"2020-03-01 19:47:21.493","130","7.613","128","8392704" +"2020-03-01 19:47:22.493","130","7.612","128","8392704" +"2020-03-01 19:47:23.493","130","7.612","128","8392704" +"2020-03-01 19:47:24.493","130","7.612","128","8392704" +"2020-03-01 19:47:25.493","130","7.613","128","8392704" +"2020-03-01 19:47:26.493","130","7.617","128","8392704" +"2020-03-01 19:47:27.493","130","7.62","128","8392704" +"2020-03-01 19:47:28.493","130","7.621","128","8392704" +"2020-03-01 19:47:29.493","130","7.621","128","8392704" +"2020-03-01 19:47:30.493","130","7.623","128","8392704" +"2020-03-01 19:47:31.493","130","7.624","128","8392704" +"2020-03-01 19:47:32.493","130","7.621","128","8392704" +"2020-03-01 19:47:33.493","130","7.619","128","8392704" 
+"2020-03-01 19:47:34.493","130","7.618","128","8392704" +"2020-03-01 19:47:35.493","130","7.616","128","8392704" +"2020-03-01 19:47:36.493","130","7.618","128","8392704" +"2020-03-01 19:47:37.493","130","7.618","128","8392704" +"2020-03-01 19:47:38.493","130","7.616","128","8392704" +"2020-03-01 19:47:39.493","130","7.615","128","8392704" +"2020-03-01 19:47:40.493","130","7.615","128","8392704" +"2020-03-01 19:47:41.493","130","7.614","128","8392704" +"2020-03-01 19:47:42.493","130","7.613","128","8392704" +"2020-03-01 19:47:43.493","130","7.612","128","8392704" +"2020-03-01 19:47:44.493","130","7.611","128","8392704" +"2020-03-01 19:47:45.493","130","7.612","128","8392704" +"2020-03-01 19:47:46.493","130","7.614","128","8392704" +"2020-03-01 19:47:47.493","130","7.618","128","8392704" +"2020-03-01 19:47:48.493","130","7.62","128","8392704" +"2020-03-01 19:47:49.493","130","7.62","128","8392704" +"2020-03-01 19:47:50.493","130","7.621","128","8392704" +"2020-03-01 19:47:51.493","130","7.62","128","8392704" +"2020-03-01 19:47:52.493","130","7.619","128","8392704" +"2020-03-01 19:47:53.493","130","7.621","128","8392704" +"2020-03-01 19:47:54.493","130","7.622","128","8392704" +"2020-03-01 19:47:55.493","130","7.622","128","8392704" +"2020-03-01 19:47:56.493","130","7.62","128","8392704" +"2020-03-01 19:47:57.493","130","7.617","128","8392704" +"2020-03-01 19:47:58.493","130","7.616","128","8392704" +"2020-03-01 19:47:59.493","130","7.618","128","8392704" +"2020-03-01 19:48:00.493","130","7.62","128","8392704" +"2020-03-01 19:48:01.493","130","7.62","128","8392704" +"2020-03-01 19:48:02.493","130","7.616","128","8392704" +"2020-03-01 19:48:03.493","130","7.612","128","8392704" +"2020-03-01 19:48:04.493","130","7.609","128","8392704" +"2020-03-01 19:48:05.493","130","7.608","128","8392704" +"2020-03-01 19:48:06.493","130","7.605","128","8392704" +"2020-03-01 19:48:07.493","130","7.604","128","8392704" +"2020-03-01 19:48:08.493","130","7.605","128","8392704" 
+"2020-03-01 19:48:09.493","130","7.604","128","8392704" +"2020-03-01 19:48:10.493","130","7.604","128","8392704" +"2020-03-01 19:48:11.493","130","7.608","128","8392704" +"2020-03-01 19:48:12.493","130","7.611","128","8392704" +"2020-03-01 19:48:13.493","130","7.614","128","8392704" +"2020-03-01 19:48:14.493","130","7.616","128","8392704" +"2020-03-01 19:48:15.493","130","7.618","128","8392704" +"2020-03-01 19:48:16.493","130","7.62","128","8392704" +"2020-03-01 19:48:17.493","130","7.617","128","8392704" +"2020-03-01 19:48:18.493","130","7.61","128","8392704" +"2020-03-01 19:48:19.493","130","7.607","128","8392704" +"2020-03-01 19:48:20.493","130","7.604","128","8392704" +"2020-03-01 19:48:21.493","130","7.601","128","8392704" +"2020-03-01 19:48:22.493","130","7.601","128","8392704" +"2020-03-01 19:48:23.493","130","7.601","128","8392704" +"2020-03-01 19:48:24.493","130","7.598","128","8392704" +"2020-03-01 19:48:25.493","130","7.598","128","8392704" +"2020-03-01 19:48:26.493","130","7.604","128","8392704" +"2020-03-01 19:48:27.493","130","7.608","128","8392704" +"2020-03-01 19:48:28.493","130","7.609","128","8392704" +"2020-03-01 19:48:29.493","130","7.61","128","8392704" +"2020-03-01 19:48:30.493","130","7.611","128","8392704" +"2020-03-01 19:48:31.493","130","7.614","128","8392704" +"2020-03-01 19:48:32.493","130","7.614","128","8392704" +"2020-03-01 19:48:33.493","130","7.611","128","8392704" +"2020-03-01 19:48:34.493","130","7.607","128","8392704" +"2020-03-01 19:48:35.493","130","7.601","128","8392704" +"2020-03-01 19:48:36.493","130","7.596","128","8392704" +"2020-03-01 19:48:37.493","130","7.593","128","8392704" +"2020-03-01 19:48:38.493","130","7.593","128","8392704" +"2020-03-01 19:48:39.493","130","7.593","128","8392704" +"2020-03-01 19:48:40.493","130","7.595","128","8392704" +"2020-03-01 19:48:41.493","130","7.596","128","8392704" +"2020-03-01 19:48:42.493","130","7.599","128","8392704" +"2020-03-01 19:48:43.493","130","7.603","128","8392704" 
+"2020-03-01 19:48:44.493","130","7.605","128","8392704" +"2020-03-01 19:48:45.493","130","7.607","128","8392704" +"2020-03-01 19:48:46.493","130","7.608","128","8392704" +"2020-03-01 19:48:47.493","130","7.609","128","8392704" +"2020-03-01 19:48:48.493","130","7.61","128","8392704" +"2020-03-01 19:48:49.493","130","7.608","128","8392704" +"2020-03-01 19:48:50.493","130","7.605","128","8392704" +"2020-03-01 19:48:51.493","130","7.605","128","8392704" +"2020-03-01 19:48:52.493","130","7.607","128","8392704" +"2020-03-01 19:48:53.493","130","7.608","128","8392704" +"2020-03-01 19:48:54.493","130","7.608","128","8392704" +"2020-03-01 19:48:55.493","130","7.608","128","8392704" +"2020-03-01 19:48:56.493","130","7.61","128","8392704" +"2020-03-01 19:48:57.493","130","7.613","128","8392704" +"2020-03-01 19:48:58.493","130","7.612","128","8392704" +"2020-03-01 19:48:59.493","130","7.61","128","8392704" +"2020-03-01 19:49:00.493","130","7.609","128","8392704" +"2020-03-01 19:49:01.493","130","7.61","128","8392704" +"2020-03-01 19:49:02.493","130","7.611","128","8392704" +"2020-03-01 19:49:03.493","130","7.61","128","8392704" +"2020-03-01 19:49:04.493","130","7.61","128","8392704" +"2020-03-01 19:49:05.493","130","7.613","128","8392704" +"2020-03-01 19:49:06.493","130","7.615","128","8392704" +"2020-03-01 19:49:07.493","130","7.614","128","8392704" +"2020-03-01 19:49:08.493","130","7.613","128","8392704" +"2020-03-01 19:49:09.493","130","7.613","128","8392704" +"2020-03-01 19:49:10.493","130","7.615","128","8392704" +"2020-03-01 19:49:11.493","130","7.619","128","8392704" +"2020-03-01 19:49:12.493","130","7.62","128","8392704" +"2020-03-01 19:49:13.493","130","7.618","128","8392704" +"2020-03-01 19:49:14.493","130","7.619","128","8392704" +"2020-03-01 19:49:15.493","130","7.618","128","8392704" +"2020-03-01 19:49:16.493","130","7.617","128","8392704" +"2020-03-01 19:49:17.493","130","7.617","128","8392704" +"2020-03-01 19:49:18.493","130","7.618","128","8392704" 
+"2020-03-01 19:49:19.493","130","7.617","128","8392704" +"2020-03-01 19:49:20.493","130","7.616","128","8392704" +"2020-03-01 19:49:21.493","130","7.615","128","8392704" +"2020-03-01 19:49:22.493","130","7.616","128","8392704" +"2020-03-01 19:49:23.493","130","7.617","128","8392704" +"2020-03-01 19:49:24.493","130","7.615","128","8392704" +"2020-03-01 19:49:25.493","130","7.613","128","8392704" +"2020-03-01 19:49:26.493","130","7.612","128","8392704" +"2020-03-01 19:49:27.493","130","7.613","128","8392704" +"2020-03-01 19:49:28.493","130","7.614","128","8392704" +"2020-03-01 19:49:29.493","130","7.612","128","8392704" +"2020-03-01 19:49:30.493","130","7.611","128","8392704" +"2020-03-01 19:49:31.493","130","7.611","128","8392704" +"2020-03-01 19:49:32.493","130","7.612","128","8392704" +"2020-03-01 19:49:33.493","130","7.613","128","8392704" +"2020-03-01 19:49:34.493","130","7.614","128","8392704" +"2020-03-01 19:49:35.493","130","7.612","128","8392704" +"2020-03-01 19:49:36.493","130","7.607","128","8392704" +"2020-03-01 19:49:37.493","130","7.603","128","8392704" +"2020-03-01 19:49:38.493","130","7.599","128","8392704" +"2020-03-01 19:49:39.493","130","7.599","128","8392704" +"2020-03-01 19:49:40.493","130","7.599","128","8392704" +"2020-03-01 19:49:41.493","130","7.599","128","8392704" +"2020-03-01 19:49:42.493","130","7.601","128","8392704" +"2020-03-01 19:49:43.493","130","7.605","128","8392704" +"2020-03-01 19:49:44.493","130","7.606","128","8392704" +"2020-03-01 19:49:45.493","130","7.606","128","8392704" +"2020-03-01 19:49:46.493","130","7.606","128","8392704" +"2020-03-01 19:49:47.493","130","7.604","128","8392704" +"2020-03-01 19:49:48.493","130","7.604","128","8392704" +"2020-03-01 19:49:49.493","130","7.603","128","8392704" +"2020-03-01 19:49:50.493","130","7.604","128","8392704" +"2020-03-01 19:49:51.493","130","7.608","128","8392704" +"2020-03-01 19:49:52.493","130","7.614","128","8392704" +"2020-03-01 19:49:53.493","130","7.618","128","8392704" 
+"2020-03-01 19:49:54.493","130","7.621","128","8392704" +"2020-03-01 19:49:55.493","130","7.623","128","8392704" +"2020-03-01 19:49:56.493","130","7.623","128","8392704" +"2020-03-01 19:49:57.493","130","7.624","128","8392704" +"2020-03-01 19:49:58.493","130","7.626","128","8392704" +"2020-03-01 19:49:59.493","130","7.628","128","8392704" +"2020-03-01 19:50:00.493","130","7.627","128","8392704" +"2020-03-01 19:50:01.493","130","7.625","128","8392704" +"2020-03-01 19:50:02.493","130","7.627","128","8392704" +"2020-03-01 19:50:03.493","130","7.63","128","8392704" +"2020-03-01 19:50:04.493","130","7.633","128","8392704" +"2020-03-01 19:50:05.493","130","7.635","128","8392704" +"2020-03-01 19:50:06.493","130","7.634","128","8392704" +"2020-03-01 19:50:07.493","130","7.632","128","8392704" +"2020-03-01 19:50:08.493","130","7.628","128","8392704" +"2020-03-01 19:50:09.493","130","7.625","128","8392704" +"2020-03-01 19:50:10.493","130","7.625","128","8392704" +"2020-03-01 19:50:11.493","130","7.623","128","8392704" +"2020-03-01 19:50:12.493","130","7.623","128","8392704" +"2020-03-01 19:50:13.493","130","7.623","128","8392704" +"2020-03-01 19:50:14.493","130","7.622","128","8392704" +"2020-03-01 19:50:15.493","130","7.621","128","8392704" +"2020-03-01 19:50:16.493","130","7.618","128","8392704" +"2020-03-01 19:50:17.493","130","7.618","128","8392704" +"2020-03-01 19:50:18.493","130","7.617","128","8392704" +"2020-03-01 19:50:19.493","130","7.616","128","8392704" +"2020-03-01 19:50:20.493","130","7.615","128","8392704" +"2020-03-01 19:50:21.493","130","7.615","128","8392704" +"2020-03-01 19:50:22.493","130","7.616","128","8392704" +"2020-03-01 19:50:23.493","130","7.619","128","8392704" +"2020-03-01 19:50:24.493","130","7.622","128","8392704" +"2020-03-01 19:50:25.493","130","7.624","128","8392704" +"2020-03-01 19:50:26.493","130","7.627","128","8392704" +"2020-03-01 19:50:27.493","130","7.627","128","8392704" +"2020-03-01 19:50:28.493","130","7.625","128","8392704" 
+"2020-03-01 19:50:29.493","130","7.625","128","8392704" +"2020-03-01 19:50:30.493","130","7.625","128","8392704" +"2020-03-01 19:50:31.493","130","7.624","128","8392704" +"2020-03-01 19:50:32.493","130","7.624","128","8392704" +"2020-03-01 19:50:33.493","130","7.624","128","8392704" +"2020-03-01 19:50:34.493","130","7.626","128","8392704" +"2020-03-01 19:50:35.493","130","7.627","128","8392704" +"2020-03-01 19:50:36.493","130","7.627","128","8392704" +"2020-03-01 19:50:37.493","130","7.626","128","8392704" +"2020-03-01 19:50:38.493","130","7.623","128","8392704" +"2020-03-01 19:50:39.493","130","7.619","128","8392704" +"2020-03-01 19:50:40.493","130","7.616","128","8392704" +"2020-03-01 19:50:41.493","130","7.616","128","8392704" +"2020-03-01 19:50:42.493","130","7.615","128","8392704" +"2020-03-01 19:50:43.493","130","7.613","128","8392704" +"2020-03-01 19:50:44.493","130","7.614","128","8392704" +"2020-03-01 19:50:45.493","130","7.614","128","8392704" +"2020-03-01 19:50:46.493","130","7.612","128","8392704" +"2020-03-01 19:50:47.493","130","7.611","128","8392704" +"2020-03-01 19:50:48.493","130","7.611","128","8392704" +"2020-03-01 19:50:49.493","130","7.611","128","8392704" +"2020-03-01 19:50:50.493","130","7.612","128","8392704" +"2020-03-01 19:50:51.493","130","7.613","128","8392704" +"2020-03-01 19:50:52.493","130","7.613","128","8392704" +"2020-03-01 19:50:53.493","130","7.615","128","8392704" +"2020-03-01 19:50:54.493","130","7.617","128","8392704" +"2020-03-01 19:50:55.493","130","7.617","128","8392704" +"2020-03-01 19:50:56.493","130","7.619","128","8392704" +"2020-03-01 19:50:57.493","130","7.622","128","8392704" +"2020-03-01 19:50:58.493","130","7.624","128","8392704" +"2020-03-01 19:50:59.493","130","7.625","128","8392704" +"2020-03-01 19:51:00.493","130","7.624","128","8392704" +"2020-03-01 19:51:01.493","130","7.624","128","8392704" +"2020-03-01 19:51:02.493","130","7.622","128","8392704" +"2020-03-01 19:51:03.493","130","7.62","128","8392704" 
+"2020-03-01 19:51:04.493","130","7.617","128","8392704" +"2020-03-01 19:51:05.493","130","7.617","128","8392704" +"2020-03-01 19:51:06.493","130","7.618","128","8392704" +"2020-03-01 19:51:07.493","130","7.618","128","8392704" +"2020-03-01 19:51:08.493","130","7.618","128","8392704" +"2020-03-01 19:51:09.493","130","7.62","128","8392704" +"2020-03-01 19:51:10.493","130","7.622","128","8392704" +"2020-03-01 19:51:11.493","130","7.623","128","8392704" +"2020-03-01 19:51:12.493","130","7.624","128","8392704" +"2020-03-01 19:51:13.493","130","7.625","128","8392704" +"2020-03-01 19:51:14.493","130","7.626","128","8392704" +"2020-03-01 19:51:15.493","130","7.626","128","8392704" +"2020-03-01 19:51:16.493","130","7.626","128","8392704" +"2020-03-01 19:51:17.493","130","7.627","128","8392704" +"2020-03-01 19:51:18.493","130","7.627","128","8392704" +"2020-03-01 19:51:19.493","130","7.629","128","8392704" +"2020-03-01 19:51:20.493","130","7.629","128","8392704" +"2020-03-01 19:51:21.493","130","7.626","128","8392704" +"2020-03-01 19:51:22.493","130","7.625","128","8392704" +"2020-03-01 19:51:23.493","130","7.625","128","8392704" +"2020-03-01 19:51:24.493","130","7.626","128","8392704" +"2020-03-01 19:51:25.493","130","7.626","128","8392704" +"2020-03-01 19:51:26.493","130","7.624","128","8392704" +"2020-03-01 19:51:27.493","130","7.623","128","8392704" +"2020-03-01 19:51:28.493","130","7.624","128","8392704" +"2020-03-01 19:51:29.493","130","7.624","128","8392704" +"2020-03-01 19:51:30.493","130","7.624","128","8392704" +"2020-03-01 19:51:31.493","130","7.624","128","8392704" +"2020-03-01 19:51:32.493","130","7.626","128","8392704" +"2020-03-01 19:51:33.493","130","7.626","128","8392704" +"2020-03-01 19:51:34.493","130","7.626","128","8392704" +"2020-03-01 19:51:35.493","130","7.625","128","8392704" +"2020-03-01 19:51:36.493","130","7.624","128","8392704" +"2020-03-01 19:51:37.493","130","7.623","128","8392704" +"2020-03-01 19:51:38.493","130","7.622","128","8392704" 
+"2020-03-01 19:51:39.493","130","7.62","128","8392704" +"2020-03-01 19:51:40.493","130","7.62","128","8392704" +"2020-03-01 19:51:41.493","130","7.62","128","8392704" +"2020-03-01 19:51:42.493","130","7.621","128","8392704" +"2020-03-01 19:51:43.493","130","7.62","128","8392704" +"2020-03-01 19:51:44.493","130","7.619","128","8392704" +"2020-03-01 19:51:45.493","130","7.62","128","8392704" +"2020-03-01 19:51:46.493","130","7.62","128","8392704" +"2020-03-01 19:51:47.493","130","7.618","128","8392704" +"2020-03-01 19:51:48.493","130","7.619","128","8392704" +"2020-03-01 19:51:49.493","130","7.62","128","8392704" +"2020-03-01 19:51:50.493","130","7.622","128","8392704" +"2020-03-01 19:51:51.493","130","7.622","128","8392704" +"2020-03-01 19:51:52.493","130","7.62","128","8392704" +"2020-03-01 19:51:53.493","130","7.62","128","8392704" +"2020-03-01 19:51:54.493","130","7.622","128","8392704" +"2020-03-01 19:51:55.493","130","7.624","128","8392704" +"2020-03-01 19:51:56.493","130","7.622","128","8392704" +"2020-03-01 19:51:57.493","130","7.616","128","8392704" +"2020-03-01 19:51:58.493","130","7.611","128","8392704" +"2020-03-01 19:51:59.493","130","7.61","128","8392704" +"2020-03-01 19:52:00.493","130","7.608","128","8392704" +"2020-03-01 19:52:01.493","130","7.606","128","8392704" +"2020-03-01 19:52:02.493","130","7.607","128","8392704" +"2020-03-01 19:52:03.493","130","7.608","128","8392704" +"2020-03-01 19:52:04.493","130","7.61","128","8392704" +"2020-03-01 19:52:05.493","130","7.612","128","8392704" +"2020-03-01 19:52:06.493","130","7.615","128","8392704" +"2020-03-01 19:52:07.493","130","7.62","128","8392704" +"2020-03-01 19:52:08.493","130","7.623","128","8392704" +"2020-03-01 19:52:09.493","130","7.624","128","8392704" +"2020-03-01 19:52:10.493","130","7.623","128","8392704" +"2020-03-01 19:52:11.493","130","7.623","128","8392704" +"2020-03-01 19:52:12.493","130","7.624","128","8392704" +"2020-03-01 19:52:13.493","130","7.622","128","8392704" +"2020-03-01 
19:52:14.493","130","7.62","128","8392704" +"2020-03-01 19:52:15.493","130","7.621","128","8392704" +"2020-03-01 19:52:16.493","130","7.62","128","8392704" +"2020-03-01 19:52:17.493","130","7.622","128","8392704" +"2020-03-01 19:52:18.493","130","7.625","128","8392704" +"2020-03-01 19:52:19.493","130","7.627","128","8392704" +"2020-03-01 19:52:20.493","130","7.625","128","8392704" +"2020-03-01 19:52:21.493","130","7.621","128","8392704" +"2020-03-01 19:52:22.493","130","7.617","128","8392704" +"2020-03-01 19:52:23.493","130","7.617","128","8392704" +"2020-03-01 19:52:24.493","130","7.617","128","8392704" +"2020-03-01 19:52:25.493","130","7.616","128","8392704" +"2020-03-01 19:52:26.493","130","7.615","128","8392704" +"2020-03-01 19:52:27.493","130","7.616","128","8392704" +"2020-03-01 19:52:28.493","130","7.619","128","8392704" +"2020-03-01 19:52:29.493","130","7.621","128","8392704" +"2020-03-01 19:52:30.493","130","7.621","128","8392704" +"2020-03-01 19:52:31.493","130","7.621","128","8392704" +"2020-03-01 19:52:32.493","130","7.621","128","8392704" +"2020-03-01 19:52:33.493","130","7.621","128","8392704" +"2020-03-01 19:52:34.493","130","7.623","128","8392704" +"2020-03-01 19:52:35.493","130","7.621","128","8392704" +"2020-03-01 19:52:36.493","130","7.617","128","8392704" +"2020-03-01 19:52:37.493","130","7.615","128","8392704" +"2020-03-01 19:52:38.493","130","7.612","128","8392704" +"2020-03-01 19:52:39.493","130","7.609","128","8392704" +"2020-03-01 19:52:40.493","130","7.606","128","8392704" +"2020-03-01 19:52:41.493","130","7.606","128","8392704" +"2020-03-01 19:52:42.493","130","7.609","128","8392704" +"2020-03-01 19:52:43.493","130","7.612","128","8392704" +"2020-03-01 19:52:44.493","130","7.616","128","8392704" +"2020-03-01 19:52:45.493","130","7.619","128","8392704" +"2020-03-01 19:52:46.493","130","7.62","128","8392704" +"2020-03-01 19:52:47.493","130","7.622","128","8392704" +"2020-03-01 19:52:48.493","130","7.622","128","8392704" +"2020-03-01 
19:52:49.493","130","7.621","128","8392704" +"2020-03-01 19:52:50.493","130","7.618","128","8392704" +"2020-03-01 19:52:51.493","130","7.616","128","8392704" +"2020-03-01 19:52:52.493","130","7.613","128","8392704" +"2020-03-01 19:52:53.493","130","7.612","128","8392704" +"2020-03-01 19:52:54.493","130","7.612","128","8392704" +"2020-03-01 19:52:55.493","130","7.611","128","8392704" +"2020-03-01 19:52:56.493","130","7.609","128","8392704" +"2020-03-01 19:52:57.493","130","7.608","128","8392704" +"2020-03-01 19:52:58.493","130","7.609","128","8392704" +"2020-03-01 19:52:59.493","130","7.611","128","8392704" +"2020-03-01 19:53:00.493","130","7.612","128","8392704" +"2020-03-01 19:53:01.493","130","7.614","128","8392704" +"2020-03-01 19:53:02.493","130","7.618","128","8392704" +"2020-03-01 19:53:03.493","130","7.62","128","8392704" +"2020-03-01 19:53:04.493","130","7.62","128","8392704" +"2020-03-01 19:53:05.493","130","7.62","128","8392704" +"2020-03-01 19:53:06.493","130","7.619","128","8392704" +"2020-03-01 19:53:07.493","130","7.617","128","8392704" +"2020-03-01 19:53:08.493","130","7.615","128","8392704" +"2020-03-01 19:53:09.493","130","7.612","128","8392704" +"2020-03-01 19:53:10.493","130","7.61","128","8392704" +"2020-03-01 19:53:11.493","130","7.609","128","8392704" +"2020-03-01 19:53:12.493","130","7.608","128","8392704" +"2020-03-01 19:53:13.493","130","7.61","128","8392704" +"2020-03-01 19:53:14.493","130","7.611","128","8392704" +"2020-03-01 19:53:15.493","130","7.609","128","8392704" +"2020-03-01 19:53:16.493","130","7.608","128","8392704" +"2020-03-01 19:53:17.493","130","7.608","128","8392704" +"2020-03-01 19:53:18.493","130","7.607","128","8392704" +"2020-03-01 19:53:19.493","130","7.607","128","8392704" +"2020-03-01 19:53:20.493","130","7.605","128","8392704" +"2020-03-01 19:53:21.493","130","7.603","128","8392704" +"2020-03-01 19:53:22.493","130","7.606","128","8392704" +"2020-03-01 19:53:23.493","130","7.611","128","8392704" +"2020-03-01 
19:53:24.493","130","7.615","128","8392704" +"2020-03-01 19:53:25.493","130","7.618","128","8392704" +"2020-03-01 19:53:26.493","130","7.62","128","8392704" +"2020-03-01 19:53:27.493","130","7.622","128","8392704" +"2020-03-01 19:53:28.493","130","7.624","128","8392704" +"2020-03-01 19:53:29.493","130","7.626","128","8392704" +"2020-03-01 19:53:30.493","130","7.624","128","8392704" +"2020-03-01 19:53:31.493","130","7.617","128","8392704" +"2020-03-01 19:53:32.493","130","7.613","128","8392704" +"2020-03-01 19:53:33.493","130","7.613","128","8392704" +"2020-03-01 19:53:34.493","130","7.613","128","8392704" +"2020-03-01 19:53:35.493","130","7.61","128","8392704" +"2020-03-01 19:53:36.493","130","7.609","128","8392704" +"2020-03-01 19:53:37.493","130","7.611","128","8392704" +"2020-03-01 19:53:38.493","130","7.61","128","8392704" +"2020-03-01 19:53:39.493","130","7.609","128","8392704" +"2020-03-01 19:53:40.493","130","7.608","128","8392704" +"2020-03-01 19:53:41.493","130","7.605","128","8392704" +"2020-03-01 19:53:42.493","130","7.601","128","8392704" +"2020-03-01 19:53:43.493","130","7.6","128","8392704" +"2020-03-01 19:53:44.493","130","7.602","128","8392704" +"2020-03-01 19:53:45.493","130","7.604","128","8392704" +"2020-03-01 19:53:46.493","130","7.605","128","8392704" +"2020-03-01 19:53:47.493","130","7.606","128","8392704" +"2020-03-01 19:53:48.493","130","7.605","128","8392704" +"2020-03-01 19:53:49.493","130","7.605","128","8392704" +"2020-03-01 19:53:50.493","130","7.606","128","8392704" +"2020-03-01 19:53:51.493","130","7.606","128","8392704" +"2020-03-01 19:53:52.493","130","7.604","128","8392704" +"2020-03-01 19:53:53.493","130","7.606","128","8392704" +"2020-03-01 19:53:54.493","130","7.61","128","8392704" +"2020-03-01 19:53:55.493","130","7.612","128","8392704" +"2020-03-01 19:53:56.493","130","7.613","128","8392704" +"2020-03-01 19:53:57.493","130","7.613","128","8392704" +"2020-03-01 19:53:58.493","130","7.613","128","8392704" +"2020-03-01 
19:53:59.493","130","7.615","128","8392704" +"2020-03-01 19:54:00.493","130","7.616","128","8392704" +"2020-03-01 19:54:01.493","130","7.616","128","8392704" +"2020-03-01 19:54:02.493","130","7.618","128","8392704" +"2020-03-01 19:54:03.493","130","7.621","128","8392704" +"2020-03-01 19:54:04.493","130","7.623","128","8392704" +"2020-03-01 19:54:05.493","130","7.623","128","8392704" +"2020-03-01 19:54:06.493","130","7.624","128","8392704" +"2020-03-01 19:54:07.493","130","7.624","128","8392704" +"2020-03-01 19:54:08.493","130","7.622","128","8392704" +"2020-03-01 19:54:09.493","130","7.62","128","8392704" +"2020-03-01 19:54:10.493","130","7.621","128","8392704" +"2020-03-01 19:54:11.493","130","7.62","128","8392704" +"2020-03-01 19:54:12.493","130","7.617","128","8392704" +"2020-03-01 19:54:13.493","130","7.617","128","8392704" +"2020-03-01 19:54:14.493","130","7.618","128","8392704" +"2020-03-01 19:54:15.493","130","7.617","128","8392704" +"2020-03-01 19:54:16.493","130","7.615","128","8392704" +"2020-03-01 19:54:17.493","130","7.613","128","8392704" +"2020-03-01 19:54:18.493","130","7.612","128","8392704" +"2020-03-01 19:54:19.493","130","7.609","128","8392704" +"2020-03-01 19:54:20.493","130","7.605","128","8392704" +"2020-03-01 19:54:21.493","130","7.604","128","8392704" +"2020-03-01 19:54:22.493","130","7.605","128","8392704" +"2020-03-01 19:54:23.493","130","7.608","128","8392704" +"2020-03-01 19:54:24.493","130","7.611","128","8392704" +"2020-03-01 19:54:25.493","130","7.613","128","8392704" +"2020-03-01 19:54:26.493","130","7.613","128","8392704" +"2020-03-01 19:54:27.493","130","7.611","128","8392704" +"2020-03-01 19:54:28.493","130","7.611","128","8392704" +"2020-03-01 19:54:29.493","130","7.611","128","8392704" +"2020-03-01 19:54:30.493","130","7.612","128","8392704" +"2020-03-01 19:54:31.493","130","7.614","128","8392704" +"2020-03-01 19:54:32.493","130","7.61","128","8392704" +"2020-03-01 19:54:33.493","130","7.603","128","8392704" +"2020-03-01 
19:54:34.493","130","7.598","128","8392704" +"2020-03-01 19:54:35.493","130","7.594","128","8392704" +"2020-03-01 19:54:36.493","130","7.591","128","8392704" +"2020-03-01 19:54:37.493","130","7.591","128","8392704" +"2020-03-01 19:54:38.493","130","7.59","128","8392704" +"2020-03-01 19:54:39.493","130","7.588","128","8392704" +"2020-03-01 19:54:40.493","130","7.593","128","8392704" +"2020-03-01 19:54:41.493","130","7.599","128","8392704" +"2020-03-01 19:54:42.493","130","7.602","128","8392704" +"2020-03-01 19:54:43.493","130","7.604","128","8392704" +"2020-03-01 19:54:44.493","130","7.606","128","8392704" +"2020-03-01 19:54:45.493","130","7.609","128","8392704" +"2020-03-01 19:54:46.493","130","7.612","128","8392704" +"2020-03-01 19:54:47.493","130","7.614","128","8392704" +"2020-03-01 19:54:48.493","130","7.616","128","8392704" +"2020-03-01 19:54:49.493","130","7.617","128","8392704" +"2020-03-01 19:54:50.493","130","7.619","128","8392704" +"2020-03-01 19:54:51.493","130","7.623","128","8392704" +"2020-03-01 19:54:52.493","130","7.626","128","8392704" +"2020-03-01 19:54:53.493","130","7.626","128","8392704" +"2020-03-01 19:54:54.493","130","7.624","128","8392704" +"2020-03-01 19:54:55.493","130","7.623","128","8392704" +"2020-03-01 19:54:56.493","130","7.618","128","8392704" +"2020-03-01 19:54:57.493","130","7.613","128","8392704" +"2020-03-01 19:54:58.493","130","7.61","128","8392704" +"2020-03-01 19:54:59.493","130","7.605","128","8392704" +"2020-03-01 19:55:00.493","130","7.604","128","8392704" +"2020-03-01 19:55:01.493","130","7.603","128","8392704" +"2020-03-01 19:55:02.493","130","7.602","128","8392704" +"2020-03-01 19:55:03.493","130","7.602","128","8392704" +"2020-03-01 19:55:04.493","130","7.602","128","8392704" +"2020-03-01 19:55:05.493","130","7.603","128","8392704" +"2020-03-01 19:55:06.493","130","7.608","128","8392704" +"2020-03-01 19:55:07.493","130","7.609","128","8392704" +"2020-03-01 19:55:08.493","130","7.608","128","8392704" +"2020-03-01 
19:55:09.493","130","7.609","128","8392704" \ No newline at end of file diff --git a/tests/script/general/db/nosuchfile.sim b/tests/script/general/db/nosuchfile.sim new file mode 100644 index 0000000000000000000000000000000000000000..98ac4ec012dc694357878a61ca0dbc11259f0a9e --- /dev/null +++ b/tests/script/general/db/nosuchfile.sim @@ -0,0 +1,66 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect +sleep 3000 + +print ========== step3 +sql create database d1 +sql create table d1.t1 (t timestamp, i int) +sql insert into d1.t1 values(now+1s, 35) +sql insert into d1.t1 values(now+2s, 34) +sql insert into d1.t1 values(now+3s, 33) +sql insert into d1.t1 values(now+4s, 32) +sql insert into d1.t1 values(now+5s, 31) + +print ========== step4 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +sleep 3000 + +print ========== step5 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != 31 then + return -1 +endi +if $data11 != 32 then + return -1 +endi +if $data21 != 33 then + return -1 +endi +if $data31 != 34 then + return -1 +endi +if $data41 != 35 then + return -1 +endi + +print ========== step6 +system_content rm -rf ../../../sim/dnode1/data/vnode/vnode2/tsdb/data + +print ========== step7 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != null then + return -1 +endi +if $data11 != null then + return -1 +endi +if $data21 != null then + return -1 +endi +if $data31 != null then + return -1 +endi +if $data41 != null then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/insert/insert_drop.sim b/tests/script/general/insert/insert_drop.sim index 80c16ff8e4c0de45bbc6e43991a96b75b6c924d3..9b68e5a6a6c7e77fc0e591e4950500369a88a650 100644 
--- a/tests/script/general/insert/insert_drop.sim +++ b/tests/script/general/insert/insert_drop.sim @@ -52,10 +52,10 @@ sleep 1000 sql use $db sql drop table tb5 - +$i = 0 while $i < 4 - $tbId = $i + $halfNum - $tb = $tbPrefix . $i + + $tb = tb . $i $x = 0 while $x < $rowNum $xs = $x * $delta diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim index 132ecf43428cbb5d12abb1a991136f1cbb335a98..3208df95e446f5f06494617fa33a71a5f09ab828 100644 --- a/tests/script/general/parser/col_arithmetic_operation.sim +++ b/tests/script/general/parser/col_arithmetic_operation.sim @@ -91,4 +91,60 @@ endi sql_error select max(c2*2) from $tb sql_error select max(c1-c2) from $tb +print =====================> td-1764 +sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y) +if $rows != 1 then + return -1 +endi + +if $data00 != @18-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != 2.250000000 then + return -1 +endi + +if $data02 != 225000 then + return -1 +endi + +sql select first(c1) - last(c1), first(c1) as b, last(c1) as b, min(c1) - max(c1), spread(c1) from ca_stb0 interval(1y) +if $rows != 1 then + return -1 +endi + +if $data00 != @18-01-01 00:00:00.000@ then + return -1 +endi + +if $data01 != -9.000000000 then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data03 != 9 then + return -1 +endi + +if $data04 != -9.000000000 then + return -1 +endi + +if $data05 != 9.000000000 then + return -1 +endi + +sql_error select first(c1, c2) - last(c1, c2) from stb interval(1y) +sql_error select first(ts) - last(ts) from stb interval(1y) +sql_error select top(c1, 2) - last(c1) from stb; +sql_error select stddev(c1) - last(c1) from stb; +sql_error select diff(c1) - last(c1) from stb; +sql_error select first(c7) - last(c7) from stb; +sql_error select first(c8) - last(c8) from stb; +sql_error select first(c9) - last(c9) from stb; + system sh/exec.sh -n dnode1 -s stop -x 
SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim index 13b4455779933e65d53f2556eb7b4946578c074f..7ae496f1ac60ca7c4eb2b143f4d906c0c18e3726 100644 --- a/tests/script/general/parser/constCol.sim +++ b/tests/script/general/parser/constCol.sim @@ -353,4 +353,36 @@ sql_error select from t1 sql_error select abc from t1 sql_error select abc as tu from t1 +print ========================> td-1756 +sql_error select * from t1 where ts>now-1y +sql_error select * from t1 where ts>now-1n + +print ========================> td-1752 +sql select * from db.st2 where t2 < 200 and t2 is not null; +if $rows != 1 then + return -1 +endi + +if $data00 != @19-12-09 16:27:35.000@ then + return -1 +endi + +if $data01 != 2 then + return -1 +endi + +if $data02 != 1 then + return -1 +endi + +sql select * from db.st2 where t2 > 200 or t2 is null; +if $rows != 0 then + return -1 +endi + +sql select * from st2 where t2 < 200 and t2 is null; +if $rows != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index bd0d3c1a12c77570c19ea1ef061395912ad9f93a..b70fe88e815eec020b657f00a972eba74c6e2976 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -27,7 +27,7 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim index 254571bda103957fbaeaf0e311e7be03b4dcfc35..79b30ffe922af1e80ab242c4c95c835c9a4a790f 100644 --- a/tests/script/general/parser/join.sim +++ b/tests/script/general/parser/join.sim @@ -24,7 +24,7 @@ $mt = $mtPrefix . 
$i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db diff --git a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index 51f1ef11c7fc9f8cfff60ebe86ad00104266e7ad..5968a9cd5e5ece55f79da8c323a2ed36f0dc4426 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -22,7 +22,7 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim index fbff99d58f5b6355863b90172c2fb14c1f2ba393..127ade66c52987cdddf28fd79bcdab0ebc7bae09 100644 --- a/tests/script/general/parser/projection_limit_offset.sim +++ b/tests/script/general/parser/projection_limit_offset.sim @@ -21,7 +21,7 @@ $mt = $mtPrefix . $i $tstart = 100000 -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db keep 36500 sql use $db diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim index f85211beb83e575e2a73518d89f4b7d989486f83..ec0e31311afe1a08644aa28515071bced71ae0f0 100644 --- a/tests/script/general/parser/sliding.sim +++ b/tests/script/general/parser/sliding.sim @@ -26,7 +26,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . 
$i -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db maxtables 4 keep 36500 sql use $db diff --git a/tests/script/general/parser/tags_dynamically_specifiy.sim b/tests/script/general/parser/tags_dynamically_specifiy.sim index 0a5d5c971649abae07d149f1cc914eb096e638fa..07bf4d8dd1cae6b0b374b9b67b1dca58f218fb3f 100644 --- a/tests/script/general/parser/tags_dynamically_specifiy.sim +++ b/tests/script/general/parser/tags_dynamically_specifiy.sim @@ -96,5 +96,10 @@ if $rows != 14 then return -1 endi +print ===============================> td-1765 +sql create table m1(ts timestamp, k int) tags(a binary(4), b nchar(4)); +sql create table tm0 using m1 tags('abcd', 'abcd'); +sql_error alter table tm0 set tag b = 'abcd1'; +sql_error alter table tm0 set tag a = 'abcd1'; system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 3dd80b8e38c8144fee218ec372e59fe262ac7a15..b8484089250cbf0b8dd61e48ddb1e044878cb56e 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -1,51 +1,51 @@ -sleep 2000 -run general/parser/alter.sim -sleep 2000 -run general/parser/alter1.sim -sleep 2000 -run general/parser/alter_stable.sim -sleep 2000 -run general/parser/auto_create_tb.sim -sleep 2000 -run general/parser/auto_create_tb_drop_tb.sim -sleep 2000 -run general/parser/col_arithmetic_operation.sim -sleep 2000 -run general/parser/columnValue.sim -sleep 2000 -run general/parser/commit.sim -sleep 2000 -run general/parser/create_db.sim -sleep 2000 -run general/parser/create_mt.sim -sleep 2000 -run general/parser/create_tb.sim -sleep 2000 -run general/parser/dbtbnameValidate.sim -sleep 2000 -run general/parser/fill.sim -sleep 2000 -run general/parser/fill_stb.sim -sleep 2000 -#run general/parser/fill_us.sim # -sleep 2000 -run general/parser/first_last.sim 
-sleep 2000 -run general/parser/import_commit1.sim -sleep 2000 -run general/parser/import_commit2.sim -sleep 2000 -run general/parser/import_commit3.sim -sleep 2000 -#run general/parser/import_file.sim -sleep 2000 -run general/parser/insert_tb.sim -sleep 2000 -run general/parser/tags_dynamically_specifiy.sim -sleep 2000 -run general/parser/interp.sim -sleep 2000 -run general/parser/lastrow.sim +#sleep 2000 +#run general/parser/alter.sim +#sleep 2000 +#run general/parser/alter1.sim +#sleep 2000 +#run general/parser/alter_stable.sim +#sleep 2000 +#run general/parser/auto_create_tb.sim +#sleep 2000 +#run general/parser/auto_create_tb_drop_tb.sim +#sleep 2000 +#run general/parser/col_arithmetic_operation.sim +#sleep 2000 +#run general/parser/columnValue.sim +#sleep 2000 +#run general/parser/commit.sim +#sleep 2000 +#run general/parser/create_db.sim +#sleep 2000 +#run general/parser/create_mt.sim +#sleep 2000 +#run general/parser/create_tb.sim +#sleep 2000 +#run general/parser/dbtbnameValidate.sim +#sleep 2000 +#run general/parser/fill.sim +#sleep 2000 +#run general/parser/fill_stb.sim +#sleep 2000 +##run general/parser/fill_us.sim # +#sleep 2000 +#run general/parser/first_last.sim +#sleep 2000 +#run general/parser/import_commit1.sim +#sleep 2000 +#run general/parser/import_commit2.sim +#sleep 2000 +#run general/parser/import_commit3.sim +#sleep 2000 +##run general/parser/import_file.sim +#sleep 2000 +#run general/parser/insert_tb.sim +#sleep 2000 +#run general/parser/tags_dynamically_specifiy.sim +#sleep 2000 +#run general/parser/interp.sim +#sleep 2000 +#run general/parser/lastrow.sim sleep 2000 run general/parser/limit.sim sleep 2000 diff --git a/tests/script/general/parser/union.sim b/tests/script/general/parser/union.sim index 9e178537a291a3ccd4ab93b9ae03a25e7e619faa..024b9c76efe6b8ddd69c723ffc0e1e0b0d82e075 100644 --- a/tests/script/general/parser/union.sim +++ b/tests/script/general/parser/union.sim @@ -27,7 +27,7 @@ $j = 1 $mt1 = $mtPrefix . 
$j -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db sql use $db @@ -251,7 +251,7 @@ if $rows != 15 then endi # first subclause are empty -sql select count(*) as c from union_tb0 where ts>now+10y union all select sum(c1) as c from union_tb1; +sql select count(*) as c from union_tb0 where ts > now + 3650d union all select sum(c1) as c from union_tb1; if $rows != 1 then return -1 endi @@ -346,7 +346,7 @@ if $data91 != 99 then return -1 endi -#1111111111111111111111111111111111111111111111111 +#================================================================================================= # two aggregated functions for normal tables sql select sum(c1) as a from union_tb0 limit 1 union all select sum(c3) as a from union_tb1 limit 2; if $rows != 2 then diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 5cac3f47235c1321d740e373d0f5828f076c31c7..066fac43ad9ee617a2cb9a91a62a6ebf984be6f0 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -20,7 +20,7 @@ $i = 0 $db = $dbPrefix . $i $mt = $mtPrefix . 
$i -sql drop database if exits $db -x step1 +sql drop database if exists $db -x step1 step1: sql create database if not exists $db sql use $db diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 4e68d1566a76468b06115a80aace70871665c9cb..1b2fe37c71f26486ba847006ecf3aa373b5f55c1 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -64,6 +64,7 @@ cd ../../../debug; make ./test.sh -f general/db/repeat.sim ./test.sh -f general/db/tables.sim ./test.sh -f general/db/vnodes.sim +./test.sh -f general/db/nosuchfile.sim ./test.sh -f general/field/2.sim ./test.sh -f general/field/3.sim @@ -277,6 +278,7 @@ cd ../../../debug; make ./test.sh -f unique/dnode/balance2.sim ./test.sh -f unique/dnode/balance3.sim ./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/data1.sim ./test.sh -f unique/dnode/offline1.sim ./test.sh -f unique/dnode/offline2.sim ./test.sh -f unique/dnode/reason.sim diff --git a/tests/script/jenkins/basic_1.txt b/tests/script/jenkins/basic_1.txt index aba2ec945f518b473854eaec23f22fbca51d7192..765e7139163952c43569e596d8ca8cf27d583cf6 100644 --- a/tests/script/jenkins/basic_1.txt +++ b/tests/script/jenkins/basic_1.txt @@ -199,4 +199,7 @@ ./test.sh -f unique/dnode/vnode_clean.sim ./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim \ No newline at end of file +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim diff --git a/tests/script/jenkins/basic_2.txt b/tests/script/jenkins/basic_2.txt index 166c732df79197173827af08ec36495dd11cde47..014313fafe9c7974baa7205cca41492dedee213a 100644 --- a/tests/script/jenkins/basic_2.txt +++ b/tests/script/jenkins/basic_2.txt @@ -81,3 +81,10 @@ cd ../../../debug; make ./test.sh -f unique/db/replica_reduce32.sim ./test.sh -f unique/db/replica_reduce31.sim ./test.sh -f unique/db/replica_part.sim + +./test.sh -f unique/vnode/many.sim +./test.sh -f 
unique/vnode/replica2_basic2.sim +./test.sh -f unique/vnode/replica2_repeat.sim +./test.sh -f unique/vnode/replica3_basic.sim +./test.sh -f unique/vnode/replica3_repeat.sim +./test.sh -f unique/vnode/replica3_vgroup.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_3.txt b/tests/script/jenkins/basic_3.txt index de5d64b98464d90fafd02c0efe28d444716c8cbd..83b10a371cbc354acc079a6307fe0dbee27e3533 100644 --- a/tests/script/jenkins/basic_3.txt +++ b/tests/script/jenkins/basic_3.txt @@ -1,5 +1,3 @@ -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim ./test.sh -f unique/stable/balance_replica1.sim ./test.sh -f unique/stable/dnode2_stop.sim @@ -21,12 +19,7 @@ ./test.sh -f unique/mnode/mgmt34.sim ./test.sh -f unique/mnode/mgmtr2.sim -./test.sh -f unique/vnode/many.sim -./test.sh -f unique/vnode/replica2_basic2.sim -./test.sh -f unique/vnode/replica2_repeat.sim -./test.sh -f unique/vnode/replica3_basic.sim -./test.sh -f unique/vnode/replica3_repeat.sim -./test.sh -f unique/vnode/replica3_vgroup.sim + ./test.sh -f general/parser/stream_on_sys.sim ./test.sh -f general/stream/metrics_del.sim diff --git a/tests/script/unique/dnode/data1.sim b/tests/script/unique/dnode/data1.sim new file mode 100644 index 0000000000000000000000000000000000000000..61a991148b21b471aef906223a5f600f7db38f5f --- /dev/null +++ b/tests/script/unique/dnode/data1.sim @@ -0,0 +1,137 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode4 
-c mnodeEqualVnodeNum -v 4 + +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/cfg.sh -n dnode2 -c wallevel -v 2 +system sh/cfg.sh -n dnode3 -c wallevel -v 2 +system sh/cfg.sh -n dnode4 -c wallevel -v 2 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect +sleep 3000 + +print ========== step2 +sql create dnode $hostname2 +system sh/exec.sh -n dnode2 -s start +sql create dnode $hostname3 +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname4 +system sh/exec.sh -n dnode4 -s start + +$x = 0 +show2: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then + goto show2 +endi +if $data2_2 != 0 then + goto show2 +endi +if $data2_3 != 0 then + goto show2 +endi +if $data2_4 != 0 then + goto show2 +endi + +print ========== step3 +sql create database d1 replica 3 +sql create table d1.t1 (t timestamp, i int) +sql insert into d1.t1 values(now+1s, 35) +sql insert into d1.t1 values(now+2s, 34) +sql insert into d1.t1 values(now+3s, 33) +sql insert into d1.t1 values(now+4s, 32) +sql insert into d1.t1 values(now+5s, 31) + +$x = 0 +show3: + $x = $x + 1 + sleep 3000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 openVnodes $data2_1 +print dnode2 openVnodes $data2_2 +print dnode3 openVnodes $data2_3 +print dnode4 openVnodes $data2_4 +if $data2_1 != 0 then + goto show3 +endi +if $data2_2 != 1 then + goto show3 +endi +if $data2_3 != 1 then + goto show3 +endi +if $data2_4 != 1 then + goto show3 +endi + +print ========== step4 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + +print ========== step5 +system_content rm -rf ../../../sim/dnode4/data/vnode/vnode2/tsdb/data + +print ========== step6 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh 
-n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +sleep 10000 + +print ========== step7 +sql select * from d1.t1 order by t desc +print $data01 $data11 $data21 $data31 $data41 +if $data01 != 31 then + return -1 +endi +if $data11 != 32 then + return -1 +endi +if $data21 != 33 then + return -1 +endi +if $data31 != 34 then + return -1 +endi +if $data41 != 35 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT +system sh/exec.sh -n dnode6 -s stop -x SIGINT +system sh/exec.sh -n dnode7 -s stop -x SIGINT +system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file