Commit 28692a51 authored by: W wpan

Merge branch 'develop' into feature/TD-2581

@@ -25,15 +25,14 @@ steps:
   - master
 ---
 kind: pipeline
-name: test_arm64
+name: test_arm64_bionic
 platform:
   os: linux
   arch: arm64
 steps:
 - name: build
-  image: gcc
+  image: arm64v8/ubuntu:bionic
   commands:
   - apt-get update
   - apt-get install -y cmake build-essential
@@ -48,9 +47,87 @@ steps:
   branch:
   - develop
   - master
+  - 2.0
+---
+kind: pipeline
+name: test_arm64_focal
+platform:
+  os: linux
+  arch: arm64
+steps:
+- name: build
+  image: arm64v8/ubuntu:focal
+  commands:
+  - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+  - apt-get update
+  - apt-get install -y -qq cmake build-essential
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make
+  trigger:
+    event:
+    - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
+---
+kind: pipeline
+name: test_arm64_centos7
+platform:
+  os: linux
+  arch: arm64
+steps:
+- name: build
+  image: arm64v8/centos:7
+  commands:
+  - yum install -y gcc gcc-c++ make cmake git
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make
+  trigger:
+    event:
+    - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
 ---
 kind: pipeline
-name: test_arm
+name: test_arm64_centos8
+platform:
+  os: linux
+  arch: arm64
+steps:
+- name: build
+  image: arm64v8/centos:8
+  commands:
+  - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
+  - mkdir debug
+  - cd debug
+  - cmake .. -DCPUTYPE=aarch64 > /dev/null
+  - make
+  trigger:
+    event:
+    - pull_request
+  when:
+    branch:
+    - develop
+    - master
+    - 2.0
+---
+kind: pipeline
+name: test_arm_bionic
 platform:
   os: linux
@@ -73,7 +150,6 @@ steps:
   branch:
   - develop
   - master
 ---
 kind: pipeline
 name: build_trusty
@@ -174,25 +250,3 @@ steps:
   - develop
   - master
----
-kind: pipeline
-name: goodbye
-platform:
-  os: linux
-  arch: amd64
-steps:
-- name: 64-bit
-  image: alpine
-  commands:
-  - echo 64-bit is good.
-  when:
-    branch:
-    - develop
-    - master
-depends_on:
-- test_arm64
-- test_amd64
\ No newline at end of file
@@ -41,6 +41,7 @@ def pre_test(){
     sh '''
     killall -9 taosd ||echo "no taosd running"
     killall -9 gdb || echo "no gdb running"
+    killall -9 python3.8 || echo "no python program running"
     cd ${WKC}
     git reset --hard HEAD~10 >/dev/null
     '''
@@ -223,12 +224,14 @@ pipeline {
         steps {
             pre_test()
             catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                timeout(time: 60, unit: 'MINUTES'){
                     sh '''
                     cd ${WKC}/tests/pytest
                     ./crash_gen.sh -a -p -t 4 -s 2000
                     '''
+                }
             }
+            timeout(time: 60, unit: 'MINUTES'){
                 sh '''
                 cd ${WKC}/tests/pytest
                 rm -rf /var/lib/taos/*
@@ -241,6 +244,7 @@ pipeline {
                 rm -rf /var/log/taos/*
                 ./handle_taosd_val_log.sh
                 '''
+            }
             timeout(time: 45, unit: 'MINUTES'){
                 sh '''
                 date
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package app
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package expr
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package expr
 import "testing"
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package expr
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package expr
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package app
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package app
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package app
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package main
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package models
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package models
 import "time"
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package utils
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package log
 import (
......
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.1.5.0")
+  SET(TD_VER_NUMBER "2.1.6.0")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
......
@@ -126,7 +126,7 @@ taos> source <filename>;
 $ taosdemo
 ```
-This command automatically creates a super table meters under the database test, and under it 10,000 tables named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase), with timestamps ranging from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId; groupdId is set to 1 through 10, and location is set to "beijing" or "shanghai".
+This command automatically creates a super table meters under the database test, and under it 10,000 tables named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase), with timestamps ranging from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId; groupId is set to 1 through 10, and location is set to "beijing" or "shanghai".
 Running this command takes roughly a few minutes and inserts 100 million records in total.
@@ -150,10 +150,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters;
 taos> select count(*) from test.meters where location="beijing";
 ```
-- Query the average, maximum, and minimum of all records with groupdId=10:
+- Query the average, maximum, and minimum of all records with groupId=10:
 ```mysql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
 ```
 - Aggregate the average, maximum, and minimum for table d10 over 10s windows:
......
@@ -33,7 +33,7 @@ USE power;
 An IoT system usually contains many types of devices; a power grid, for example, has smart meters, transformers, buses, switches, and so on. To make aggregation across tables easy, TDengine requires a super table (STable) to be created for each type of data collection point. Taking the smart meter in Table 1 as an example, the STable can be created with the following SQL command:
 ```mysql
-CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
+CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
 ```
 **Note:** The STABLE keyword in this command must be written as TABLE in versions before 2.0.15.
......
@@ -17,7 +17,7 @@ The continuous query provided by TDengine differs from the time window calculation of ordinary stream computing in the following ways
 The following uses the smart meter scenario to introduce the specific usage of continuous query. Suppose the super table and sub-tables are created with the following SQL statements:
 ```sql
-create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
+create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
 create table D1001 using meters tags ("Beijing.Chaoyang", 2);
 create table D1002 using meters tags ("Beijing.Haidian", 2);
 ...
......
@@ -3,17 +3,17 @@
 ## <a class="anchor" id="grafana"></a>Grafana
 TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be visualized on a dashboard (DashBoard).
 ### Install Grafana
-Currently TDengine supports Grafana 5.2.4 and above. Users can download the installation package from the Grafana official site for their operating system and install it. The download address is: https://grafana.com/grafana/download.
+Currently TDengine supports Grafana 6.2 and above. Users can download the installation package from the Grafana official site for their operating system and install it. The download address is: https://grafana.com/grafana/download.
 ### Configure Grafana
 The TDengine Grafana plugin is located at /usr/local/taos/connector/grafanaplugin in the installation package.
 Taking the CentOS 7.2 operating system as an example, copy the grafanaplugin directory to /var/lib/grafana/plugins and restart grafana.
 ```bash
 sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
......
@@ -143,7 +143,7 @@ taosd -C
 When a new dnode joins a TDengine cluster, a number of cluster-related parameters must be identical to the configuration of the existing cluster, otherwise the dnode cannot join. The following parameters are checked (a minimal taos.cfg sketch follows this list):
-- numOfMnodes: the number of management nodes in the system. Default: 3.
+- numOfMnodes: the number of management nodes in the system. Default: 3. (Starting from 2.0.20.11 in the 2.0 series and from 2.1.6.0 in 2.1 and later, the default value of numOfMnodes is changed to 1.)
 - mnodeEqualVnodeNum: the number of vnodes an mnode is counted as. Default: 4.
 - offlineThreshold: the offline threshold for a dnode; once exceeded, the dnode is removed from the cluster. Unit: seconds; default: 86400*10 (i.e., 10 days).
 - statusInterval: the interval at which a dnode reports its status to the mnode. Unit: seconds; default: 1.
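These checked parameters are plain key-value entries in taos.cfg. As an illustration only (the values shown are the documented defaults described above, not settings introduced by this commit), every dnode joining the cluster must carry the same values:

```
# taos.cfg — illustrative sketch; values are the defaults listed above
numOfMnodes         1
mnodeEqualVnodeNum  4
offlineThreshold    864000
statusInterval      1
```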
@@ -427,7 +427,7 @@ After TDengine starts, a monitoring database named log is automatically created and server status information is periodically written
 COMPACT VNODES IN (vg_id1, vg_id2, ...)
 ```
-The COMPACT command starts defragmentation for one or more specified VGroups, and the system schedules the actual work through the task queue as soon as possible. The VGroup ids required by the COMPACT command can be obtained from the output of `SHOW VGROUPS;`. In addition, `SHOW VGROUPS;` contains a compacting column: a value of 1 means the corresponding VGroup is being defragmented, and 0 means it is not.
+The COMPACT command starts defragmentation for one or more specified VGroups, and the system schedules the actual work through the task queue as soon as possible. The VGroup ids required by the COMPACT command can be obtained from the output of `SHOW VGROUPS;`. In addition, `SHOW VGROUPS;` contains a compacting column: a value of 2 means the corresponding VGroup is queued for defragmentation, 1 means it is being defragmented, and 0 means it is not in a defragmentation state (either none was requested or it has already finished).
 Note that defragmentation consumes a large amount of disk I/O, so while it is running it may degrade the write and query performance of the node, and in extreme cases may even block writes for a short time.
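For reference, a hedged usage sketch (the vgroup ids 3 and 4 are hypothetical, read off a `SHOW VGROUPS;` listing):

```mysql
-- list vgroups; note the vgId column and the compacting column
taos> SHOW VGROUPS;
-- ask the server to defragment vgroups 3 and 4
taos> COMPACT VNODES IN (3, 4);
-- poll until the compacting column for vgId 3 and 4 returns to 0
taos> SHOW VGROUPS;
```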
......
@@ -34,7 +34,7 @@ taos> DESCRIBE meters;
 - The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with millisecond resolution by default, for example: ```2017-08-12 18:25:58.128```
 - The internal function now returns the current time of the client
 - When inserting a record, if the timestamp is now, the current time of the client submitting the record is used
-- Epoch Time: the timestamp can also be a long integer, representing the number of milliseconds since 1970-01-01 08:00:00.000
+- Epoch Time: the timestamp can also be a long integer, representing the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT) (correspondingly, if the time precision of the database is set to "microseconds", a long-integer timestamp represents the number of microseconds since 1970-01-01 00:00:00.000 (UTC/GMT))
 - Times can be added and subtracted; for example, now-2h pushes the query time back 2 hours (the last 2 hours). The time unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down sampling operation, the units n (natural month) and y (natural year) can also be used.
 TDengine uses millisecond timestamp precision by default, but microsecond precision is supported via the PRECISION parameter passed at CREATE DATABASE time.
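As a hedged illustration of these notations (table t1 and its schema are hypothetical; the long integer is milliseconds in a default-precision database):

```mysql
-- epoch form: milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT)
taos> insert into t1 values (1502562358128, 10.2);
-- calendar form, millisecond resolution
taos> insert into t1 values ('2017-08-12 18:25:58.128', 10.3);
-- relative form: all records from the last two hours
taos> select * from t1 where ts > now-2h;
```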
@@ -182,7 +182,7 @@ TDengine uses millisecond timestamp precision by default, but microsecond precision is supported via the PRECISION parameter
 - **Creating data tables in batches**
 ```mysql
-CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
 ```
 Creates a large number of data tables in batches at higher speed. (Server side 2.0.14 and above.)
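A concrete instance of the syntax above (sub-table names and tag values are illustrative, reusing the meters STable from the data model chapter):

```mysql
CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ("Beijing.Chaoyang", 2)
                           d1002 USING meters TAGS ("Beijing.Haidian", 2);
```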
@@ -414,13 +414,13 @@ INSERT INTO
 ```
 When creating a table automatically, it is also possible to specify values for only some of the TAGS columns; unspecified TAGS columns are set to NULL. For example:
 ```mysql
-INSERT INTO d21001 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
+INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
 ```
 The auto-create-table syntax also supports inserting records into multiple tables in one statement. For example:
 ```mysql
 INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
-            d21002 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
-            d21003 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
+            d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+            d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
 ```
 **Note:** Before version 2.0.20.5, when using the auto-create-table syntax with specified columns, the sub-table's column names had to immediately follow the sub-table name and could not, as in the example, be placed between TAGS and VALUES. From 2.0.20.5 onward both styles are accepted, but they must not be mixed in one SQL statement, otherwise a syntax error is reported.
......
@@ -32,7 +32,7 @@ Replace the database operating in the current connection with “power”, other
 An IoT system often has many types of devices, such as smart meters, transformers, buses, switches, etc. for power grids. In order to facilitate aggregation among multiple tables, using TDengine, it is necessary to create a STable for each type of data collection point. Taking the smart meter in Table 1 as an example, you can use the following SQL command to create a STable:
 ```mysql
-CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
+CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
 ```
 **Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
......
@@ -17,7 +17,7 @@ The continuous query provided by TDengine differs from the time window calculati
 The following is an example of the smart meter scenario to introduce the specific use of continuous query. Suppose we create a STable and sub-tables through the following SQL statements:
 ```sql
-create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupdId int);
+create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
 create table D1001 using meters tags ("Beijing.Chaoyang", 2);
 create table D1002 using meters tags ("Beijing.Haidian", 2);
 ...
......
@@ -132,7 +132,7 @@ The SQL creates a database demo, each data file stores 10 days of data, the memo
 When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows:
-- numOfMnodes: the number of management nodes in the system. Default: 3.
+- numOfMnodes: the number of management nodes in the system. Default: 3. (Since version 2.0.20.11 and version 2.1.6.0, the default value of "numOfMnodes" has been changed to 1.)
 - balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1.
 - mnodeEqualVnodeNum: an mnode is equal to the number of vnodes consumed. Default: 4.
 - offlineThreshold: the threshold for a dnode to be offline, exceed which the dnode will be removed from the cluster. The unit is seconds, and the default value is 86400*10 (that is, 10 days).
......
@@ -165,7 +165,7 @@ Note:
 - **Create tables in batches**
 ```mysql
-CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
 ```
 Create a large number of data tables in batches faster. (Server side 2.0.14 and above)
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package main
 import (
......
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
 package dataimport
 import (
......
@@ -44,7 +44,8 @@ echo "version=${version}"
 #docker manifest rm tdengine/tdengine
 #docker manifest rm tdengine/tdengine:${version}
 if [ "$verType" == "beta" ]; then
-  docker manifest rm tdengine/tdengine:latest
+  docker manifest inspect tdengine/tdengine-beta:latest
+  docker manifest rm tdengine/tdengine-beta:latest
   docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
   docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
   docker login -u tdengine -p ${passWord}  #replace the docker registry username and password
@@ -52,6 +53,7 @@ if [ "$verType" == "beta" ]; then
   docker manifest push tdengine/tdengine-beta:${version}
 elif [ "$verType" == "stable" ]; then
+  docker manifest inspect tdengine/tdengine:latest
   docker manifest rm tdengine/tdengine:latest
   docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
   docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
......
@@ -35,7 +35,7 @@ fi
 if [ "$pagMode" == "lite" ]; then
   strip ${build_dir}/bin/taosd
   strip ${build_dir}/bin/taos
-  bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
+  bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
 else
   bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
     ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
......
 name: tdengine
 base: core18
-version: '2.1.5.0'
+version: '2.1.6.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
     - usr/bin/taosd
     - usr/bin/taos
     - usr/bin/taosdemo
-    - usr/lib/libtaos.so.2.1.5.0
+    - usr/lib/libtaos.so.2.1.6.0
     - usr/lib/libtaos.so.1
     - usr/lib/libtaos.so
......
@@ -342,10 +342,11 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
 uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
 CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
 uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity);
 STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
 SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
+int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo);
 int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
 int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
 void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
......
@@ -84,8 +84,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
   int64_t useconds = 0;
   char *  pTokenEnd = *next;
 
-  index = 0;
-
   if (pToken->type == TK_NOW) {
     useconds = taosGetTimestamp(timePrec);
   } else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
@@ -130,7 +128,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
       return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
     }
 
-    if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, timePrec) != TSDB_CODE_SUCCESS) {
+    char unit = 0;
+    if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, &unit, timePrec) != TSDB_CODE_SUCCESS) {
      return TSDB_CODE_TSC_INVALID_OPERATION;
    }
......
@@ -458,9 +458,9 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
   schema->tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
   schema->fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
 
-  uint32_t size = tscGetTableMetaMaxSize();
-  STableMeta* tableMeta = calloc(1, size);
-  taosHashGetClone(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, tableMeta);
+  size_t size = 0;
+  STableMeta* tableMeta = NULL;
+  taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void **)&tableMeta, &size);
 
   tstrncpy(schema->sTableName, tableName, strlen(tableName)+1);
   schema->precision = tableMeta->tableInfo.precision;
@@ -484,7 +484,7 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
     size_t tagIndex = taosArrayGetSize(schema->tags) - 1;
     taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex));
   }
-  tscDebug("SML:0x%"PRIx64 "load table meta succeed. %s, columns number: %d, tag number: %d, precision: %d",
+  tscDebug("SML:0x%"PRIx64 " load table meta succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
            info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision);
   free(tableMeta); tableMeta = NULL;
   return code;
@@ -1417,7 +1417,7 @@ static bool isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType) {
   return false;
 }
 
-static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
+static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info) {
   errno = 0;
   uint8_t type = pVal->type;
   int16_t length = pVal->length;
@@ -1436,7 +1436,7 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
   }
 
   if (errno == ERANGE) {
-    tscError("Converted number out of range");
+    tscError("SML:0x%"PRIx64" Convert number(%s) out of range", info->id, str);
     return false;
   }
@@ -1518,7 +1518,7 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str) {
 }
 
 //len does not include '\0' from value.
 static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
-                                uint16_t len) {
+                                uint16_t len, SSmlLinesInfo* info) {
   if (len <= 0) {
     return false;
   }
@@ -1528,7 +1528,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_TINYINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 2] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1537,7 +1537,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_UTINYINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 2] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1546,7 +1546,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_SMALLINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1555,7 +1555,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_USMALLINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1564,7 +1564,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_INT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1573,7 +1573,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_UINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1582,7 +1582,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_BIGINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1591,7 +1591,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_UBIGINT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidInteger(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1601,7 +1601,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_FLOAT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1610,7 +1610,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
     pVal->type = TSDB_DATA_TYPE_DOUBLE;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
     value[len - 3] = '\0';
-    if (!isValidFloat(value) || !convertStrToNumber(pVal, value)) {
+    if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1646,7 +1646,7 @@ static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
   if (isValidInteger(value) || isValidFloat(value)) {
     pVal->type = TSDB_DATA_TYPE_FLOAT;
     pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
-    if (!convertStrToNumber(pVal, value)) {
+    if (!convertStrToNumber(pVal, value, info)) {
      return false;
    }
    return true;
@@ -1702,7 +1702,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
 }
 
 static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
-                                   uint16_t len) {
+                                   uint16_t len, SSmlLinesInfo* info) {
   int32_t ret;
   SMLTimeStampType type;
   int64_t tsVal;
@@ -1715,7 +1715,7 @@ static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
   if (ret) {
     return ret;
   }
-  tscDebug("Timestamp after conversion:%"PRId64, tsVal);
+  tscDebug("SML:0x%"PRIx64"Timestamp after conversion:%"PRId64, info->id, tsVal);
 
   pVal->type = TSDB_DATA_TYPE_TIMESTAMP;
   pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
@@ -1724,7 +1724,7 @@ static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
+static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) {
   const char *start, *cur;
   int32_t ret = TSDB_CODE_SUCCESS;
   int len = 0;
@@ -1744,7 +1744,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
     memcpy(value, start, len);
   }
 
-  ret = convertSmlTimeStamp(*pTS, value, len);
+  ret = convertSmlTimeStamp(*pTS, value, len, info);
   if (ret) {
     free(value);
     free(*pTS);
@@ -1757,7 +1757,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index) {
   return ret;
 }
 
-static bool checkDuplicateKey(char *key, SHashObj *pHash) {
+static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
   char *val = NULL;
   char *cur = key;
   char keyLower[TSDB_COL_NAME_LEN];
@@ -1771,7 +1771,7 @@ static bool checkDuplicateKey(char *key, SHashObj *pHash) {
   val = taosHashGet(pHash, keyLower, keyLen);
   if (val) {
-    tscError("Duplicate key:%s", keyLower);
+    tscError("SML:0x%"PRIx64" Duplicate key detected:%s", info->id, keyLower);
     return true;
   }
@@ -1781,19 +1781,19 @@ static bool checkDuplicateKey(char *key, SHashObj *pHash) {
   return false;
 }
 
-static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash) {
+static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
   const char *cur = *index;
   char key[TSDB_COL_NAME_LEN + 1];  // +1 to avoid key[len] over write
   uint16_t len = 0;
 
   //key field cannot start with digit
   if (isdigit(*cur)) {
-    tscError("Tag key cannnot start with digit\n");
+    tscError("SML:0x%"PRIx64" Tag key cannnot start with digit", info->id);
     return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
   }
   while (*cur != '\0') {
     if (len > TSDB_COL_NAME_LEN) {
-      tscDebug("Key field cannot exceeds 65 characters");
+      tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
       return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
     }
     //unescaped '=' identifies a tag key
@@ -1810,20 +1810,20 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
   }
   key[len] = '\0';
 
-  if (checkDuplicateKey(key, pHash)) {
+  if (checkDuplicateKey(key, pHash, info)) {
     return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
   }
 
   pKV->key = calloc(len + 1, 1);
   memcpy(pKV->key, key, len + 1);
-  //tscDebug("Key:%s|len:%d", pKV->key, len);
+  //tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
   *index = cur + 1;
   return TSDB_CODE_SUCCESS;
 }
 
 static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
-                          bool *is_last_kv) {
+                          bool *is_last_kv, SSmlLinesInfo* info) {
   const char *start, *cur;
   char *value = NULL;
   uint16_t len = 0;
@@ -1847,7 +1847,9 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
   value = calloc(len + 1, 1);
   memcpy(value, start, len);
   value[len] = '\0';
-  if (!convertSmlValueType(pKV, value, len)) {
+  if (!convertSmlValueType(pKV, value, len, info)) {
+    tscError("SML:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
+            info->id, value);
     //free previous alocated key field
     free(pKV->key);
     pKV->key = NULL;
@@ -1861,7 +1863,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
 }
 
 static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index,
-                                   uint8_t *has_tags) {
+                                   uint8_t *has_tags, SSmlLinesInfo* info) {
   const char *cur = *index;
   uint16_t len = 0;
 
@@ -1870,7 +1872,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
 
   if (isdigit(*cur)) {
-    tscError("Measurement field cannnot start with digit");
+    tscError("SML:0x%"PRIx64" Measurement field cannnot start with digit", info->id);
     free(pSml->stableName);
     pSml->stableName = NULL;
     return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
@@ -1878,7 +1880,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
 
   while (*cur != '\0') {
     if (len > TSDB_TABLE_NAME_LEN) {
-      tscError("Measurement field cannot exceeds 193 characters");
+      tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
       free(pSml->stableName);
       pSml->stableName = NULL;
       return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
@@ -1902,7 +1904,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
   }
   pSml->stableName[len] = '\0';
   *index = cur + 1;
-  tscDebug("Stable name in measurement:%s|len:%d", pSml->stableName, len);
+  tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
 
   return TSDB_CODE_SUCCESS;
 }
@@ -1921,7 +1923,8 @@ static int32_t isValidChildTableName(const char *pTbName, int16_t len) {
 
 static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
                                const char **index, bool isField,
-                               TAOS_SML_DATA_POINT* smlData, SHashObj *pHash) {
+                               TAOS_SML_DATA_POINT* smlData, SHashObj *pHash,
+                               SSmlLinesInfo* info) {
   const char *cur = *index;
   int32_t ret = TSDB_CODE_SUCCESS;
   TAOS_SML_KV *pkv;
@@ -1941,14 +1944,14 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
   }
 
   while (*cur != '\0') {
-    ret = parseSmlKey(pkv, &cur, pHash);
+    ret = parseSmlKey(pkv, &cur, pHash, info);
     if (ret) {
-      tscError("Unable to parse key field");
+      tscError("SML:0x%"PRIx64" Unable to parse key", info->id);
       goto error;
     }
-    ret = parseSmlValue(pkv, &cur, &is_last_kv);
+    ret = parseSmlValue(pkv, &cur, &is_last_kv, info);
     if (ret) {
-      tscError("Unable to parse value field");
+      tscError("SML:0x%"PRIx64" Unable to parse value", info->id);
      goto error;
    }
    if (!isField &&
@@ -1966,7 +1969,6 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
       *num_kvs += 1;
     }
     if (is_last_kv) {
-      //tscDebug("last key-value field detected");
       goto done;
     }
@@ -2024,50 +2026,50 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t
   free(ts);
 }
 
-int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData) {
+int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
   const char* index = sql;
   int32_t ret = TSDB_CODE_SUCCESS;
   uint8_t has_tags = 0;
   TAOS_SML_KV *timestamp = NULL;
   SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
 
-  ret = parseSmlMeasurement(smlData, &index, &has_tags);
+  ret = parseSmlMeasurement(smlData, &index, &has_tags, info);
   if (ret) {
-    tscError("Unable to parse measurement");
+    tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id);
     taosHashCleanup(keyHashTable);
     return ret;
   }
-  tscDebug("Parse measurement finished, has_tags:%d", has_tags);
+  tscDebug("SML:0x%"PRIx64" Parse measurement finished, has_tags:%d", info->id, has_tags);
 
   //Parse Tags
   if (has_tags) {
-    ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable);
+    ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info);
     if (ret) {
-      tscError("Unable to parse tag");
+      tscError("SML:0x%"PRIx64" Unable to parse tag", info->id);
       taosHashCleanup(keyHashTable);
       return ret;
     }
   }
-  tscDebug("Parse tags finished, num of tags:%d", smlData->tagNum);
+  tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum);
 
   //Parse fields
-  ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable);
+  ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info);
   if (ret) {
-    tscError("Unable to parse field");
+    tscError("SML:0x%"PRIx64" Unable to parse field", info->id);
     taosHashCleanup(keyHashTable);
     return ret;
   }
-  tscDebug("Parse fields finished, num of fields:%d", smlData->fieldNum);
+  tscDebug("SML:0x%"PRIx64" Parse fields finished, num of fields:%d", info->id, smlData->fieldNum);
   taosHashCleanup(keyHashTable);
 
   //Parse timestamp
-  ret = parseSmlTimeStamp(&timestamp, &index);
+  ret = parseSmlTimeStamp(&timestamp, &index, info);
   if (ret) {
-    tscError("Unable to parse timestamp");
+    tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id);
     return ret;
   }
   moveTimeStampToFirstKv(&smlData, timestamp);
-  tscDebug("Parse timestamp finished");
+  tscDebug("SML:0x%"PRIx64" Parse timestamp finished", info->id);
 
   return TSDB_CODE_SUCCESS;
 }
@@ -2104,7 +2106,7 @@ void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) {
 int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) {
   for (int32_t i = 0; i < numLines; ++i) {
     TAOS_SML_DATA_POINT point = {0};
-    int32_t code = tscParseLine(lines[i], &point);
+    int32_t code = tscParseLine(lines[i], &point, info);
     if (code != TSDB_CODE_SUCCESS) {
       tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
       destroySmlDataPoint(&point);
......
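The hunks above thread an SSmlLinesInfo pointer through every schemaless parse helper so each log line carries the batch id. A minimal, self-contained sketch of that pattern, with illustrative names rather than the actual TDengine definitions:

/* Every helper receives a context carrying a batch id, so all log lines from
 * one batch can be correlated by grepping "SML:0x<id>". */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t id; } ParseCtx;   /* stands in for SSmlLinesInfo */

static int parseKey(const char **cur, ParseCtx *ctx) {
  if (**cur == '\0') {
    /* the failure is attributable to the originating batch */
    fprintf(stderr, "SML:0x%" PRIx64 " unable to parse key\n", ctx->id);
    return -1;
  }
  return 0;
}

int main(void) {
  ParseCtx ctx = { .id = 0x2581 };
  const char *line = "";
  return parseKey(&line, &ctx) == 0 ? 0 : 1;
}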
@@ -255,10 +255,25 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
    pQdesc->qId = htobe64(pSql->res.qId);
    pQdesc->sqlObjId = htobe64(pSql->self);
    pQdesc->pid = pHeartbeat->pid;
-   if (pSql->cmd.pQueryInfo->stableQuery == true) {
+   pQdesc->stableQuery = pSql->cmd.pQueryInfo->stableQuery;
    pQdesc->numOfSub = pSql->subState.numOfSub;
+   char *p = pQdesc->subSqlInfo;
+   int32_t remainLen = sizeof(pQdesc->subSqlInfo);
+   if (pQdesc->numOfSub == 0) {
+     snprintf(p, remainLen, "N/A");
    } else {
-     pQdesc->numOfSub = 1;
+     int32_t len;
+     for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
+       len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i,
+           pSql->pSubs[i]->self,
+           pSql->subState.states[i] ? 'C' : 'I');
+       if (len > remainLen) {
+         break;
+       }
+       remainLen -= len;
+       p += len;
+     }
    }
    pQdesc->numOfSub = htonl(pQdesc->numOfSub);
......
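The hunk above appends one sub-query descriptor per iteration into a fixed-size buffer. A minimal sketch of the bounded snprintf-accumulation idiom it relies on, using illustrative values (note snprintf returns the length the full string would have needed, so a `>=` comparison also catches the exact-fit truncation case):

#include <stdio.h>

int main(void) {
  char buf[32];
  char *p = buf;
  int remain = (int)sizeof(buf);
  for (int i = 0; i < 8; ++i) {
    int len = snprintf(p, remain, "[%d] ", i);
    if (len < 0 || len >= remain) break;  /* stop once the entry no longer fits */
    remain -= len;   /* shrink the window ... */
    p += len;        /* ... and advance the cursor */
  }
  puts(buf);
  return 0;
}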
@@ -905,6 +905,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i);
    tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size);
    // normalizeSqlNode(pSqlNode); // normalize the column name in each function
    if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
      return code;
@@ -1293,35 +1294,31 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
  STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ SInterval* pInterval = &pQueryInfo->interval;
  if (pSliding->n == 0) {
-   pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
-   pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
+   pInterval->slidingUnit = pInterval->intervalUnit;
+   pInterval->sliding = pInterval->interval;
    return TSDB_CODE_SUCCESS;
  }
- if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
+ if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
  }
- parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding, tinfo.precision);
+ parseAbsoluteDuration(pSliding->z, pSliding->n, &pInterval->sliding, &pInterval->slidingUnit, tinfo.precision);
- if (pQueryInfo->interval.sliding <
-     convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
+ if (pInterval->sliding < convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
  }
- if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
+ if (pInterval->sliding > pInterval->interval) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
  }
- if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
+ if ((pInterval->interval != 0) && (pInterval->interval/pInterval->sliding > INTERVAL_SLIDING_FACTOR)) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
  }
- // if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) {
- //   return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
- // }
  return TSDB_CODE_SUCCESS;
}
@@ -2033,7 +2030,6 @@ static SUdfInfo* isValidUdf(SArray* pUdfInfo, const char* name, int32_t len) {
    tscError("udfinfo is null");
    return NULL;
  }
  size_t t = taosArrayGetSize(pUdfInfo);
  for(int32_t i = 0; i < t; ++i) {
    SUdfInfo* pUdf = taosArrayGet(pUdfInfo, i);
@@ -6166,6 +6162,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
      tVariantListItem* pItem = taosArrayGet(pVarList, 1);
      SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+     if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) {
+       return invalidOperationMsg(pMsg, msg14);
+     }
      pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
      if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
@@ -6356,10 +6357,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
    int16_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
-   int16_t i;
+   int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+   int32_t tagIndex = columnIndex.columnIndex - numOfCols;
+   assert(tagIndex>=0);
    uint32_t nLen = 0;
-   for (i = 0; i < numOfTags; ++i) {
-     nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes;
+   for (int i = 0; i < numOfTags; ++i) {
+     nLen += (i != tagIndex) ? pSchema[i].bytes : pItem->bytes;
    }
    if (nLen >= TSDB_MAX_TAGS_LEN) {
      return invalidOperationMsg(pMsg, msg24);
@@ -8369,6 +8372,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
  SArray* pVgroupList = NULL;
  SArray* plist = NULL;
  STableMeta* pTableMeta = NULL;
+ size_t tableMetaCapacity = 0;
  SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
  pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -8395,18 +8399,14 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    }
  }
- uint32_t maxSize = tscGetTableMetaMaxSize();
  char name[TSDB_TABLE_FNAME_LEN] = {0};
- assert(maxSize < 80 * TSDB_MAX_COLUMNS);
- if (!pSql->pBuf) {
-   if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
-     code = TSDB_CODE_TSC_OUT_OF_MEMORY;
-     goto _end;
-   }
- }
- pTableMeta = calloc(1, maxSize);
+ //if (!pSql->pBuf) {
+ //  if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
+ //    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ //    goto _end;
+ //  }
+ //}
  plist = taosArrayInit(4, POINTER_BYTES);
  pVgroupList = taosArrayInit(4, POINTER_BYTES);
@@ -8420,16 +8420,16 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    tNameExtractFullName(pname, name);
    size_t len = strlen(name);
-   memset(pTableMeta, 0, maxSize);
-   taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMeta);
-   if (pTableMeta->id.uid > 0) {
+   taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity);
+   if (pTableMeta && pTableMeta->id.uid > 0) {
      tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
      // avoid mem leak, may should update pTableMeta
      void* pVgroupIdList = NULL;
      if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
-       code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf);
+       code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity);
        // create the child table meta from super table failed, try load it from mnode
        if (code != TSDB_CODE_SUCCESS) {
@@ -8585,6 +8585,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
    STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN));
    pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta);
+   pTableMetaInfo->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
    assert(pTableMetaInfo->pTableMeta != NULL);
    if (p->vgroupIdList != NULL) {
@@ -8685,6 +8686,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
      return TSDB_CODE_TSC_OUT_OF_MEMORY;
    }
    pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
+   pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
    if (subInfo->aliasName.n > 0) {
      if (subInfo->aliasName.n >= TSDB_TABLE_FNAME_LEN) {
@@ -8733,6 +8735,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
  const char* msg6 = "not support stddev/percentile/interp in the outer query yet";
  const char* msg7 = "derivative/twa/irate requires timestamp column exists in subquery";
  const char* msg8 = "condition missing for join query";
+ const char* msg9 = "not support 3 level select";
  int32_t code = TSDB_CODE_SUCCESS;
@@ -8763,6 +8766,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
  // parse the subquery in the first place
  int32_t numOfSub = (int32_t)taosArrayGetSize(pSqlNode->from->list);
  for (int32_t i = 0; i < numOfSub; ++i) {
+   // check if there is 3 level select
+   SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
+   SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
+   if (p->from->type == SQL_NODE_FROM_SUBQUERY){
+     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+   }
    code = doValidateSubquery(pSqlNode, i, pSql, pQueryInfo, tscGetErrorMsgPayload(pCmd));
    if (code != TSDB_CODE_SUCCESS) {
      return code;
@@ -9039,8 +9049,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
#if 0
  SQueryNode* p = qCreateQueryPlan(pQueryInfo);
  char* s = queryPlanToString(p);
- printf("%s\n", s);
  tfree(s);
  qDestroyQueryPlan(p);
#endif
......
@@ -337,11 +337,16 @@ int tscSendMsgToServer(SSqlObj *pSql) {
  return TSDB_CODE_SUCCESS;
}
-void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
+static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
+ SRpcMsg* rpcMsg = pSchedMsg->ahandle;
+ SRpcEpSet* pEpSet = pSchedMsg->thandle;
  TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
  SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
  if (pSql == NULL) {
    rpcFreeCont(rpcMsg->pCont);
+   free(rpcMsg);
+   free(pEpSet);
    return;
  }
@@ -359,6 +364,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
    taosRemoveRef(tscObjRef, handle);
    taosReleaseRef(tscObjRef, handle);
    rpcFreeCont(rpcMsg->pCont);
+   free(rpcMsg);
+   free(pEpSet);
    return;
  }
@@ -370,6 +377,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
    taosRemoveRef(tscObjRef, handle);
    taosReleaseRef(tscObjRef, handle);
    rpcFreeCont(rpcMsg->pCont);
+   free(rpcMsg);
+   free(pEpSet);
    return;
  }
@@ -392,10 +401,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  // single table query error need to be handled here.
  if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
-     (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
-        rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
-      rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
-      rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
+     (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
+      rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
    // 1. super table subquery
    // 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
@@ -425,6 +432,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
    if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
      taosReleaseRef(tscObjRef, handle);
      rpcFreeCont(rpcMsg->pCont);
+     free(rpcMsg);
+     free(pEpSet);
      return;
    }
  }
@@ -502,6 +511,29 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  taosReleaseRef(tscObjRef, handle);
  rpcFreeCont(rpcMsg->pCont);
+ free(rpcMsg);
+ free(pEpSet);
+}
+
+void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
+ SSchedMsg schedMsg = {0};
+ schedMsg.fp = doProcessMsgFromServer;
+ SRpcMsg* rpcMsgCopy = calloc(1, sizeof(SRpcMsg));
+ memcpy(rpcMsgCopy, rpcMsg, sizeof(struct SRpcMsg));
+ schedMsg.ahandle = (void*)rpcMsgCopy;
+ SRpcEpSet* pEpSetCopy = NULL;
+ if (pEpSet != NULL) {
+   pEpSetCopy = calloc(1, sizeof(SRpcEpSet));
+   memcpy(pEpSetCopy, pEpSet, sizeof(SRpcEpSet));
+ }
+ schedMsg.thandle = (void*)pEpSetCopy;
+ schedMsg.msg = NULL;
+ taosScheduleTask(tscQhandle, &schedMsg);
}
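The rewrite above moves message processing off the RPC callback thread: the callback deep-copies its arguments onto the heap and queues a task, and the worker frees the copies on every exit path. A minimal, self-contained sketch of that hand-off pattern; the inline "scheduler" and types below are illustrative stand-ins, not taosScheduleTask or the real RPC structs:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

typedef struct { int code; } Msg;
typedef void (*TaskFn)(void *arg);

static void runNow(TaskFn fp, void *arg) { fp(arg); }  /* pretend task queue */

static void worker(void *arg) {
  Msg *m = arg;
  printf("processing code=%d\n", m->code);
  free(m);                        /* the worker owns and frees the copy */
}

static void rpcCallback(const Msg *m) {
  Msg *copy = malloc(sizeof(*copy));   /* copy: the caller's msg dies on return */
  if (copy == NULL) return;
  memcpy(copy, m, sizeof(*copy));
  runNow(worker, copy);                /* real code enqueues instead of calling inline */
}

int main(void) {
  Msg m = { .code = 0 };
  rpcCallback(&m);
  return 0;
}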
int doBuildAndSendMsg(SSqlObj *pSql) {
@@ -644,6 +676,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
  int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
  int32_t srcColFilterSize = 0;
+ int32_t srcTagFilterSize = tscGetTagFilterSerializeLen(pQueryInfo);
  size_t numOfExprs = tscNumOfExprs(pQueryInfo);
  int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
@@ -672,7 +706,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
    }
  }
- return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize +
+ return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize +
         tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
@@ -1020,7 +1054,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
    SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
    if (pCond != NULL && pCond->cond != NULL) {
-     pQueryMsg->tagCondLen = htons(pCond->len);
+     pQueryMsg->tagCondLen = htonl(pCond->len);
      memcpy(pMsg, pCond->cond, pCond->len);
      pMsg += pCond->len;
@@ -2825,43 +2859,22 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate, bool onlyLocal) {
  assert(tIsValidName(&pTableMetaInfo->name));
- uint32_t size = tscGetTableMetaMaxSize();
- if (pTableMetaInfo->pTableMeta == NULL) {
-   pTableMetaInfo->pTableMeta = malloc(size);
-   pTableMetaInfo->tableMetaSize = size;
- } else if (pTableMetaInfo->tableMetaSize < size) {
-   char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
-   if (tmp == NULL) {
-     return TSDB_CODE_TSC_OUT_OF_MEMORY;
-   }
-   pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
- }
- memset(pTableMetaInfo->pTableMeta, 0, size);
- pTableMetaInfo->tableMetaSize = size;
- pTableMetaInfo->pTableMeta->tableType = -1;
- pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
  char name[TSDB_TABLE_FNAME_LEN] = {0};
  tNameExtractFullName(&pTableMetaInfo->name, name);
  size_t len = strlen(name);
- taosHashGetClone(tscTableMetaMap, name, len, NULL, pTableMetaInfo->pTableMeta);
- // TODO resize the tableMeta
- assert(size < 80 * TSDB_MAX_COLUMNS);
- if (!pSql->pBuf) {
-   if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
-     return TSDB_CODE_TSC_OUT_OF_MEMORY;
-   }
- }
+ if (pTableMetaInfo->tableMetaCapacity != 0) {
+   if (pTableMetaInfo->pTableMeta != NULL) {
+     memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
+   }
+ }
+ taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);
  STableMeta* pMeta = pTableMetaInfo->pTableMeta;
- if (pMeta->id.uid > 0) {
+ if (pMeta && pMeta->id.uid > 0) {
    // in case of child table, here only get the
    if (pMeta->tableType == TSDB_CHILD_TABLE) {
-     int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, pSql->pBuf);
+     int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity);
      if (code != TSDB_CODE_SUCCESS) {
        return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
      }
......
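The lookups above switch from a fixed-size taosHashGetClone into a caller-owned buffer to taosHashGetCloneExt, which appears to grow the caller's buffer on demand and track its capacity. A sketch of that contract under those assumptions (this is a stand-in, not the real hash API):

#include <stdlib.h>
#include <string.h>

/* Realloc the caller's buffer only when the stored value is larger than the
 * recorded capacity, so repeated lookups reuse one allocation. */
static int getCloneExt(const void *src, size_t srcLen, void **buf, size_t *cap) {
  if (*cap < srcLen) {
    void *p = realloc(*buf, srcLen);
    if (p == NULL) return -1;
    *buf = p;
    *cap = srcLen;   /* capacity follows the largest value seen */
  }
  memcpy(*buf, src, srcLen);
  return 0;
}

int main(void) {
  void *meta = NULL; size_t cap = 0;
  const char blob[64] = "table-meta";
  if (getCloneExt(blob, sizeof(blob), &meta, &cap) != 0) return 1;
  if (getCloneExt(blob, 16, &meta, &cap) != 0) return 1;  /* smaller value: no realloc */
  free(meta);
  return 0;
}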
@@ -1636,6 +1636,8 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
      continue;
    }
    SSqlRes* pRes1 = &pSql1->res;
    if (pRes1->row >= pRes1->numOfRows) {
      subquerySetState(pSql1, &pSql->subState, i, 0);
......
@@ -1689,6 +1689,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
    pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
    pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
+   pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
  }
  /*
@@ -2827,7 +2828,8 @@ void tscDequoteAndTrimToken(SStrToken* pToken) {
}
int32_t tscValidateName(SStrToken* pToken) {
- if (pToken->type != TK_STRING && pToken->type != TK_ID) {
+ if (pToken == NULL || pToken->z == NULL ||
+     (pToken->type != TK_STRING && pToken->type != TK_ID)) {
    return TSDB_CODE_TSC_INVALID_OPERATION;
  }
@@ -3514,6 +3516,8 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
  } else {
    pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
  }
+ pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
  if (vgroupList != NULL) {
    pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@@ -3751,7 +3755,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
  if (pQueryInfo->fillType != TSDB_FILL_NONE) {
    //just make memory memory sanitizer happy
-   //refator later
+   //refactor later
    pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
    if (pNewQueryInfo->fillVal == NULL) {
      terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -4550,24 +4554,36 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
  return cMeta;
}
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) {
- assert(pChild != NULL && buf != NULL);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) {
+ assert(*ppChild != NULL);
- STableMeta* p = buf;
- taosHashGetClone(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p);
+ STableMeta* p = NULL;
+ size_t sz = 0;
+ STableMeta* pChild = *ppChild;
+ taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
  // tableMeta exists, build child table meta according to the super table meta
  // the uid need to be checked in addition to the general name of the super table.
- if (p->id.uid > 0 && pChild->suid == p->id.uid) {
+ if (p && p->id.uid > 0 && pChild->suid == p->id.uid) {
+   int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
+   int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
+   if (*tableMetaCapacity < tableMetaSize) {
+     pChild = realloc(pChild, tableMetaSize);
+     *tableMetaCapacity = (size_t)tableMetaSize;
+   }
    pChild->sversion = p->sversion;
    pChild->tversion = p->tversion;
    memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableComInfo));
-   int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
-   memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
+   memcpy(pChild->schema, p->schema, totalBytes);
+   *ppChild = pChild;
+   tfree(p);
    return TSDB_CODE_SUCCESS;
  } else { // super table has been removed, current tableMeta is also expired. remove it here
+   tfree(p);
    taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
    return -1;
  }
@@ -4790,6 +4806,21 @@ int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
  return len;
}
+int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo) {
+ // serialize tag column query condition
+ if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) {
+   STagCond* pTagCond = &pQueryInfo->tagCond;
+   STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+   STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
+   SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
+   if (pCond != NULL && pCond->cond != NULL) {
+     return pCond->len;
+   }
+ }
+ return 0;
+}
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
  memset(pQueryAttr, 0, sizeof(SQueryAttr));
......
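The new tscGetTagFilterSerializeLen feeds tscEstimateQueryMsgSize: any variable part later memcpy'd into the serialized query message (here the tag-filter condition) must also be counted during sizing, or the buffer can overflow. A small sketch of that sum-of-parts sizing, with illustrative field names:

#include <stddef.h>
#include <stdio.h>

typedef struct {
  size_t colListLen;
  size_t colFilterLen;
  size_t tagFilterLen;  /* the length this change starts counting */
  size_t exprLen;
} QueryParts;

static size_t estimateMsgSize(const QueryParts *q) {
  const size_t header = 512;  /* stand-in for the fixed header plus slack */
  return header + q->colListLen + q->colFilterLen + q->tagFilterLen + q->exprLen;
}

int main(void) {
  QueryParts q = { 128, 64, 256, 96 };
  printf("alloc %zu bytes\n", estimateMsgSize(&q));
  return 0;
}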
@@ -325,7 +325,9 @@ typedef struct SDataCol {
#define isAllRowsNull(pCol) ((pCol)->len == 0)
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
-void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
+int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
+
+void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
void dataColSetOffset(SDataCol *pCol, int nEle);
@@ -358,12 +360,10 @@ typedef struct {
  int maxRowSize;
  int maxCols;   // max number of columns
  int maxPoints; // max number of points
- int bufSize;
  int numOfRows;
  int numOfCols; // Total number of cols
  int sversion;  // TODO: set sversion
- void * buf;
  SDataCol *cols;
} SDataCols;
......
@@ -22,6 +22,29 @@
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
                               int limit2, int tRows, bool forceSetNull);
+//TODO: change caller to use return val
+int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
+ int spaceNeeded = pCol->bytes * maxPoints;
+ if(IS_VAR_DATA_TYPE(pCol->type)) {
+   spaceNeeded += sizeof(VarDataOffsetT) * maxPoints;
+ }
+ if(pCol->spaceSize < spaceNeeded) {
+   void* ptr = realloc(pCol->pData, spaceNeeded);
+   if(ptr == NULL) {
+     uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize,
+            strerror(errno));
+     return -1;
+   } else {
+     pCol->pData = ptr;
+     pCol->spaceSize = spaceNeeded;
+   }
+ }
+ if(IS_VAR_DATA_TYPE(pCol->type)) {
+   pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
+ }
+ return 0;
+}
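tdAllocMemForCol replaces the old single shared buffer with one grow-only block per column: for variable-length types the block holds maxPoints data bytes followed by the VarDataOffsetT offset array. A minimal model of that layout with simplified stand-in types:

#include <stdint.h>
#include <stdlib.h>

typedef int32_t VarDataOffsetT;

typedef struct {
  int bytes, spaceSize, isVar;
  void *pData;
  VarDataOffsetT *dataOff;
} Col;

static int allocForCol(Col *c, int maxPoints) {
  int need = c->bytes * maxPoints;
  if (c->isVar) need += (int)sizeof(VarDataOffsetT) * maxPoints;
  if (c->spaceSize < need) {      /* grow only; never shrink */
    void *p = realloc(c->pData, need);
    if (p == NULL) return -1;
    c->pData = p;
    c->spaceSize = need;
  }
  if (c->isVar)  /* the offset array lives right after the data area */
    c->dataOff = (VarDataOffsetT *)((char *)c->pData + c->bytes * maxPoints);
  return 0;
}

int main(void) {
  Col c = { .bytes = 8, .isVar = 1 };
  if (allocForCol(&c, 1024) != 0) return 1;
  free(c.pData);
  return 0;
}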
/**
 * Duplicate the schema and return a new object
 */
@@ -207,24 +230,13 @@ SMemRow tdMemRowDup(SMemRow row) {
  return trow;
}
-void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) {
+void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
  pDataCol->type = colType(pCol);
  pDataCol->colId = colColId(pCol);
  pDataCol->bytes = colBytes(pCol);
  pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE;
  pDataCol->len = 0;
- if (IS_VAR_DATA_TYPE(pDataCol->type)) {
-   pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
-   pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
-   pDataCol->spaceSize = pDataCol->bytes * maxPoints;
-   *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize + sizeof(VarDataOffsetT) * maxPoints);
- } else {
-   pDataCol->spaceSize = pDataCol->bytes * maxPoints;
-   pDataCol->dataOff = NULL;
-   pDataCol->pData = *pBuf;
-   *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize);
- }
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
@@ -239,6 +251,8 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
  if (numOfRows > 0) {
    // Find the first not null value, fill all previouse values as NULL
    dataColSetNEleNull(pCol, numOfRows, maxPoints);
+ } else {
+   tdAllocMemForCol(pCol, maxPoints);
  }
}
@@ -257,13 +271,14 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
}
bool isNEleNull(SDataCol *pCol, int nEle) {
+ if(isAllRowsNull(pCol)) return true;
  for (int i = 0; i < nEle; i++) {
    if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
  }
  return true;
}
-FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
+static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->dataOff[index] = pCol->len;
    char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
@@ -276,6 +291,7 @@ FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
}
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
+ tdAllocMemForCol(pCol, maxPoints);
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->len = 0;
@@ -319,56 +335,63 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
      tdFreeDataCols(pCols);
      return NULL;
    }
+   int i;
+   for(i = 0; i < maxCols; i++) {
+     pCols->cols[i].spaceSize = 0;
+     pCols->cols[i].len = 0;
+     pCols->cols[i].pData = NULL;
+     pCols->cols[i].dataOff = NULL;
+   }
    pCols->maxCols = maxCols;
  }
  pCols->maxRowSize = maxRowSize;
- pCols->bufSize = maxRowSize * maxRows;
- if (pCols->bufSize > 0) {
-   pCols->buf = malloc(pCols->bufSize);
-   if (pCols->buf == NULL) {
-     uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols,
-            strerror(errno));
-     tdFreeDataCols(pCols);
-     return NULL;
-   }
- }
  return pCols;
}
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
- if (schemaNCols(pSchema) > pCols->maxCols) {
+ int i;
+ int oldMaxCols = pCols->maxCols;
+ if (schemaNCols(pSchema) > oldMaxCols) {
    pCols->maxCols = schemaNCols(pSchema);
    pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
    if (pCols->cols == NULL) return -1;
+   for(i = oldMaxCols; i < pCols->maxCols; i++) {
+     pCols->cols[i].pData = NULL;
+     pCols->cols[i].dataOff = NULL;
+     pCols->cols[i].spaceSize = 0;
+   }
  }
  if (schemaTLen(pSchema) > pCols->maxRowSize) {
    pCols->maxRowSize = schemaTLen(pSchema);
-   pCols->bufSize = schemaTLen(pSchema) * pCols->maxPoints;
-   pCols->buf = realloc(pCols->buf, pCols->bufSize);
-   if (pCols->buf == NULL) return -1;
  }
  tdResetDataCols(pCols);
  pCols->numOfCols = schemaNCols(pSchema);
- void *ptr = pCols->buf;
- for (int i = 0; i < schemaNCols(pSchema); i++) {
-   dataColInit(pCols->cols + i, schemaColAt(pSchema, i), &ptr, pCols->maxPoints);
-   ASSERT((char *)ptr - (char *)(pCols->buf) <= pCols->bufSize);
+ for (i = 0; i < schemaNCols(pSchema); i++) {
+   dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints);
  }
  return 0;
}
SDataCols *tdFreeDataCols(SDataCols *pCols) {
+ int i;
  if (pCols) {
-   tfree(pCols->buf);
-   tfree(pCols->cols);
+   if(pCols->cols) {
+     int maxCols = pCols->maxCols;
+     for(i = 0; i < maxCols; i++) {
+       SDataCol *pCol = &pCols->cols[i];
+       tfree(pCol->pData);
+     }
+     free(pCols->cols);
+     pCols->cols = NULL;
+   }
    free(pCols);
  }
  return NULL;
@@ -388,21 +411,14 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
    pRet->cols[i].bytes = pDataCols->cols[i].bytes;
    pRet->cols[i].offset = pDataCols->cols[i].offset;
-   pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize;
-   pRet->cols[i].pData = (void *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf)));
-   if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
-     ASSERT(pDataCols->cols[i].dataOff != NULL);
-     pRet->cols[i].dataOff =
-         (int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf)));
-   }
    if (keepData) {
-     pRet->cols[i].len = pDataCols->cols[i].len;
      if (pDataCols->cols[i].len > 0) {
+       tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints);
+       pRet->cols[i].len = pDataCols->cols[i].len;
        memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
        if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
-         memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints);
+         int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints;
+         memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize);
        }
      }
    }
@@ -426,20 +442,9 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
  int rcol = 0;
  int dcol = 0;
- if (dataRowDeleted(row)) {
-   for (; dcol < pCols->numOfCols; dcol++) {
-     SDataCol *pDataCol = &(pCols->cols[dcol]);
-     if (dcol == 0) {
-       dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints);
-     } else {
-       dataColSetNullAt(pDataCol, pCols->numOfRows);
-     }
-   }
- } else {
  while (dcol < pCols->numOfCols) {
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= schemaNCols(pSchema)) {
-     // dataColSetNullAt(pDataCol, pCols->numOfRows);
      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      dcol++;
      continue;
@@ -455,13 +460,11 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
      rcol++;
    } else {
      if(forceSetNull) {
-       //dataColSetNullAt(pDataCol, pCols->numOfRows);
        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      }
      dcol++;
    }
  }
- }
  pCols->numOfRows++;
}
@@ -471,22 +474,11 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
  int rcol = 0;
  int dcol = 0;
- if (kvRowDeleted(row)) {
-   for (; dcol < pCols->numOfCols; dcol++) {
-     SDataCol *pDataCol = &(pCols->cols[dcol]);
-     if (dcol == 0) {
-       dataColAppendVal(pDataCol, kvRowValues(row), pCols->numOfRows, pCols->maxPoints);
-     } else {
-       dataColSetNullAt(pDataCol, pCols->numOfRows);
-     }
-   }
- } else {
  int nRowCols = kvRowNCols(row);
  while (dcol < pCols->numOfCols) {
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
-     // dataColSetNullAt(pDataCol, pCols->numOfRows);
      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      ++dcol;
      continue;
@@ -503,13 +495,11 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
      ++rcol;
    } else {
      if (forceSetNull) {
-       // dataColSetNullAt(pDataCol, pCols->numOfRows);
        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
      }
      ++dcol;
    }
  }
- }
  pCols->numOfRows++;
}
......
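After this refactor each SDataCol owns its own pData buffer, so teardown must walk every allocated column slot (maxCols, not numOfCols) before freeing the array itself. A simplified sketch of that ownership rule, with stand-in types:

#include <stdlib.h>

typedef struct { void *pData; } Col;
typedef struct { int maxCols; Col *cols; } Cols;

static void freeCols(Cols *pc) {
  if (pc == NULL) return;
  if (pc->cols != NULL) {
    for (int i = 0; i < pc->maxCols; i++) free(pc->cols[i].pData);  /* per-column buffers */
    free(pc->cols);
  }
  free(pc);
}

int main(void) {
  Cols *pc = calloc(1, sizeof(*pc));
  pc->maxCols = 4;
  pc->cols = calloc(pc->maxCols, sizeof(Col));
  pc->cols[0].pData = malloc(32);  /* lazily allocated column */
  freeCols(pc);
  return 0;
}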
-Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d
+Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
@@ -122,6 +122,7 @@
              <exclude>**/TSDBJNIConnectorTest.java</exclude>
              <exclude>**/TaosInfoMonitorTest.java</exclude>
              <exclude>**/UnsignedNumberJniTest.java</exclude>
+             <exclude>**/TimeZoneTest.java</exclude>
            </excludes>
            <testFailureIgnore>true</testFailureIgnore>
          </configuration>
......
@@ -77,8 +77,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
    }
    public boolean supportsMixedCaseIdentifiers() throws SQLException {
-       // whether identifiers for objects such as databases and tables are stored in mixed case
-       return false;
+       return false; // whether identifiers for objects such as databases and tables are stored in mixed case
    }
    public boolean storesUpperCaseIdentifiers() throws SQLException {
@@ -514,7 +513,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col6 = new ColumnMetaData();
        col6.setColIndex(colIndex);
        col6.setColName("TYPE_CAT");
-       col6.setColType(Types.NCHAR);
+       col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col6;
    }
@@ -522,7 +521,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col7 = new ColumnMetaData();
        col7.setColIndex(colIndex);
        col7.setColName("TYPE_SCHEM");
-       col7.setColType(Types.NCHAR);
+       col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col7;
    }
@@ -530,7 +529,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col8 = new ColumnMetaData();
        col8.setColIndex(colIndex);
        col8.setColName("TYPE_NAME");
-       col8.setColType(Types.NCHAR);
+       col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col8;
    }
@@ -538,7 +537,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col9 = new ColumnMetaData();
        col9.setColIndex(colIndex);
        col9.setColName("SELF_REFERENCING_COL_NAME");
-       col9.setColType(Types.NCHAR);
+       col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col9;
    }
@@ -546,7 +545,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col10 = new ColumnMetaData();
        col10.setColIndex(colIndex);
        col10.setColName("REF_GENERATION");
-       col10.setColType(Types.NCHAR);
+       col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col10;
    }
@@ -592,7 +591,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col4 = new ColumnMetaData();
        col4.setColIndex(colIndex);
        col4.setColName("TABLE_TYPE");
-       col4.setColType(Types.NCHAR);
+       col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col4;
    }
@@ -734,7 +733,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col1 = new ColumnMetaData();
        col1.setColIndex(colIndex);
        col1.setColName("TABLE_CAT");
-       col1.setColType(Types.NCHAR);
+       col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col1;
    }
@@ -742,7 +741,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col2 = new ColumnMetaData();
        col2.setColIndex(colIndex);
        col2.setColName("TABLE_SCHEM");
-       col2.setColType(Types.NCHAR);
+       col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col2;
    }
@@ -751,7 +750,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        col3.setColIndex(colIndex);
        col3.setColName("TABLE_NAME");
        col3.setColSize(193);
-       col3.setColType(Types.NCHAR);
+       col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col3;
    }
@@ -760,7 +759,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        col4.setColIndex(colIndex);
        col4.setColName("COLUMN_NAME");
        col4.setColSize(65);
-       col4.setColType(Types.NCHAR);
+       col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col4;
    }
@@ -768,7 +767,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col5 = new ColumnMetaData();
        col5.setColIndex(colIndex);
        col5.setColName("DATA_TYPE");
-       col5.setColType(Types.INTEGER);
+       col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col5;
    }
@@ -776,7 +775,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col7 = new ColumnMetaData();
        col7.setColIndex(7);
        col7.setColName("COLUMN_SIZE");
-       col7.setColType(Types.INTEGER);
+       col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col7;
    }
@@ -791,7 +790,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col9 = new ColumnMetaData();
        col9.setColIndex(9);
        col9.setColName("DECIMAL_DIGITS");
-       col9.setColType(Types.INTEGER);
+       col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col9;
    }
@@ -799,7 +798,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col10 = new ColumnMetaData();
        col10.setColIndex(10);
        col10.setColName("NUM_PREC_RADIX");
-       col10.setColType(Types.INTEGER);
+       col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col10;
    }
@@ -807,7 +806,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col11 = new ColumnMetaData();
        col11.setColIndex(11);
        col11.setColName("NULLABLE");
-       col11.setColType(Types.INTEGER);
+       col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col11;
    }
@@ -815,7 +814,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col12 = new ColumnMetaData();
        col12.setColIndex(colIndex);
        col12.setColName("REMARKS");
-       col12.setColType(Types.NCHAR);
+       col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col12;
    }
@@ -823,7 +822,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col13 = new ColumnMetaData();
        col13.setColIndex(13);
        col13.setColName("COLUMN_DEF");
-       col13.setColType(Types.NCHAR);
+       col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col13;
    }
@@ -831,7 +830,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col14 = new ColumnMetaData();
        col14.setColIndex(14);
        col14.setColName("SQL_DATA_TYPE");
-       col14.setColType(Types.INTEGER);
+       col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col14;
    }
@@ -839,7 +838,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col15 = new ColumnMetaData();
        col15.setColIndex(15);
        col15.setColName("SQL_DATETIME_SUB");
-       col15.setColType(Types.INTEGER);
+       col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col15;
    }
@@ -847,7 +846,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col16 = new ColumnMetaData();
        col16.setColIndex(16);
        col16.setColName("CHAR_OCTET_LENGTH");
-       col16.setColType(Types.INTEGER);
+       col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col16;
    }
@@ -855,7 +854,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col17 = new ColumnMetaData();
        col17.setColIndex(17);
        col17.setColName("ORDINAL_POSITION");
-       col17.setColType(Types.INTEGER);
+       col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
        return col17;
    }
@@ -863,7 +862,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col18 = new ColumnMetaData();
        col18.setColIndex(18);
        col18.setColName("IS_NULLABLE");
-       col18.setColType(Types.NCHAR);
+       col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col18;
    }
@@ -871,7 +870,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col19 = new ColumnMetaData();
        col19.setColIndex(19);
        col19.setColName("SCOPE_CATALOG");
-       col19.setColType(Types.NCHAR);
+       col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col19;
    }
@@ -879,7 +878,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col20 = new ColumnMetaData();
        col20.setColIndex(20);
        col20.setColName("SCOPE_SCHEMA");
-       col20.setColType(Types.NCHAR);
+       col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col20;
    }
@@ -887,7 +886,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col21 = new ColumnMetaData();
        col21.setColIndex(21);
        col21.setColName("SCOPE_TABLE");
-       col21.setColType(Types.NCHAR);
+       col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col21;
    }
@@ -903,7 +902,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col23 = new ColumnMetaData();
        col23.setColIndex(23);
        col23.setColName("IS_AUTOINCREMENT");
-       col23.setColType(Types.NCHAR);
+       col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col23;
    }
@@ -911,7 +910,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col24 = new ColumnMetaData();
        col24.setColIndex(24);
        col24.setColName("IS_GENERATEDCOLUMN");
-       col24.setColType(Types.NCHAR);
+       col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col24;
    }
@@ -1205,7 +1204,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col5 = new ColumnMetaData();
        col5.setColIndex(colIndex);
        col5.setColName("KEY_SEQ");
-       col5.setColType(Types.SMALLINT);
+       col5.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
        return col5;
    }
@@ -1213,7 +1212,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
        ColumnMetaData col6 = new ColumnMetaData();
        col6.setColIndex(colIndex);
        col6.setColName("PK_NAME");
-       col6.setColType(Types.NCHAR);
+       col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        return col6;
    }
@@ -1275,7 +1274,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col4 = new ColumnMetaData(); ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(colIndex); col4.setColIndex(colIndex);
col4.setColName("SUPERTABLE_NAME"); col4.setColName("SUPERTABLE_NAME");
col4.setColType(Types.NCHAR); col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4; return col4;
} }
......
...@@ -16,7 +16,7 @@ package com.taosdata.jdbc; ...@@ -16,7 +16,7 @@ package com.taosdata.jdbc;
public class ColumnMetaData { public class ColumnMetaData {
private int colType = 0; private int colType = 0; //taosType
private String colName = null; private String colName = null;
private int colSize = -1; private int colSize = -1;
private int colIndex = 0; private int colIndex = 0;
......
...@@ -68,71 +68,61 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet { ...@@ -68,71 +68,61 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override @Override
public String getString(int columnIndex) throws SQLException { public String getString(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getString(columnIndex, colType);
return rowCursor.getString(columnIndex, nativeType);
} }
@Override @Override
public boolean getBoolean(int columnIndex) throws SQLException { public boolean getBoolean(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getBoolean(columnIndex, colType);
return rowCursor.getBoolean(columnIndex, nativeType);
} }
@Override @Override
public byte getByte(int columnIndex) throws SQLException { public byte getByte(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return (byte) rowCursor.getInt(columnIndex, colType);
return (byte) rowCursor.getInt(columnIndex, nativeType);
} }
@Override @Override
public short getShort(int columnIndex) throws SQLException { public short getShort(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return (short) rowCursor.getInt(columnIndex, colType);
return (short) rowCursor.getInt(columnIndex, nativeType);
} }
@Override @Override
public int getInt(int columnIndex) throws SQLException { public int getInt(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getInt(columnIndex, colType);
return rowCursor.getInt(columnIndex, nativeType);
} }
@Override @Override
public long getLong(int columnIndex) throws SQLException { public long getLong(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getLong(columnIndex, colType);
return rowCursor.getLong(columnIndex, nativeType);
} }
@Override @Override
public float getFloat(int columnIndex) throws SQLException { public float getFloat(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getFloat(columnIndex, colType);
return rowCursor.getFloat(columnIndex, nativeType);
} }
@Override @Override
public double getDouble(int columnIndex) throws SQLException { public double getDouble(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getDouble(columnIndex, colType);
return rowCursor.getDouble(columnIndex, nativeType);
} }
@Override @Override
public byte[] getBytes(int columnIndex) throws SQLException { public byte[] getBytes(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return (rowCursor.getString(columnIndex, colType)).getBytes();
return (rowCursor.getString(columnIndex, nativeType)).getBytes();
} }
@Override @Override
public Timestamp getTimestamp(int columnIndex) throws SQLException { public Timestamp getTimestamp(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); return rowCursor.getTimestamp(columnIndex, colType);
return rowCursor.getTimestamp(columnIndex, nativeType);
} }
@Override @Override
...@@ -158,8 +148,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet { ...@@ -158,8 +148,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override @Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException { public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int colType = columnMetaDataList.get(columnIndex - 1).getColType();
int nativeType = TSDBConstants.jdbcType2TaosType(colType); double value = rowCursor.getDouble(columnIndex, colType);
double value = rowCursor.getDouble(columnIndex, nativeType);
return new BigDecimal(value); return new BigDecimal(value);
} }
......
...@@ -129,8 +129,9 @@ public abstract class TSDBConstants { ...@@ -129,8 +129,9 @@ public abstract class TSDBConstants {
return Types.TIMESTAMP; return Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR; return Types.NCHAR;
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
} }
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
} }
public static String taosType2JdbcTypeName(int taosType) throws SQLException { public static String taosType2JdbcTypeName(int taosType) throws SQLException {
...@@ -160,7 +161,7 @@ public abstract class TSDBConstants { ...@@ -160,7 +161,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR"; return "NCHAR";
default: default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
} }
} }
...@@ -187,7 +188,7 @@ public abstract class TSDBConstants { ...@@ -187,7 +188,7 @@ public abstract class TSDBConstants {
case Types.NCHAR: case Types.NCHAR:
return TSDBConstants.TSDB_DATA_TYPE_NCHAR; return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
} }
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
} }
public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException { public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException {
...@@ -213,7 +214,7 @@ public abstract class TSDBConstants { ...@@ -213,7 +214,7 @@ public abstract class TSDBConstants {
case Types.NCHAR: case Types.NCHAR:
return "NCHAR"; return "NCHAR";
default: default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
} }
} }
......
...@@ -80,7 +80,8 @@ public class TSDBJNIConnector { ...@@ -80,7 +80,8 @@ public class TSDBJNIConnector {
this.taos = this.connectImp(host, port, dbName, user, password); this.taos = this.connectImp(host, port, dbName, user, password);
if (this.taos == TSDBConstants.JNI_NULL_POINTER) { if (this.taos == TSDBConstants.JNI_NULL_POINTER) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); String errMsg = this.getErrMsg(0);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, errMsg);
} }
// invoke connectImp only here // invoke connectImp only here
taosInfo.conn_open_increment(); taosInfo.conn_open_increment();
......
...@@ -34,9 +34,8 @@ public class QueryDataTest { ...@@ -34,9 +34,8 @@ public class QueryDataTest {
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))"; String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql); statement.executeUpdate(createTableSql);
} catch (SQLException e) { } catch (SQLException e) {
return; e.printStackTrace();
} }
} }
......
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.Test;
import java.sql.*;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Properties;
public class TimeZoneTest {
private String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
@Test
public void javaTimeZone() {
LocalDateTime localDateTime = LocalDateTime.of(1970, 1, 1, 0, 0, 0);
Instant instant = localDateTime.atZone(ZoneId.of("UTC-8")).toInstant();
System.out.println("UTC-8: " + instant.getEpochSecond() + "," + instant);
instant = localDateTime.atZone(ZoneId.of("UT")).toInstant();
System.out.println("UTC: " + instant.getEpochSecond() + "," + instant);
instant = localDateTime.atZone(ZoneId.of("UTC+8")).toInstant();
System.out.println("UTC+8: " + instant.getEpochSecond() + "," + instant);
}
@Test
public void taosTimeZone() {
// given
Properties props = new Properties();
props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
// when and then
try (Connection connection = DriverManager.getConnection(url, props)) {
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists timezone_test");
stmt.execute("create database if not exists timezone_test keep 365000");
stmt.execute("use timezone_test");
stmt.execute("create table weather(ts timestamp, temperature float)");
stmt.execute("insert into timezone_test.weather(ts, temperature) values('1970-01-01 00:00:00', 1.0)");
ResultSet rs = stmt.executeQuery("select * from timezone_test.weather");
while (rs.next()) {
Timestamp ts = rs.getTimestamp("ts");
System.out.println("ts: " + ts.getTime() + "," + ts);
}
stmt.execute("insert into timezone_test.weather(ts, temperature, humidity) values('1970-01-02 00:00:00', 1.0, 2.0)");
rs = stmt.executeQuery("select * from timezone_test.weather");
while (rs.next()) {
Timestamp ts = rs.getTimestamp("ts");
System.out.println("ts: " + ts.getTime() + "," + ts);
}
stmt.execute("drop database if exists timezone_test");
stmt.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main package main
import ( import (
......
# TDengine Connector for Python # TDengine Connector for Python
[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications. [TDengine](https://github.com/taosdata/TDengine) connector for Python enables python programs to access TDengine,
using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
## Install ## Install
...@@ -11,8 +12,417 @@ pip install ./TDengine/src/connector/python ...@@ -11,8 +12,417 @@ pip install ./TDengine/src/connector/python
## Source Code ## Source Code
[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python). [TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
## License - AGPL ## Examples
### Query with PEP-249 API
```python
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
print(row)
cursor.close()
conn.close()
```
### Query with object-oriented API
```python
import taos
conn = taos.connect()
conn.exec("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
print(field)
for row in result:
print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```
### Query with async API
```python
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
pass
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
conn.close()
if __name__ == "__main__":
test_query(connect())
```
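To trace the flow above: `query_a` only registers `query_callback`; when the query completes successfully, the callback chains `fetch_rows_a(fetch_callback, p_param)`, and `fetch_callback` re-arms itself after each batch until `num_of_rows` reaches 0 (done) or goes negative (error), at which point it closes the result and flips `Counter.done` so the polling loop in `test_query` can exit.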
### Statement API - Bind row after row
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
### Statement API - Bind multi rows
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
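Unlike the row-by-row variant, `new_multi_binds(16)` allocates one `TaosMultiBind` per column: each setter receives the whole column as a list or tuple, `None` entries become NULL, and a single `bind_param_batch` call therefore inserts all three rows at once.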
### Subscribe
```python
import taos
from random import random
conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.exec("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
print("# consume with a stop condition")
for i in range(10):
conn.exec("insert into log values(now, %d)" % int(random() * 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
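Judging by these examples, the first argument of `subscribe` is a restart flag: `True` replays the topic from the beginning, while `False` resumes from the saved progress; `1000` appears to be the polling interval in milliseconds.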
### Subscribe asynchronously with callback
```python
from taos import *
from ctypes import *
import time
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.exec("insert into log values(now, %d)" % i)
time.sleep(0.7)
sub.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_subscribe_callback(connect())
```
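In this asynchronous form nothing calls `consume()`: the driver polls at the given interval (1000 ms here) and hands each batch to `subscribe_callback`, which is why the inserts are merely spaced out with `time.sleep(0.7)` while the callback prints the rows.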
### Stream
```python
from taos import *
from ctypes import *
import time
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())
```
### Insert with line protocol
```python
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
result.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
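Each line above follows the pattern `measurement,tags fields timestamp`: suffixes such as `i64` and `f64` declare integer and floating-point widths, `L"..."` marks an NCHAR string (plain `"..."` is BINARY), and the trailing `ns` gives the timestamp unit as nanoseconds. A rough sketch of that anatomy (a naive split for illustration only; quoted values containing spaces would need a real parser):

```python
line = 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns'
# split into the three space-separated sections of the protocol
measurement_and_tags, fields, timestamp = line.rsplit(" ", 2)
print(measurement_and_tags)  # st,t1=3i64,t2=4f64,t3="t3"
print(fields)                # c1=3i64,c3=L"pass",c2=false,c4=4f64
print(timestamp)             # 1626006833639000000ns
```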
## License - AGPL-3.0
Keep same with [TDengine](https://github.com/taosdata/TDengine). Keep same with [TDengine](https://github.com/taosdata/TDengine).
# encoding:UTF-8
from taos import *
conn = connect()
dbname = "pytest_taos_stmt_multi"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
\ No newline at end of file
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
# No need to close explicitly, but it is fine if you do
# result.close()
result = conn.query("select * from log")
for row in result:
print(row)
# No need to close explicitly, but it is fine if you do
# result.close()
# stmt.close()
# conn.close()
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
]
conn.insert_lines(lines)
print("inserted")
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
print(row)
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
# the result must be closed explicitly once fetching completes, otherwise an error occurs
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
pass
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
# explicitly close the result when the query fails
result.close()
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
# conn.close()
if __name__ == "__main__":
test_query(connect())
\ No newline at end of file
import taos
conn = taos.connect()
conn.execute("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
print(field)
for row in result:
print(row)
conn.execute("drop database pytest")
from taos import *
from ctypes import *
import time
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
time.sleep(0.7)
# sub.close()
conn.execute("drop database if exists %s" % dbname)
# conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
# conn.close()
raise err
if __name__ == "__main__":
test_subscribe_callback(connect())
import taos
import random
conn = taos.connect()
dbname = "pytest_taos_subscribe"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(False, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
sub.close(True)
print("# keep progress consume")
sub = conn.subscribe(False, "test", "select * from log", 1000)
result = sub.consume()
rows = result.fetch_all()
# consuming from the latest subscription position needs root privilege (for /var/lib/taos).
assert result.row_count == 0
print("## consumed ", len(rows), "rows")
print("# consume with a stop condition")
for i in range(10):
conn.execute("insert into log values(now, %d)" % random.randint(0, 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
# sub.close()
conn.execute("drop database if exists %s" % dbname)
# conn.close()
[tool.poetry]
name = "taos"
version = "2.1.0"
description = "TDengine connector for python"
authors = ["Taosdata Inc. <support@taosdata.com>"]
license = "AGPL-3.0"
readme = "README.md"
[tool.poetry.dependencies]
python = "^2.7 || ^3.4"
typing = "*"
[tool.poetry.dev-dependencies]
pytest = [
{ version = "^4.6", python = "^2.7" },
{ version = "^6.2", python = "^3.7" }
]
pdoc = { version = "^7.1.1", python = "^3.7" }
mypy = { version = "^0.910", python = "^3.6" }
black = { version = "^21.7b0", python = "^3.6" }
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 119
...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh: ...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup( setuptools.setup(
name="taos", name="taos",
version="2.0.11", version="2.1.0",
author="Taosdata Inc.", author="Taosdata Inc.",
author_email="support@taosdata.com", author_email="support@taosdata.com",
description="TDengine python client package", description="TDengine python client package",
......
# encoding:UTF-8
"""
# TDengine Connector for Python
from .connection import TDengineConnection [TDengine](https://github.com/taosdata/TDengine) connector for Python enables python programs to access TDengine,
from .cursor import TDengineCursor using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications.
## Install
```sh
git clone --depth 1 https://github.com/taosdata/TDengine.git
pip install ./TDengine/src/connector/python
```
## Source Code
[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
## Examples
### Query with PEP-249 API
```python
import taos
conn = taos.connect()
cursor = conn.cursor()
cursor.execute("show databases")
results = cursor.fetchall()
for row in results:
print(row)
cursor.close()
conn.close()
```
### Query with object-oriented API
```python
import taos
conn = taos.connect()
conn.exec("create database if not exists pytest")
result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
print(field)
for row in result:
print(row)
result.close()
conn.exec("drop database pytest")
conn.close()
```
### Query with async API
```python
from taos import *
from ctypes import *
import time
def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result)
if num_of_rows == 0:
print("fetching completed")
p.contents.done = True
result.close()
return
if num_of_rows < 0:
p.contents.done = True
result.check_error(num_of_rows)
result.close()
return None
for row in result.rows_iter(num_of_rows):
# print(row)
pass
p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param)
def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None
if p_result is None:
return
result = TaosResult(p_result)
if code == 0:
result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code)
class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done)
def test_query(conn):
# type: (TaosConnection) -> None
counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done:
print("wait query callback")
time.sleep(1)
print(counter)
conn.close()
if __name__ == "__main__":
test_query(connect())
```
### Statement API - Bind row after row
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].null()
params[3].tinyint(2)
params[4].smallint(3)
params[5].int(4)
params[6].bigint(5)
params[7].tinyint_unsigned(6)
params[8].smallint_unsigned(7)
params[9].int_unsigned(8)
params[10].bigint_unsigned(9)
params[11].float(10.1)
params[12].double(10.11)
params[13].binary("hello")
params[14].nchar("stmt")
params[15].timestamp(1626861392589)
stmt.bind_param(params)
params[0].timestamp(1626861392590)
params[15].null()
stmt.bind_param(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 2
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
### Statement API - Bind multi rows
```python
from taos import *
conn = connect()
dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
)
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
stmt.bind_param_batch(params)
stmt.execute()
result = stmt.use_result()
assert result.affected_rows == 3
result.close()
result = conn.query("select * from log")
for row in result:
print(row)
result.close()
stmt.close()
conn.close()
```
### Subscribe
```python
import taos
from random import random
conn = taos.connect()
dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
for i in range(10):
conn.exec("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin")
for ts, n in sub.consume():
print(ts, n)
print("# consume new data")
for i in range(5):
conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume()
for ts, n in result:
print(ts, n)
print("# consume with a stop condition")
for i in range(10):
conn.exec("insert into log values(now, %d)" % int(random() * 10))
result = sub.consume()
try:
ts, n = next(result)
print(ts, n)
if n > 5:
result.stop_query()
print("## stopped")
break
except StopIteration:
continue
sub.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
### Subscribe asynchronously with callback
```python
from taos import *
from ctypes import *
import time
def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback")
result = TaosResult(p_result)
result.check_error(errno)
for row in result.rows_iter():
ts, n = row()
print(ts, n)
def test_subscribe_callback(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10):
conn.exec("insert into log values(now, %d)" % i)
time.sleep(0.7)
sub.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_subscribe_callback(connect())
```
### Stream
```python
from taos import *
from ctypes import *
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.exec("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())
```
### Insert with line protocol
```python
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
print("inserted")
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
]
conn.insert_lines(lines)
result = conn.query("show tables")
for row in result:
print(row)
result.close()
conn.exec("drop database if exists %s" % dbname)
conn.close()
```
## License - AGPL-3.0
Keep same with [TDengine](https://github.com/taosdata/TDengine).
"""
from .connection import TaosConnection
# For some reason, the following is needed for VS Code (through PyLance) to # For some reason, the following is needed for VS Code (through PyLance) to
# recognize that "error" is a valid module of the "taos" package. # recognize that "error" is a valid module of the "taos" package.
from .error import ProgrammingError from .error import *
from .bind import *
from .field import *
from .cursor import *
from .result import *
from .statement import *
from .subscription import *
try:
import importlib.metadata
__version__ = importlib.metadata.version("taos")
except:
pass
# Globals # Globals
threadsafety = 0 threadsafety = 0
paramstyle = 'pyformat' paramstyle = "pyformat"
__all__ = ['connection', 'cursor']
__all__ = [
# functions
"connect",
"new_bind_param",
"new_bind_params",
"new_multi_binds",
"new_multi_bind",
# objects
"TaosBind",
"TaosConnection",
"TaosCursor",
"TaosResult",
"TaosRows",
"TaosRow",
"TaosStmt",
"PrecisionEnum",
]
def connect(*args, **kwargs): def connect(*args, **kwargs):
""" Function to return a TDengine connector object # type: (..., ...) -> TaosConnection
"""Function to return a TDengine connector object
Current supporting keyword parameters: Current supporting keyword parameters:
@dsn: Data source name as string @dsn: Data source name as string
...@@ -25,4 +483,4 @@ def connect(*args, **kwargs): ...@@ -25,4 +483,4 @@ def connect(*args, **kwargs):
@rtype: TDengineConnector @rtype: TDengineConnector
""" """
return TDengineConnection(*args, **kwargs) return TaosConnection(*args, **kwargs)
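# A usage sketch for connect() (an assumption pieced together from the keyword
# handling in TaosConnection.config() below, not an official snippet; every
# parameter is optional and falls back to the C client's defaults):
#
#   import taos
#   conn = taos.connect(host="127.0.0.1", user="root", password="taosdata",
#                       database="log", port=6030)
#   result = conn.query("select * from log.log")
#   for row in result:
#       print(row)
#   conn.close()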
# encoding:UTF-8
import ctypes
from .constants import FieldType
from .error import *
from .precision import *
from datetime import datetime
from ctypes import *
import sys
_datetime_epoch = datetime.utcfromtimestamp(0)
def _is_not_none(obj):
return obj is not None
class TaosBind(ctypes.Structure):
_fields_ = [
("buffer_type", c_int),
("buffer", c_void_p),
("buffer_length", c_size_t),
("length", POINTER(c_size_t)),
("is_null", POINTER(c_int)),
("is_unsigned", c_int),
("error", POINTER(c_int)),
("u", c_int64),
("allocated", c_int),
]
def null(self):
self.buffer_type = FieldType.C_NULL
self.is_null = pointer(c_int(1))
def bool(self, value):
self.buffer_type = FieldType.C_BOOL
self.buffer = cast(pointer(c_bool(value)), c_void_p)
self.buffer_length = sizeof(c_bool)
def tinyint(self, value):
self.buffer_type = FieldType.C_TINYINT
self.buffer = cast(pointer(c_int8(value)), c_void_p)
self.buffer_length = sizeof(c_int8)
def smallint(self, value):
self.buffer_type = FieldType.C_SMALLINT
self.buffer = cast(pointer(c_int16(value)), c_void_p)
self.buffer_length = sizeof(c_int16)
def int(self, value):
self.buffer_type = FieldType.C_INT
self.buffer = cast(pointer(c_int32(value)), c_void_p)
self.buffer_length = sizeof(c_int32)
def bigint(self, value):
self.buffer_type = FieldType.C_BIGINT
self.buffer = cast(pointer(c_int64(value)), c_void_p)
self.buffer_length = sizeof(c_int64)
def float(self, value):
self.buffer_type = FieldType.C_FLOAT
self.buffer = cast(pointer(c_float(value)), c_void_p)
self.buffer_length = sizeof(c_float)
def double(self, value):
self.buffer_type = FieldType.C_DOUBLE
self.buffer = cast(pointer(c_double(value)), c_void_p)
self.buffer_length = sizeof(c_double)
def binary(self, value):
buffer = None
length = 0
if isinstance(value, str):
bytes = value.encode("utf-8")
buffer = create_string_buffer(bytes)
length = len(bytes)
else:
buffer = value
length = len(value)
self.buffer_type = FieldType.C_BINARY
self.buffer = cast(buffer, c_void_p)
self.buffer_length = length
self.length = pointer(c_size_t(self.buffer_length))
def timestamp(self, value, precision=PrecisionEnum.Milliseconds):
if type(value) is datetime:
if precision == PrecisionEnum.Milliseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif type(value) is float:
if precision == PrecisionEnum.Milliseconds:
ts = int(round(value * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round(value * 1000000))
else:
raise PrecisionError("time float does not support nanosecond precision")
elif isinstance(value, int) and not isinstance(value, bool):
ts = value
elif isinstance(value, str):
value = datetime.fromisoformat(value)
if precision == PrecisionEnum.Milliseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
self.buffer_type = FieldType.C_TIMESTAMP
self.buffer = cast(pointer(c_int64(ts)), c_void_p)
self.buffer_length = sizeof(c_int64)
def nchar(self, value):
buffer = None
length = 0
if isinstance(value, str):
bytes = value.encode("utf-8")
buffer = create_string_buffer(bytes)
length = len(bytes)
else:
buffer = value
length = len(value)
self.buffer_type = FieldType.C_NCHAR
self.buffer = cast(buffer, c_void_p)
self.buffer_length = length
self.length = pointer(c_size_t(self.buffer_length))
def tinyint_unsigned(self, value):
self.buffer_type = FieldType.C_TINYINT_UNSIGNED
self.buffer = cast(pointer(c_uint8(value)), c_void_p)
self.buffer_length = sizeof(c_uint8)
def smallint_unsigned(self, value):
self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
self.buffer = cast(pointer(c_uint16(value)), c_void_p)
self.buffer_length = sizeof(c_uint16)
def int_unsigned(self, value):
self.buffer_type = FieldType.C_INT_UNSIGNED
self.buffer = cast(pointer(c_uint32(value)), c_void_p)
self.buffer_length = sizeof(c_uint32)
def bigint_unsigned(self, value):
self.buffer_type = FieldType.C_BIGINT_UNSIGNED
self.buffer = cast(pointer(c_uint64(value)), c_void_p)
self.buffer_length = sizeof(c_uint64)
def _datetime_to_timestamp(value, precision):
# type: (datetime | float | int | str | c_int64, PrecisionEnum) -> c_int64
if value is None:
return FieldType.C_BIGINT_NULL
if type(value) is datetime:
if precision == PrecisionEnum.Milliseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif type(value) is float:
if precision == PrecisionEnum.Milliseconds:
return int(round(value * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round(value * 1000000))
else:
raise PrecisionError("time float does not support nanosecond precision")
elif isinstance(value, int) and not isinstance(value, bool):
return c_int64(value)
elif isinstance(value, str):
value = datetime.fromisoformat(value)
if precision == PrecisionEnum.Milliseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000))
elif precision == PrecisionEnum.Microseconds:
return int(round((value - _datetime_epoch).total_seconds() * 1000000))
else:
raise PrecisionError("datetime does not support nanosecond precision")
elif isinstance(value, c_int64):
return value
return FieldType.C_BIGINT_NULL
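# A quick sanity check of the conversion rules above (assumed inputs, for
# illustration only):
#   _datetime_to_timestamp(None, PrecisionEnum.Milliseconds)          -> FieldType.C_BIGINT_NULL
#   _datetime_to_timestamp(1626861392589, PrecisionEnum.Milliseconds) -> c_int64(1626861392589)
#   _datetime_to_timestamp(1.5, PrecisionEnum.Milliseconds)           -> 1500
#   _datetime_to_timestamp("1970-01-01 00:00:01", PrecisionEnum.Milliseconds) -> 1000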
class TaosMultiBind(ctypes.Structure):
_fields_ = [
("buffer_type", c_int),
("buffer", c_void_p),
("buffer_length", c_size_t),
("length", POINTER(c_int32)),
("is_null", c_char_p),
("num", c_int),
]
def null(self, num):
self.buffer_type = FieldType.C_NULL
self.is_null = cast((c_char * num)(*[1 for _ in range(num)]), c_char_p)
self.buffer = c_void_p(None)
self.num = num
def bool(self, values):
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BOOL_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
self.buffer_type = FieldType.C_BOOL
self.buffer_length = sizeof(c_bool)
def tinyint(self, values):
self.buffer_type = FieldType.C_TINYINT
self.buffer_length = sizeof(c_int8)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def smallint(self, values):
self.buffer_type = FieldType.C_SMALLINT
self.buffer_length = sizeof(c_int16)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int16 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def int(self, values):
self.buffer_type = FieldType.C_INT
self.buffer_length = sizeof(c_int32)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int32 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_INT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def bigint(self, values):
self.buffer_type = FieldType.C_BIGINT
self.buffer_length = sizeof(c_int64)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int64 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def float(self, values):
self.buffer_type = FieldType.C_FLOAT
self.buffer_length = sizeof(c_float)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_float * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_FLOAT_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def double(self, values):
self.buffer_type = FieldType.C_DOUBLE
self.buffer_length = sizeof(c_double)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_double * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_DOUBLE_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def binary(self, values):
self.num = len(values)
self.buffer = cast(c_char_p("".join(filter(_is_not_none, values)).encode("utf-8")), c_void_p)
self.length = (c_int * len(values))(*[len(value) if value is not None else 0 for value in values])
self.buffer_type = FieldType.C_BINARY
self.is_null = cast((c_byte * self.num)(*[1 if v is None else 0 for v in values]), c_char_p)
def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_int64 * len(values)
buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])
self.buffer_type = FieldType.C_TIMESTAMP
self.buffer = cast(buffer, c_void_p)
self.buffer_length = sizeof(c_int64)
self.num = len(values)
def nchar(self, values):
# type: (list[str]) -> None
if sys.version_info < (3, 0):
_bytes = [bytes(value) if value is not None else None for value in values]
buffer_length = max(len(b) + 1 for b in _bytes if b is not None)
buffers = [
create_string_buffer(b, buffer_length) if b is not None else create_string_buffer(buffer_length)
for b in _bytes
]
buffer_all = b''.join(v[:] for v in buffers)
self.buffer = cast(c_char_p(buffer_all), c_void_p)
else:
_bytes = [value.encode("utf-8") if value is not None else None for value in values]
buffer_length = max(len(b) for b in _bytes if b is not None)
self.buffer = cast(
c_char_p(
b"".join(
[
create_string_buffer(b, buffer_length)
if b is not None
else create_string_buffer(buffer_length)
for b in _bytes
]
)
),
c_void_p,
)
self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes])
self.buffer_length = buffer_length
self.num = len(values)
self.is_null = cast((c_byte * self.num)(*[1 if v == None else 0 for v in values]), c_char_p)
self.buffer_type = FieldType.C_NCHAR
def tinyint_unsigned(self, values):
self.buffer_type = FieldType.C_TINYINT_UNSIGNED
self.buffer_length = sizeof(c_uint8)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint8 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def smallint_unsigned(self, values):
self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
self.buffer_length = sizeof(c_uint16)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint16 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def int_unsigned(self, values):
self.buffer_type = FieldType.C_INT_UNSIGNED
self.buffer_length = sizeof(c_uint32)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint32 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_INT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def bigint_unsigned(self, values):
self.buffer_type = FieldType.C_BIGINT_UNSIGNED
self.buffer_length = sizeof(c_uint64)
try:
buffer = cast(values, c_void_p)
except:
buffer_type = c_uint64 * len(values)
try:
buffer = buffer_type(*values)
except:
buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_UNSIGNED_NULL for v in values])
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
def new_bind_param():
# type: () -> TaosBind
return TaosBind()
def new_bind_params(size):
# type: (int) -> Array[TaosBind]
return (TaosBind * size)()
def new_multi_bind():
# type: () -> TaosMultiBind
return TaosMultiBind()
def new_multi_binds(size):
# type: (int) -> Array[TaosMultiBind]
return (TaosMultiBind * size)()
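A minimal usage sketch for the multi-bind helpers above. The table layout is hypothetical, and the TaosStmt method names (bind_param_batch, execute, close) are assumed from the statement module referenced in this changeset rather than shown here:

```python
import taos

conn = taos.connect(database="demo")                 # assumes an existing "demo" database
stmt = conn.statement("insert into tb1 values (?, ?)")

params = new_multi_binds(2)                          # one TaosMultiBind per column
params[0].timestamp([1626861392589, 1626861392590])  # ts column, two rows (ms precision)
params[1].int([10, None])                            # int column; None maps to C_INT_NULL
stmt.bind_param_batch(params)                        # assumed TaosStmt API
stmt.execute()
stmt.close()
conn.close()
```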
# encoding:UTF-8
from types import FunctionType

from .cinterface import *
from .cursor import TaosCursor
from .subscription import TaosSubscription
from .statement import TaosStmt
from .stream import TaosStream
from .result import *


class TaosConnection(object):
    """TDengine connection object"""

    def __init__(self, *args, **kwargs):
        self._conn = None
...@@ -21,63 +25,130 @@ class TDengineConnection(object):
    def config(self, **kwargs):
        # host
        if "host" in kwargs:
            self._host = kwargs["host"]

        # user
        if "user" in kwargs:
            self._user = kwargs["user"]

        # password
        if "password" in kwargs:
            self._password = kwargs["password"]

        # database
        if "database" in kwargs:
            self._database = kwargs["database"]

        # port
        if "port" in kwargs:
            self._port = kwargs["port"]

        # config
        if "config" in kwargs:
            self._config = kwargs["config"]

        self._chandle = CTaosInterface(self._config)
        self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)
    def close(self):
        """Close current connection."""
        if self._conn:
            taos_close(self._conn)
            self._conn = None

    @property
    def client_info(self):
        # type: () -> str
        return taos_get_client_info()

    @property
    def server_info(self):
        # type: () -> str
        return taos_get_server_info(self._conn)

    def select_db(self, database):
        # type: (str) -> None
        taos_select_db(self._conn, database)

    def execute(self, sql):
        # type: (str) -> None
        """Simply execute sql, ignoring the results"""
        res = taos_query(self._conn, sql)
        taos_free_result(res)

    def query(self, sql):
        # type: (str) -> TaosResult
        result = taos_query(self._conn, sql)
        return TaosResult(result, True, self)

    def query_a(self, sql, callback, param):
        # type: (str, async_query_callback_type, c_void_p) -> None
        """Asynchronously query a sql with callback function"""
        taos_query_a(self._conn, sql, callback, param)

    def subscribe(self, restart, topic, sql, interval, callback=None, param=None):
        # type: (bool, str, str, int, subscribe_callback_type, c_void_p) -> TaosSubscription
        """Create a subscription."""
        if self._conn is None:
            return None
        sub = taos_subscribe(self._conn, restart, topic, sql, interval, callback, param)
        return TaosSubscription(sub, callback is not None)

    def statement(self, sql=None):
        # type: (str | None) -> TaosStmt
        if self._conn is None:
            return None
        stmt = taos_stmt_init(self._conn)
        if sql is not None:
            taos_stmt_prepare(stmt, sql)
        return TaosStmt(stmt)
    def load_table_info(self, tables):
        # type: (str) -> None
        taos_load_table_info(self._conn, tables)

    def stream(self, sql, callback, stime=0, param=None, callback2=None):
        # type: (str, Callable[[Any, TaosResult, TaosRows], None], int, Any, c_void_p) -> TaosStream
        # cb = cast(callback, stream_callback_type)
        # ref = byref(cb)
        stream = taos_open_stream(self._conn, sql, callback, stime, param, callback2)
        return TaosStream(stream)

    def insert_lines(self, lines):
        # type: (list[str]) -> None
        """Line protocol and schemaless support

        ## Example

        ```python
        import taos

        conn = taos.connect()
        conn.execute("create database if not exists test")
        conn.select_db("test")
        lines = [
            'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
        ]
        conn.insert_lines(lines)
        ```

        ## Exception

        ```python
        try:
            conn.insert_lines(lines)
        except SchemalessError as err:
            print(err)
        ```
        """
        return taos_insert_lines(self._conn, lines)
    def cursor(self):
        # type: () -> TaosCursor
        """Return a new Cursor object using the connection."""
        return TaosCursor(self)

    def commit(self):
        """Commit any pending transaction to the database.
...@@ -87,17 +158,18 @@ class TDengineConnection(object):
        pass

    def rollback(self):
        """Void functionality"""
        pass

    def clear_result_set(self):
        """Clear unused result set on this connection."""
        pass

    def __del__(self):
        self.close()


if __name__ == "__main__":
    conn = TaosConnection()
    conn.close()
    print("Hello world")
# encoding:UTF-8
"""Constants in TDengine python
"""


class FieldType(object):
    """TDengine Field Types"""

    # type_code
    C_NULL = 0
    C_BOOL = 1
...@@ -34,9 +33,9 @@ class FieldType(object):
    C_INT_UNSIGNED_NULL = 4294967295
    C_BIGINT_NULL = -9223372036854775808
    C_BIGINT_UNSIGNED_NULL = 18446744073709551615
    C_FLOAT_NULL = float("nan")
    C_DOUBLE_NULL = float("nan")
    C_BINARY_NULL = bytearray([int("0xff", 16)])
    # Timestamp precision definition
    C_TIMESTAMP_MILLI = 0
    C_TIMESTAMP_MICRO = 1
......
# encoding:UTF-8
from .cinterface import *
from .error import *
from .constants import FieldType
from .result import *

# querySeqNum = 0


class TaosCursor(object):
"""Database cursor which is used to manage the context of a fetch operation. """Database cursor which is used to manage the context of a fetch operation.
Attributes: Attributes:
.description: Read-only attribute consists of 7-item sequences: .description: Read-only attribute consists of 7-item sequences:
> name (mondatory) > name (mandatory)
> type_code (mondatory) > type_code (mandatory)
> display_size > display_size
> internal_size > internal_size
> precision > precision
...@@ -55,8 +55,7 @@ class TDengineCursor(object): ...@@ -55,8 +55,7 @@ class TDengineCursor(object):
            raise OperationalError("Invalid use of fetch iterator")

        if self._block_rows <= self._block_iter:
            block, self._block_rows = taos_fetch_row(self._result, self._fields)
            if self._block_rows == 0:
                raise StopIteration
            self._block = list(map(tuple, zip(*block)))
...@@ -69,20 +68,17 @@ class TDengineCursor(object):
    @property
    def description(self):
        """Return the description of the object."""
        return self._description

    @property
    def rowcount(self):
        """Return the rowcount of the object"""
        return self._rowcount

    @property
    def affected_rows(self):
        """Return the rowcount of insertion"""
        return self._affected_rows

    def callproc(self, procname, *args):
...@@ -96,8 +92,7 @@ class TDengineCursor(object):
        self._logfile = logfile

    def close(self):
        """Close the cursor."""
        if self._connection is None:
            return False
...@@ -107,8 +102,7 @@ class TDengineCursor(object):
        return True

    def execute(self, operation, params=None):
        """Prepare and execute a database operation (query or command)."""
        if not operation:
            return None
...@@ -124,104 +118,91 @@ class TDengineCursor(object):
        # global querySeqNum
        # querySeqNum += 1
        # localSeqNum = querySeqNum  # avoid race condition
        # print("  >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))

        self._result = taos_query(self._connection._conn, stmt)
        # print("  << Query ({}) Exec Done".format(localSeqNum))
        if self._logfile:
            with open(self._logfile, "a") as logfile:
                logfile.write("%s;\n" % operation)

        if taos_field_count(self._result) == 0:
            affected_rows = taos_affected_rows(self._result)
            self._affected_rows += affected_rows
            return affected_rows
        else:
            self._fields = taos_fetch_fields(self._result)
            return self._handle_result()
    def executemany(self, operation, seq_of_parameters):
        """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters."""
        pass

    def fetchone(self):
        """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available."""
        pass

    def fetchmany(self):
        pass
    def istype(self, col, dataType):
        if dataType.upper() == "BOOL":
            if self._description[col][1] == FieldType.C_BOOL:
                return True
        if dataType.upper() == "TINYINT":
            if self._description[col][1] == FieldType.C_TINYINT:
                return True
        if dataType.upper() == "TINYINT UNSIGNED":
            if self._description[col][1] == FieldType.C_TINYINT_UNSIGNED:
                return True
        if dataType.upper() == "SMALLINT":
            if self._description[col][1] == FieldType.C_SMALLINT:
                return True
        if dataType.upper() == "SMALLINT UNSIGNED":
            if self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED:
                return True
        if dataType.upper() == "INT":
            if self._description[col][1] == FieldType.C_INT:
                return True
        if dataType.upper() == "INT UNSIGNED":
            if self._description[col][1] == FieldType.C_INT_UNSIGNED:
                return True
        if dataType.upper() == "BIGINT":
            if self._description[col][1] == FieldType.C_BIGINT:
                return True
        if dataType.upper() == "BIGINT UNSIGNED":
            if self._description[col][1] == FieldType.C_BIGINT_UNSIGNED:
                return True
        if dataType.upper() == "FLOAT":
            if self._description[col][1] == FieldType.C_FLOAT:
                return True
        if dataType.upper() == "DOUBLE":
            if self._description[col][1] == FieldType.C_DOUBLE:
                return True
        if dataType.upper() == "BINARY":
            if self._description[col][1] == FieldType.C_BINARY:
                return True
        if dataType.upper() == "TIMESTAMP":
            if self._description[col][1] == FieldType.C_TIMESTAMP:
                return True
        if dataType.upper() == "NCHAR":
            if self._description[col][1] == FieldType.C_NCHAR:
                return True

        return False
    def fetchall_row(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation."""
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

        buffer = [[] for i in range(len(self._fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = taos_fetch_row(self._result, self._fields)
            errno = taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(taos_errstr(self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields
...@@ -230,19 +211,16 @@ class TDengineCursor(object):
        return list(map(tuple, zip(*buffer)))
    def fetchall(self):
        if self._result is None:
            raise OperationalError("Invalid use of fetchall")

        fields = self._fields if self._fields is not None else taos_fetch_fields(self._result)
        buffer = [[] for i in range(len(fields))]
        self._rowcount = 0
        while True:
            block, num_of_fields = taos_fetch_block(self._result, self._fields)
            errno = taos_errno(self._result)
            if errno != 0:
                raise ProgrammingError(taos_errstr(self._result), errno)
            if num_of_fields == 0:
                break
            self._rowcount += num_of_fields
...@@ -250,9 +228,12 @@ class TDengineCursor(object):
                buffer[i].extend(block[i])
        return list(map(tuple, zip(*buffer)))
    def stop_query(self):
        if self._result is not None:
            taos_stop_query(self._result)

    def nextset(self):
        """ """
        pass

    def setinputsize(self, sizes):
...@@ -262,12 +243,11 @@ class TDengineCursor(object):
        pass

    def _reset_result(self):
        """Reset the result to unused version."""
        self._description = []
        self._rowcount = -1
        if self._result is not None:
            taos_free_result(self._result)
        self._result = None
        self._fields = None
        self._block = None
...@@ -276,11 +256,12 @@ class TDengineCursor(object):
        self._affected_rows = 0

    def _handle_result(self):
        """Handle the return result from query."""
        self._description = []
        for ele in self._fields:
            self._description.append((ele["name"], ele["type"], None, None, None, None, False))

        return self._result

    def __del__(self):
        self.close()
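The same style of query through the DB-API cursor defined above; fetchall() drains the result block by block and returns a list of row tuples. The "demo" database and table are hypothetical:

```python
import taos

conn = taos.connect(database="demo")
cursor = conn.cursor()
cursor.execute("select ts, v from t")
for row in cursor.fetchall():
    print(row)
cursor.close()
conn.close()
```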
"""Type Objects and Constructors.
"""
import time
import datetime
class DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __com__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DataFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = bytes
# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
# ROWID = DBAPITypeObject()
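A quick example of the PEP 249 ticks constructors above; the exact output depends on the local timezone, since time.localtime is used:

```python
ticks = 1626861392
print(DateFromTicks(ticks))       # e.g. 2021-07-21
print(TimeFromTicks(ticks))       # e.g. 17:56:32
print(TimestampFromTicks(ticks))  # e.g. 2021-07-21 17:56:32
```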
# encoding:UTF-8
"""Python exceptions
"""


class Error(Exception):
    def __init__(self, msg=None, errno=0xffff):
        self.msg = msg
        self.errno = errno
        self._full_msg = "[0x%04x]: %s" % (self.errno & 0xffff, self.msg)

    def __str__(self):
        return self._full_msg


class Warning(Exception):
    """Exception raised for important warnings like data truncations while inserting."""
    pass


class InterfaceError(Error):
    """Exception raised for errors that are related to the database interface rather than the database itself."""
    pass


class DatabaseError(Error):
    """Exception raised for errors that are related to the database."""
    pass


class ConnectionError(Error):
    """Exception raised when a connection attempt fails."""
    pass


class DataError(DatabaseError):
    """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range."""
    pass


class OperationalError(DatabaseError):
    """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer"""
    pass


class IntegrityError(DatabaseError):
    """Exception raised when the relational integrity of the database is affected."""
    pass


class InternalError(DatabaseError):
    """Exception raised when the database encounters an internal error."""
    pass


class ProgrammingError(DatabaseError):
    """Exception raised for programming errors."""
    pass


class NotSupportedError(DatabaseError):
    """Exception raised in case a method or database API was used which is not supported by the database."""
    pass


class StatementError(DatabaseError):
    """Exception raised in STMT API."""
    pass


class ResultError(DatabaseError):
    """Exception raised by result related APIs."""
    pass


class LinesError(DatabaseError):
    """Exception raised by taos_insert_lines."""
    pass
\ No newline at end of file
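A short sketch of how callers are expected to use this exception hierarchy; which subclass a given failure raises depends on the cinterface layer, so the broad DatabaseError is caught here as a fallback:

```python
import taos
from taos.error import DatabaseError

conn = taos.connect()
try:
    conn.query("select * from no_such_table")
except DatabaseError as err:
    print(err.errno, err.msg)  # __str__ formats this as "[0x....]: msg"
conn.close()
```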
# encoding:UTF-8
import ctypes
import math
import datetime
from ctypes import *

from .constants import FieldType
from .error import *

_datetime_epoch = datetime.datetime.fromtimestamp(0)


def _convert_millisecond_to_datetime(milli):
    return _datetime_epoch + datetime.timedelta(seconds=milli / 1000.0)


def _convert_microsecond_to_datetime(micro):
    return _datetime_epoch + datetime.timedelta(seconds=micro / 1000000.0)


def _convert_nanosecond_to_datetime(nanosec):
    # datetime has no nanosecond precision; return the raw integer as-is
    return nanosec
def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C timestamp row to python row"""
    _timestamp_converter = _convert_millisecond_to_datetime
    if precision == FieldType.C_TIMESTAMP_MILLI:
        _timestamp_converter = _convert_millisecond_to_datetime
    elif precision == FieldType.C_TIMESTAMP_MICRO:
        _timestamp_converter = _convert_microsecond_to_datetime
    elif precision == FieldType.C_TIMESTAMP_NANO:
        _timestamp_converter = _convert_nanosecond_to_datetime
    else:
        raise DatabaseError("Unknown precision returned from database")

    return [
        None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
    ]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C bool row to python row"""
    return [
        None if ele == FieldType.C_BOOL_NULL else bool(ele)
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
    ]


def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C tinyint row to python row"""
    return [
        None if ele == FieldType.C_TINYINT_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
    ]


def _crow_tinyint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C unsigned tinyint row to python row"""
    return [
        None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))[: abs(num_of_rows)]
    ]


def _crow_smallint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C smallint row to python row"""
    return [
        None if ele == FieldType.C_SMALLINT_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[: abs(num_of_rows)]
    ]


def _crow_smallint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C unsigned smallint row to python row"""
    return [
        None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ushort))[: abs(num_of_rows)]
    ]


def _crow_int_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C int row to python row"""
    return [
        None if ele == FieldType.C_INT_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[: abs(num_of_rows)]
    ]


def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C unsigned int row to python row"""
    return [
        None if ele == FieldType.C_INT_UNSIGNED_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint))[: abs(num_of_rows)]
    ]


def _crow_bigint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C bigint row to python row"""
    return [
        None if ele == FieldType.C_BIGINT_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
    ]


def _crow_bigint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C unsigned bigint row to python row"""
    return [
        None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint64))[: abs(num_of_rows)]
    ]


def _crow_float_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C float row to python row"""
    return [
        None if math.isnan(ele) else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[: abs(num_of_rows)]
    ]


def _crow_double_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C double row to python row"""
    return [
        None if math.isnan(ele) else ele
        for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[: abs(num_of_rows)]
    ]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C binary row to python row"""
    assert nbytes is not None
    return [
        None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode("utf-8")
        for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[: abs(num_of_rows)]
    ]


def _crow_nchar_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C nchar row to python row"""
    assert nbytes is not None
    res = []
    for i in range(abs(num_of_rows)):
        try:
            if num_of_rows >= 0:
                tmpstr = ctypes.c_char_p(data)
                res.append(tmpstr.value.decode())
            else:
                res.append(
                    (
                        ctypes.cast(
                            data + nbytes * i,
                            ctypes.POINTER(ctypes.c_wchar * (nbytes // 4)),
                        )
                    )[0].value
                )
        except ValueError:
            res.append(None)

    return res


def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C binary row to python row"""
    assert nbytes is not None
    res = []
    for i in range(abs(num_of_rows)):
        try:
            # each cell starts with a 2-byte length prefix, followed by the payload
            rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
            tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
            res.append(tmpstr.value.decode()[0:rbyte])
        except ValueError:
            res.append(None)
    return res


def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
    """Function to convert C nchar row to python row"""
    assert nbytes is not None
    res = []
    for i in range(abs(num_of_rows)):
        try:
            # skip the 2-byte length prefix and decode the cell payload
            tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
            res.append(tmpstr.value.decode())
        except ValueError:
            res.append(None)
    return res
CONVERT_FUNC = {
    FieldType.C_BOOL: _crow_bool_to_python,
    FieldType.C_TINYINT: _crow_tinyint_to_python,
    FieldType.C_SMALLINT: _crow_smallint_to_python,
    FieldType.C_INT: _crow_int_to_python,
    FieldType.C_BIGINT: _crow_bigint_to_python,
    FieldType.C_FLOAT: _crow_float_to_python,
    FieldType.C_DOUBLE: _crow_double_to_python,
    FieldType.C_BINARY: _crow_binary_to_python,
    FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
    FieldType.C_NCHAR: _crow_nchar_to_python,
    FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
    FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
    FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
    FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
}

CONVERT_FUNC_BLOCK = {
    FieldType.C_BOOL: _crow_bool_to_python,
    FieldType.C_TINYINT: _crow_tinyint_to_python,
    FieldType.C_SMALLINT: _crow_smallint_to_python,
    FieldType.C_INT: _crow_int_to_python,
    FieldType.C_BIGINT: _crow_bigint_to_python,
    FieldType.C_FLOAT: _crow_float_to_python,
    FieldType.C_DOUBLE: _crow_double_to_python,
    FieldType.C_BINARY: _crow_binary_to_python_block,
    FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
    FieldType.C_NCHAR: _crow_nchar_to_python_block,
    FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
    FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
    FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
    FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
}
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
    _fields_ = [
        ("_name", ctypes.c_char * 65),
        ("_type", ctypes.c_uint8),
        ("_bytes", ctypes.c_uint16),
    ]

    @property
    def name(self):
        return self._name.decode("utf-8")

    @property
    def length(self):
        """alias to self.bytes"""
        return self._bytes

    @property
    def bytes(self):
        return self._bytes

    @property
    def type(self):
        return self._type

    def __dict__(self):
        return {"name": self.name, "type": self.type, "bytes": self.length}

    def __str__(self):
        return "{name: %s, type: %d, bytes: %d}" % (self.name, self.type, self.length)

    def __getitem__(self, item):
        return getattr(self, item)
class TaosFields(object):
    def __init__(self, fields, count):
        if isinstance(fields, c_void_p):
            self._fields = cast(fields, POINTER(TaosField))
        if isinstance(fields, POINTER(TaosField)):
            self._fields = fields
        self._count = count
        self._iter = 0

    def as_ptr(self):
        return self._fields

    @property
    def count(self):
        return self._count

    @property
    def fields(self):
        return self._fields

    def __next__(self):
        return self._next_field()

    def next(self):
        return self._next_field()

    def _next_field(self):
        if self._iter < self.count:
            field = self._fields[self._iter]
            self._iter += 1
            return field
        else:
            raise StopIteration

    def __getitem__(self, item):
        return self._fields[item]

    def __iter__(self):
        return self

    def __len__(self):
        return self.count
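A small sketch of iterating field metadata through TaosFields; the fields accessor on the query result is an assumption based on this package's result module, which is not shown in this diff:

```python
import taos

conn = taos.connect(database="demo")       # hypothetical database
result = conn.query("select * from t")
for field in result.fields:                # assumed to be a TaosFields instance
    print(field.name, field.type, field.bytes)
conn.close()
```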
class PrecisionEnum(object):
    """Precision enums"""

    Milliseconds = 0
    Microseconds = 1
    Nanoseconds = 2


class PrecisionError(Exception):
    """Python datetime does not support nanoseconds error"""

    pass
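A sketch of how the CONVERT_FUNC table is meant to be driven: given a raw data pointer and a field type, the matching converter decodes one column of rows, mapping NULL sentinels to None. The hand-built ctypes buffer here is purely for illustration:

```python
import ctypes

# three C ints, the middle one set to the INT NULL sentinel
values = (ctypes.c_int * 3)(1, FieldType.C_INT_NULL, 3)
data = ctypes.cast(values, ctypes.c_void_p)

rows = CONVERT_FUNC[FieldType.C_INT](data, 3)
print(rows)  # [1, None, 3]
```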
from taos.cinterface import *
from taos.error import *
from taos.result import *


class TaosStream(object):
    """TDengine Stream interface"""

    def __init__(self, stream):
        self._raw = stream

    def as_ptr(self):
        return self._raw

    def close(self):
        """Close the stream."""
        if self._raw is not None:
            taos_close_stream(self._raw)
            self._raw = None

    def __del__(self):
        self.close()


class TimestampType(object):
    """Choose which type TDengine timestamp data is parsed to:
    - DATETIME: use python datetime.datetime; note that it does not support nanosecond precision,
      and python taos will use raw c_int64 as a fallback for nanosecond results.
    - NUMPY: use numpy.datetime64 type.
    - RAW: use raw c_int64.
    - TAOS: use taos' TaosTimestamp.
    """

    DATETIME = 0
    NUMPY = 1
    RAW = 2
    TAOS = 3


class TaosTimestamp:
    pass
...@@ -224,6 +224,7 @@ do { \
#define TSDB_IPv4ADDR_LEN       16
#define TSDB_FILENAME_LEN       128
#define TSDB_SHOW_SQL_LEN       512
#define TSDB_SHOW_SUBQUERY_LEN  1000
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_STEP_NAME_LEN      32
......