Commit 910ad400 authored by dapan1121

Merge branch 'feature/cenc' of https://github.com/taosdata/TDengine into feature/cenc

......@@ -10,3 +10,6 @@
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
......@@ -59,6 +59,11 @@ IF (TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()
ENDIF ()
IF (TD_LINUX_32)
......
......@@ -72,3 +72,8 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
IF (${JEMALLOC_ENABLED} MATCHES "true")
SET(TD_JEMALLOC_ENABLED TRUE)
MESSAGE(STATUS "build with jemalloc enabled")
ENDIF ()
......@@ -19,3 +19,16 @@ ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
ENDIF ()
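
The hunks above wire jemalloc into the build: a deps/jemalloc submodule is added, CMake defines -DTD_JEMALLOC_ENABLED together with include/lib/rpath flags pointing at ${CMAKE_BINARY_DIR}/build, and an ExternalProject runs autogen.sh, configure and make. As a hedged, optional sanity check (not part of this commit), a small probe compiled against that build tree can confirm at runtime that jemalloc really is the allocator in use; mallctl is jemalloc's standard control interface, while the file name and the paths in the comment are illustrative.

```c
/* check_jemalloc.c -- hedged sketch, not part of the commit.
 * Build against the tree produced by the ExternalProject above, e.g.:
 *   gcc check_jemalloc.c -I<builddir>/build/include -L<builddir>/build/lib \
 *       -Wl,-rpath,<builddir>/build/lib -ljemalloc -o check_jemalloc
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
  const char *version = NULL;
  size_t len = sizeof(version);
  /* mallctl("version", ...) succeeds only when jemalloc is actually linked in. */
  if (mallctl("version", &version, &len, NULL, 0) == 0) {
    printf("jemalloc active, version %s\n", version);
    return 0;
  }
  printf("jemalloc not active\n");
  return 1;
}
```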
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
......@@ -107,9 +107,9 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
![replica-forward.png](page://images/architecture/replica-forward.png)
1. The application performs a basic validity check on the write request; if it passes, the request packet is stamped with a version number (version, monotonically increasing).
2. The application wraps the versioned write request in a WAL Head and writes it into the WAL (Write Ahead Log).
3. The application calls the API syncForwardToPeer; if vnode B is in slave state, the sync module sends the packet containing the WAL Head to vnode B via a Forward message, otherwise nothing is forwarded.
4. After vnode B receives the Forward message, it invokes the callback writeToCache and hands the data to the application for processing.
5. After the write succeeds, the application on vnode B must call syncAckForward to notify the sync module that the write has succeeded.
6. If quorum is greater than 1, vnode B waits for the application's acknowledgement; once it is received, vnode B sends a Forward Response message to node A.
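
The following is a minimal, self-contained sketch of the six steps above, only to make the control flow concrete. syncForwardToPeer, syncAckForward and writeToCache are names taken from the text; every other type and helper is an illustrative stub, not the actual vnode/sync code.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { uint64_t version; /* WAL head + payload omitted */ } SWalHead;

/* Illustrative stubs standing in for the real modules. */
static uint64_t nextVersion(void)                    { static uint64_t v = 0; return ++v; }
static bool     checkRequest(const void *req)        { (void)req; return true; }
static void     walWrite(const SWalHead *h)          { printf("WAL  <- version %llu\n", (unsigned long long)h->version); }
static void     syncForwardToPeer(const SWalHead *h) { printf("peer <- Forward v%llu (only if peer is slave)\n", (unsigned long long)h->version); }
static int      writeToCache(const SWalHead *h)      { (void)h; return 0; }
static void     syncAckForward(uint64_t v, int code) { printf("ack v%llu code=%d (Forward Response sent when quorum > 1)\n", (unsigned long long)v, code); }

/* Master (vnode A): steps 1-3, then apply locally. */
static int handleWriteOnMaster(const void *req) {
  if (!checkRequest(req)) return -1;               /* step 1: basic validity check     */
  SWalHead head = { .version = nextVersion() };    /* step 1: monotonically increasing */
  walWrite(&head);                                 /* step 2: append WAL head to WAL   */
  syncForwardToPeer(&head);                        /* step 3: Forward to slave vnode B */
  return writeToCache(&head);                      /* local apply                      */
}

/* Slave (vnode B): steps 4-6. */
static void handleForwardOnSlave(const SWalHead *head) {
  int code = writeToCache(head);                   /* step 4: handed to the application */
  syncAckForward(head->version, code);             /* step 5: notify the sync module    */
}

int main(void) {
  handleWriteOnMaster(NULL);
  SWalHead forwarded = { .version = 1 };
  handleForwardOnSlave(&forwarded);
  return 0;
}
```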
......@@ -219,7 +219,7 @@ The Arbitrator program tarbitrator.c is in the same directory as the replication module; building the whole system
Differences:
- The election procedure is different: in Raft, a node that is a candidate actively sends vote requests to the other nodes, and if more than half answer Yes the candidate becomes Leader and starts a new term. In TDengine's implementation, a node coming online, going offline, or changing role triggers status messages that propagate within the node group, and the election procedure starts only after the group's status has become stable and consistent. Because the status is stable and consistent, every node reaches the same decision from the same status information; once a node meets the conditions for becoming master, it sets itself as master without needing approval from the other nodes. In TDengine, whenever a node detects that its own role or another node's role has changed, it broadcasts to the other nodes in the group; Raft has no such mechanism and therefore has to resolve this by voting.
- For a WAL record, Raft uses term + index as the unique identifier, whereas TDengine uses only version (similar to index). In TDengine's implementation, using version alone is entirely workable, because TDengine's election mechanism has no concept of term.
If the whole virtual node group goes down and restarts but not all virtual nodes come back online, TDengine will not elect a master, because a node that is still offline may hold the data with the highest version. Under the RAFT protocol, a Leader is elected as soon as more than half of the nodes are online.
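
A hedged sketch of the rule contrasted above, covering only the full-group-restart case: every member must be back online and report a consistent status before any node may promote itself, and the promoted node must hold the highest version (there is no term). All names below are illustrative, not the actual sync module API.

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  bool     online;            /* node is back and reachable                   */
  bool     statusConsistent;  /* its broadcast status agrees with the group   */
  uint64_t version;           /* highest WAL version it holds (no term field) */
} SNodeStatus;

/* After a full-group restart, a node promotes itself to master, without any
 * voting, only if the whole group is online with a stable, consistent status
 * and no peer holds a higher version than this node. */
static bool canBecomeMasterAfterFullRestart(const SNodeStatus *self,
                                            const SNodeStatus *peers, int numOfPeers) {
  if (!self->statusConsistent) return false;
  for (int i = 0; i < numOfPeers; ++i) {
    if (!peers[i].online || !peers[i].statusConsistent) return false; /* an offline peer may hold the newest data */
    if (peers[i].version > self->version) return false;              /* a peer with a higher version should win  */
  }
  return true;
}

int main(void) {
  SNodeStatus self     = { true, true, 100 };
  SNodeStatus peers[2] = { { true, true, 90 }, { true, true, 95 } };
  return canBecomeMasterAfterFullRestart(&self, peers, 2) ? 0 : 1;
}
```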
......
......@@ -343,7 +343,7 @@ TDengine uses a data-driven approach to write cached data to disk for persistence
Collected data generally has a retention period, which is determined by the system configuration parameter keep. Data files older than the configured number of days are automatically deleted by the system to free storage space.
Given the two parameters days and keep, the total number of data files in a vnode is keep/days. The total number of data files should be neither too large nor too small; somewhere between 10 and 100 is appropriate. Based on this principle a reasonable value of days can be chosen. In the current version the parameter keep can be modified, but once days has been set it cannot be changed.
Given the two parameters days and keep, the total number of data files in a vnode in a typical working state is `ceil(keep/days) + 1`. The total number of data files should be neither too large nor too small; somewhere between 10 and 100 is appropriate. Based on this principle a reasonable value of days can be chosen. In the current version the parameter keep can be modified, but once days has been set it cannot be changed.
In each data file, a table's data is stored block by block. A table can have one or more data file blocks. Within a file block, the data is stored in columnar form and occupies a contiguous region of storage, which greatly improves read speed. The size of a file block is determined by the system parameter maxRows (maximum number of records per block), which defaults to 4096. This value should be neither too large nor too small: if it is too large, locating the data of a specific time range takes longer and read speed suffers; if it is too small, the data block index becomes too large and compression efficiency drops, which also hurts read speed.
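
As a quick numeric check of the corrected formula above (the helper name is ours; only the formula `ceil(keep/days) + 1` and the suggested 10-100 range come from the text):

```c
#include <stdio.h>

/* Data files held by a vnode in a typical working state: ceil(keep/days) + 1. */
static int dataFileCount(int keepDays, int daysPerFile) {
  return (keepDays + daysPerFile - 1) / daysPerFile + 1;  /* integer ceiling, plus one */
}

int main(void) {
  /* keep = 365, days = 10 -> ceil(36.5) + 1 = 38 files, inside the suggested 10..100 range. */
  printf("%d\n", dataFileCount(365, 10));
  return 0;
}
```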
......
......@@ -516,7 +516,7 @@ conn.close()
- _TDengineCursor_ class
See help(taos.TDengineCursor) in Python.
This class corresponds to the write and query operations performed by the client. In a multi-threaded client scenario, a cursor instance must remain exclusive to a single thread and must not be shared between threads; otherwise the returned results may be wrong.
- _connect_ method
......
......@@ -37,7 +37,7 @@ taos> DESCRIBE meters;
- Epoch Time: a timestamp can also be a long integer, representing the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; for example, now-2h means two hours before the query time (the most recent two hours). The time unit after the number can be u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the time units n (calendar month) and y (calendar year) can also be used.
By default TDengine timestamps have millisecond precision, but microsecond precision can be enabled by modifying the configuration parameter enableMicrosecond.
By default TDengine timestamps have millisecond precision, but microsecond precision can be enabled through the PRECISION parameter passed when executing CREATE DATABASE.
In TDengine, the following 10 data types can be used in the data model of a regular table.
......@@ -400,6 +400,7 @@ By default TDengine timestamps have millisecond precision, but by modifying the configuration parameter enableM
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Using automatic table creation, insert multiple records column-wise into tables tb1_name and tb2_name at the same time.
Note: the `(tb1_field1_name, ...)` part can be omitted, in which case full-column mode is used for writing, meaning the data supplied in the VALUES clause must explicitly provide a value for every column of the table. Full-column writes are much faster than writes to specified columns, so full-column mode is recommended whenever possible; empty columns can be filled with NULL.
Starting from version 2.0.20.5, a subtable's column names no longer have to follow the subtable name; they can instead be placed between TAGS and VALUES, for example:
```mysql
INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
......@@ -423,9 +424,9 @@ Query OK, 1 row(s) in set (0.001029s)
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2);
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
DB error: invalid SQL: keyword VALUES or FILE required
DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)
taos> SHOW TABLES;
table_name | created_time | columns | stable_name |
......
......@@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
# create install dir
# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
......@@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
......@@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
......@@ -67,7 +67,41 @@ fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
......@@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
......@@ -90,7 +124,7 @@ fi
if [ "$verType" == "beta" ]; then
debname=${debname}-${verType}".deb"
elif [ "$verType" == "stable" ]; then
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
......@@ -101,7 +135,7 @@ fi
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
......
......@@ -6,7 +6,7 @@ set -e
#set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
# -l [full | lite]
......@@ -22,11 +22,12 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
allocator=glibc # [glibc | jemalloc]
dbName=taos # [taos | power]
verNumber=""
verNumberComp="2.0.0.0"
while getopts "hv:V:c:o:l:s:d:n:m:" arg
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
case $arg in
v)
......@@ -53,6 +54,10 @@ do
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
......@@ -71,20 +76,21 @@ do
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
echo " -d [taos | power] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
;;
?) #unknown option
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd)
......@@ -118,7 +124,7 @@ function vercomp () {
echo 0
exit 0
fi
local IFS=.
local i ver1=($1) ver2=($2)
......@@ -164,7 +170,7 @@ if [[ "$verMode" == "cluster" ]]; then
else
gitinfoOfInternal=NULL
fi
cd ${curr_dir}
# 2. cmake executable file
......@@ -180,12 +186,18 @@ else
fi
cd ${compile_dir}
if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} ${allocator_macro}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
......@@ -199,9 +211,9 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
......@@ -214,9 +226,9 @@ if [ "$osType" != "Darwin" ]; then
echo "==========dpkg command not exist, so not release deb package!!!"
fi
ret='0'
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
......@@ -229,11 +241,11 @@ if [ "$osType" != "Darwin" ]; then
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
fi
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
if [[ "$dbName" == "taos" ]]; then
if [[ "$dbName" == "taos" ]]; then
${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
......
#!/bin/bash
#
# Generate rpm package for centos
# Generate rpm package for centos
set -e
# set -x
......@@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo} cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
......@@ -74,7 +74,7 @@ fi
if [ "$verType" == "beta" ]; then
rpmname=${rpmname}-${verType}".rpm"
elif [ "$verType" == "stable" ]; then
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"
......
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
......@@ -12,22 +13,22 @@ URL: www.taosdata.com
AutoReqProv: no
#BuildRoot: %_topdir/BUILDROOT
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
#Prefix: /usr/local/taos
#BuildRequires:
#Requires:
#BuildRequires:
#Requires:
%description
Big Data Platform Designed and Optimized for IoT
#"prep" Nothing needs to be done
#"prep" Nothing needs to be done
#%prep
#%setup -q
#%setup -T
#%setup -T
#"build" Nothing needs to be done
#"build" Nothing needs to be done
#%build
#%configure
#make %{?_smp_mflags}
......@@ -75,9 +76,53 @@ fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
mkdir -p %{buildroot}%{userlocalpath}/include
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share
mkdir -p %{buildroot}%{userlocalpath}/share/doc
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share/man
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/bin/jeprof ]; then
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
fi
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
fi
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
fi
fi
#Scripts executed before installation
%pre
csudo=""
......@@ -103,7 +148,7 @@ fi
# if taos.cfg already softlink, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
${csudo} rm -f %{homepath}/cfg/taos.cfg || :
fi
fi
# there can not libtaos.so*, otherwise ln -s error
${csudo} rm -f %{homepath}/driver/libtaos* || :
......@@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then
fi
cd %{homepath}/script
${csudo} ./post.sh
# Scripts executed before uninstall
%preun
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
# only remove package to call preun.sh, not but update(2)
# only remove package to call preun.sh, not but update(2)
if [ $1 -eq 0 ];then
#cd %{homepath}/script
#${csudo} ./preun.sh
if [ -f %{homepath}/script/preun.sh ]; then
cd %{homepath}/script
${csudo} ./preun.sh
......@@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
......@@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${log_link_dir} || :
${csudo} rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
fi
fi
fi
fi
# Scripts executed after uninstall
%postun
# clean build dir
%clean
csudo=""
......
This diff has been collapsed.
#!/bin/bash
#
# This file is used to install TAOS time-series database on linux systems. The operating system
# This file is used to install TAOS time-series database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
# set -x
# -----------------------Variables definition---------------------
# -----------------------Variables definition
source_dir=$1
binary_dir=$2
osType=$3
......@@ -71,9 +71,9 @@ if [ "$osType" != "Darwin" ]; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
......@@ -123,9 +123,9 @@ function kill_taosd() {
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg
${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
......@@ -176,6 +176,49 @@ function install_bin() {
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
fi
}
function install_jemalloc() {
if [ "$osType" != "Darwin" ]; then
/usr/bin/install -c -d /usr/local/bin
if [ -f ${binary_dir}/build/bin/jemalloc-config ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jemalloc.sh ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jeprof ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
fi
if [ -f ${binary_dir}/build/include/jemalloc/jemalloc.h ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${binary_dir}/build/lib/libjemalloc.so.2 ]; then
/usr/bin/install -c -d /usr/local/lib
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
/usr/bin/install -c -d /usr/local/lib
if [ -f ${binary_dir}/build/lib/libjemalloc.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${binary_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${binary_dir}/build/share/man/man3/jemalloc.3 ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
fi
}
function install_lib() {
# Remove links
......@@ -183,12 +226,12 @@ function install_lib() {
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
fi
if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
......@@ -198,7 +241,9 @@ function install_lib() {
${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
install_jemalloc
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
fi
......@@ -206,26 +251,26 @@ function install_lib() {
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
fi
${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
}
function install_log() {
function install_log() {
${csudo} rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
......@@ -239,7 +284,7 @@ function install_log() {
function install_data() {
${csudo} mkdir -p ${data_dir}
${csudo} ln -s ${data_dir} ${install_main_dir}/data
${csudo} ln -s ${data_dir} ${install_main_dir}/data
}
function install_connector() {
......@@ -254,8 +299,8 @@ function install_connector() {
echo "WARNING: go connector not found, please check if want to use it!"
fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
}
function install_examples() {
......@@ -264,8 +309,8 @@ function install_examples() {
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
${csudo} service taosd stop || :
fi
......@@ -277,9 +322,9 @@ function clean_service_on_sysvinit() {
elif ((${initd_mod}==3)); then
${csudo} update-rc.d -f taosd remove || :
fi
${csudo} rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
......@@ -298,10 +343,10 @@ function install_service_on_sysvinit() {
${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
if ((${initd_mod}==1)); then
${csudo} chkconfig --add taosd || :
${csudo} chkconfig --level 2345 taosd on || :
......@@ -323,7 +368,7 @@ function clean_service_on_systemd() {
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
}
}
# taos:2345:respawn:/etc/init.d/taosd start
......@@ -383,7 +428,7 @@ function update_TDengine() {
sleep 1
fi
fi
install_main_path
install_log
......@@ -431,16 +476,16 @@ function install_TDengine() {
# Start to install
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN}Start to install TDEngine...${NC}"
else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
fi
install_main_path
install_main_path
if [ "$osType" != "Darwin" ]; then
if [ "$osType" != "Darwin" ]; then
install_data
fi
install_log
install_log
install_header
install_lib
install_connector
......@@ -452,7 +497,7 @@ function install_TDengine() {
install_service
fi
install_config
install_config
if [ "$osType" != "Darwin" ]; then
# Ask if to start the service
......
......@@ -30,12 +30,12 @@ else
install_dir="${release_dir}/TDengine-server-${version}"
fi
# Directories and files.
# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
fi
......@@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
......@@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
......@@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
......@@ -168,7 +201,7 @@ fi
# exit 1
cd ${release_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
......@@ -185,8 +218,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
......
......@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSCLOCALMERGE_H
#define TDENGINE_TSCLOCALMERGE_H
#ifndef TDENGINE_TSCGLOBALMERGE_H
#define TDENGINE_TSCGLOBALMERGE_H
#ifdef __cplusplus
extern "C" {
......@@ -24,7 +24,7 @@ extern "C" {
#include "qFill.h"
#include "taosmsg.h"
#include "tlosertree.h"
#include "tsclient.h"
#include "qExecutor.h"
#define MAX_NUM_OF_SUBQUERY_RETRY 3
......@@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
typedef struct SLocalMerger {
typedef struct SGlobalMerger {
SLocalDataSource **pLocalDataSrc;
int32_t numOfBuffer;
int32_t numOfCompleted;
......@@ -48,20 +48,22 @@ typedef struct SLocalMerger {
tOrderDescriptor *pDesc;
tExtMemBuffer **pExtMemBuffer; // disk-based buffer
char *buf; // temp buffer
} SLocalMerger;
} SGlobalMerger;
struct SSqlObj;
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor;
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSql;
struct SSqlObj *pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times
} SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
int32_t numOfRows, int32_t orderType);
......@@ -71,13 +73,13 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/*
* create local reducer to launch the second-stage reduce process at client site
*/
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo *pQueryInfo, SLocalMerger **pMerger, int64_t id);
int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo *pQueryInfo, SGlobalMerger **pMerger, int64_t id);
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger);
void tscDestroyGlobalMerger(SGlobalMerger* pMerger);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TSCLOCALMERGE_H
#endif // TDENGINE_TSCGLOBALMERGE_H
......@@ -20,13 +20,13 @@
extern "C" {
#endif
#include "tsched.h"
#include "exception.h"
#include "os.h"
#include "qExtbuffer.h"
#include "taosdef.h"
#include "tbuffer.h"
#include "tscLocalMerge.h"
#include "tscGlobalmerge.h"
#include "tsched.h"
#include "tsclient.h"
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
......@@ -63,7 +63,7 @@ typedef struct SJoinSupporter {
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SGroupbyExpr groupInfo; // group by info
SGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
......@@ -111,7 +111,7 @@ void* tscDestroyUdfArrayList(SArray* pUdfList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
......@@ -124,6 +124,8 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
......@@ -286,6 +288,7 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
void tscInitResForMerge(SSqlRes* pRes);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
......@@ -330,12 +333,15 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema);
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);
char* strdup_throw(const char* str);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
#ifdef __cplusplus
}
#endif
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TSCHEMAUTIL_H
#define TDENGINE_TSCHEMAUTIL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "taosmsg.h"
#include "tsclient.h"
#include "ttoken.h"
/**
* get the number of tags of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
/**
* get the number of columns of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
/**
* get the basic info of this table
* @param pTableMeta
* @return
*/
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
/**
* get the schema
* @param pTableMeta
* @return
*/
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
/**
* get the tag schema
* @param pMeta
* @return
*/
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
/**
* get the column schema according to the column index
* @param pMeta
* @param colIndex
* @return
*/
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
/**
* get the column schema according to the column id
* @param pTableMeta
* @param colId
* @return
*/
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* create the table meta from the msg
* @param pTableMetaMsg
* @param size size of the table meta
* @return
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TSCHEMAUTIL_H
......@@ -40,17 +40,9 @@ extern "C" {
// forward declaration
struct SSqlInfo;
struct SLocalMerger;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows);
typedef struct STableComInfo {
uint8_t numOfTags;
uint8_t precision;
int16_t numOfColumns;
int32_t rowSize;
} STableComInfo;
typedef struct SNewVgroupInfo {
int32_t vgId;
int8_t inUse;
......@@ -66,34 +58,6 @@ typedef struct CChildTableMeta {
uint64_t suid; // super table id
} CChildTableMeta;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
/*
* 1. keep the vgroup index during the multi-vnode super table projection query
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
......@@ -111,44 +75,6 @@ typedef struct SInternalField {
SExprInfo *pExpr;
} SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
char * cond;
} SCond;
typedef struct SJoinNode {
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
typedef struct STagCond {
// relation between tbname list and query condition, including : TK_AND or TK_OR
int16_t relType;
// tbname query condition, only support tbname query condition on one table
SCond tbnameCond;
// join condition, only support two tables join currently
SJoinInfo joinInfo;
// for different table, the query condition must be seperated
SArray *pCond;
} STagCond;
typedef struct SParamInfo {
int32_t idx;
uint8_t type;
......@@ -191,59 +117,6 @@ typedef struct STableDataBlocks {
SParamInfo *params;
} STableDataBlocks;
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
uint32_t type; // query/insert type
STimeWindow window; // the whole query time window
SInterval interval; // tumble time window
SSessionWindow sessionWindow; // session time window
SGroupbyExpr groupbyExpr; // groupby tags info
SArray * colList; // SArray<SColumn*>
SFieldInfo fieldsInfo;
SArray * exprList; // SArray<SExprInfo*>
SArray * exprList1; // final exprlist in case of arithmetic expression exists
SLimitVal limit;
SLimitVal slimit;
STagCond tagCond;
SOrderVal order;
int16_t fillType; // final result fill type
int16_t numOfTables;
STableMetaInfo **pTableMetaInfo;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
SQInfo* pQInfo; // global merge operator
SQueryAttr* pQueryAttr; // query object
struct SQueryInfo *sibling; // sibling
SArray *pUpstream; // SArray<struct SQueryInfo>
struct SQueryInfo *pDownstream;
int32_t havingFieldNum;
bool stableQuery;
bool groupbyColumn;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
bool hasFilter;
bool onlyTagQuery;
bool globalMerge; // need global merge
SArray *pUdfInfo; // user defined function information SArray<SUdfInfo>
} SQueryInfo;
typedef struct {
STableMeta *pTableMeta;
SVgroupsInfo *pVgroupInfo;
......@@ -257,9 +130,13 @@ typedef struct SInsertStatementParam {
int8_t schemaAttached; // denote if submit block is built with table schema or not
STagData tagData; // NOTE: pTagData->data is used as a variant length array
int32_t batchSize; // for parameter ('?') binding and batch processing
int32_t numOfParams;
char msg[512]; // error message
char *sql; // current sql statement position
uint32_t insertType; // insert data from [file|sql statement| bound statement]
uint64_t objectId; // sql object id
char *sql; // current sql statement position
} SInsertStatementParam;
// TODO extract sql parser supporter
......@@ -268,15 +145,10 @@ typedef struct {
uint8_t msgType;
SInsertStatementParam insertParam;
char reserve1[3]; // fix bus error on arm32
int32_t count; // todo remove it
bool subCmd;
union {
int32_t count;
};
char * curSql; // current sql, resume position of sql after parsing paused
char reserve2[3]; // fix bus error on arm32
int16_t numOfCols;
char reserve3[2]; // fix bus error on arm32
uint32_t allocSize;
......@@ -287,8 +159,6 @@ typedef struct {
SQueryInfo *pQueryInfo;
SQueryInfo *active; // current active query info
int32_t batchSize; // for parameter ('?') binding and batch processing
int32_t numOfParams;
STagData tagData; // NOTE: pTagData->data is used as a variant length array
int32_t resColumnId;
} SSqlCmd;
......@@ -324,7 +194,7 @@ typedef struct {
TAOS_FIELD* final;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalMerger *pLocalMerger;
struct SGlobalMerger *pMerger;
} SSqlRes;
typedef struct {
......@@ -451,7 +321,7 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput);
void destroyTableNameList(SSqlCmd* pCmd);
void destroyTableNameList(SInsertStatementParam* pInsertParam);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
......@@ -483,7 +353,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
struct SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id);
bool tscIsUpdateQuery(SSqlObj* pSql);
char* tscGetSqlStr(SSqlObj* pSql);
bool tscIsQueryWithLimit(SSqlObj* pSql);
......@@ -493,7 +363,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);
......
......@@ -22,7 +22,7 @@
#include "tscSubquery.h"
#include "tscUtil.h"
#include "tsched.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
......@@ -58,7 +58,6 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pCmd->curSql = pSql->sqlstr;
pCmd->resColumnId = TSDB_RES_COL_ID;
int32_t code = tsParseSql(pSql, true);
......@@ -221,7 +220,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
tscResetForNextRetrieve(pRes);
// handle the sub queries of join query
// handle outer query based on the already retrieved nest query results.
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
SSchedMsg schedMsg = {0};
......
......@@ -13,15 +13,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
#include "texpr.h"
#include "tlosertree.h"
#include "tscGlobalmerge.h"
#include "tscSubquery.h"
#include "tscLog.h"
#include "tsclient.h"
#include "qUtil.h"
#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
(data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
tOrderDescriptor * pDesc;
......@@ -29,9 +33,18 @@ typedef struct SCompareParam {
int32_t groupOrderType;
} SCompareParam;
bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndex, int32_t index, char **buf);
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
}
// if ret == 0, means the result belongs to the same group
return (ret == 0);
}
int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
static int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
int32_t pLeftIdx = *(int32_t *)pLeft;
int32_t pRightIdx = *(int32_t *)pRight;
......@@ -57,16 +70,16 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo* pQueryInfo, SLocalMerger **pMerger, int64_t id) {
int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SQueryInfo* pQueryInfo, SGlobalMerger **pMerger, int64_t id) {
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscError("0x%"PRIx64" %p pMemBuffer is NULL", id, pMemBuffer);
return TSDB_CODE_TSC_APP_ERROR;
}
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscError("0x%"PRIx64" no local buffer or intermediate result format model", id);
return TSDB_CODE_TSC_APP_ERROR;
}
......@@ -83,7 +96,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
}
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
tscDebug("0x%"PRIx64" no data to retrieve", id);
return TSDB_CODE_SUCCESS;
}
......@@ -92,15 +105,15 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", id, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_TSC_APP_ERROR;
}
*pMerger = (SLocalMerger *) calloc(1, sizeof(SLocalMerger));
*pMerger = (SGlobalMerger *) calloc(1, sizeof(SGlobalMerger));
if ((*pMerger) == NULL) {
tscError("0x%"PRIx64" failed to create local merge structure, out of memory", id);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -160,7 +173,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
// no data actually, no need to merge result.
if (idx == 0) {
tscDebug("0x%"PRIx64" retrieved no data", id);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, numOfBuffer);
tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
return TSDB_CODE_SUCCESS;
}
......@@ -198,7 +211,7 @@ int32_t tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tO
}
// restore the limitation value at the last stage
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->orderProjectQuery) {
pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
pQueryInfo->limit.offset = pQueryInfo->prjOffset;
}
......@@ -297,28 +310,28 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0;
}
void tscDestroyLocalMerger(SLocalMerger* pLocalMerger) {
if (pLocalMerger == NULL) {
void tscDestroyGlobalMerger(SGlobalMerger* pMerger) {
if (pMerger == NULL) {
return;
}
for (int32_t i = 0; i < pLocalMerger->numOfBuffer; ++i) {
tfree(pLocalMerger->pLocalDataSrc[i]);
for (int32_t i = 0; i < pMerger->numOfBuffer; ++i) {
tfree(pMerger->pLocalDataSrc[i]);
}
pLocalMerger->numOfBuffer = 0;
tscLocalReducerEnvDestroy(pLocalMerger->pExtMemBuffer, pLocalMerger->pDesc, pLocalMerger->numOfVnode);
pMerger->numOfBuffer = 0;
tscDestroyGlobalMergerEnv(pMerger->pExtMemBuffer, pMerger->pDesc, pMerger->numOfVnode);
pLocalMerger->numOfCompleted = 0;
pMerger->numOfCompleted = 0;
if (pLocalMerger->pLoserTree) {
tfree(pLocalMerger->pLoserTree->param);
tfree(pLocalMerger->pLoserTree);
if (pMerger->pLoserTree) {
tfree(pMerger->pLoserTree->param);
tfree(pMerger->pLoserTree);
}
tfree(pLocalMerger->buf);
tfree(pLocalMerger->pLocalDataSrc);
free(pLocalMerger);
tfree(pMerger->buf);
tfree(pMerger->pLocalDataSrc);
free(pMerger);
}
static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* pQueryInfo, SColumnModel *pModel) {
......@@ -329,7 +342,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
}
// primary timestamp column is involved in final result
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->interval.interval != 0 || pQueryInfo->orderProjectQuery) {
numOfGroupByCols++;
}
......@@ -392,7 +405,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
}
}
int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) {
SSchema *pSchema = NULL;
SColumnModel *pModel = NULL;
......@@ -456,7 +469,7 @@ int32_t tscLocalReducerEnvCreate(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu
* @param pDesc
* @param numOfVnodes
*/
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
tOrderDescDestroy(pDesc);
for (int32_t i = 0; i < numOfVnodes; ++i) {
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
......@@ -467,12 +480,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/**
*
* @param pLocalMerge
* @param pMerger
* @param pOneInterDataSrc
* @param treeList
* @return the number of remain input source. if ret == 0, all data has been handled
*/
int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
int32_t loadNewDataFromDiskFor(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1;
......@@ -489,17 +502,17 @@ int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOne
#endif
*needAdjustLoserTree = true;
} else {
pLocalMerge->numOfCompleted += 1;
pMerger->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true;
}
return pLocalMerge->numOfBuffer;
return pMerger->numOfBuffer;
}
void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
void adjustLoserTreeFromNewData(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) {
/*
* load a new data page into memory for intermediate dataset source,
......@@ -507,7 +520,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
*/
bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
loadNewDataFromDiskFor(pMerger, pOneInterDataSrc, &needToAdjust);
}
/*
......@@ -515,7 +528,7 @@ void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOn
* if the loser tree is rebuild completed, we do not need to adjust
*/
if (needToAdjust) {
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
int32_t leafNodeIdx = pTree->pNode[0].index + pMerger->numOfBuffer;
#ifdef _DEBUG_VIEW
printf("before adjust:\t");
......@@ -567,7 +580,7 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
}
}
static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
SMultiwayMergeInfo* pInfo = pOperator->info;
SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
......@@ -579,7 +592,7 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S
for(int32_t i = 0; i < pBlock->info.rows; ++i) {
if (pInfo->hasPrev) {
if (needToMergeRv(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
}
......@@ -694,45 +707,27 @@ static void doExecuteFinalMergeRv(SOperatorInfo* pOperator, int32_t numOfExpr, S
tfree(add);
}
bool needToMergeRv(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
}
// if ret == 0, means the result belongs to the same group
return (ret == 0);
static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
return (pMerger->numOfBuffer == pMerger->numOfCompleted);
}
static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
}
void tscInitResObjForLocalQuery(SSqlObj *pSql, int32_t numOfRes, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
if (pRes->pLocalMerger != NULL) {
tscDestroyLocalMerger(pRes->pLocalMerger);
pRes->pLocalMerger = NULL;
tscDebug("0x%"PRIx64" free local reducer finished", pSql->self);
SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id) {
SGlobalMerger *pMerger = calloc(1, sizeof(SGlobalMerger));
if (pMerger == NULL) {
tscDebug("0x%"PRIx64" free local reducer finished", id);
return NULL;
}
pRes->qId = 1; // hack to pass the safety check in fetch_row function
pRes->numOfRows = 0;
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/*
* One more byte is required, since sprintf needs one additional byte for the terminating '\0' at
* the end of the string
*/
size_t size = numOfRes * rowLen + 1;
pRes->pLocalMerger->buf = calloc(1, size);
pRes->data = pRes->pLocalMerger->buf;
pMerger->buf = calloc(1, size);
return pMerger;
}
// todo remove it
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
int32_t maxRowSize = MAX(rowSize, finalRowSize);
char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
......@@ -776,9 +771,6 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
return offset;
}
#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
(data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex,
int32_t maxRows) {
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
......@@ -800,7 +792,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
SMultiwayMergeInfo *pInfo = pOperator->info;
SLocalMerger *pMerger = pInfo->pMerge;
SGlobalMerger *pMerger = pInfo->pMerge;
SLoserTreeInfo *pTree = pMerger->pLoserTree;
pInfo->binfo.pRes->info.rows = 0;
......@@ -884,7 +876,7 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL;
}
static bool isSameGroupRv(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
static bool isSameGroup(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList);
for (int32_t i = 0; i < numOfCols; ++i) {
SColIndex *pIndex = taosArrayGet(orderColumnList, i);
......@@ -937,7 +929,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
}
}
doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);
doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);
savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0,
&pAggInfo->hasGroupColData);
......@@ -958,7 +950,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
}
if (pAggInfo->hasGroupColData) {
bool sameGroup = isSameGroupRv(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
if (!sameGroup) {
*newgroup = true;
pAggInfo->hasDataBlockForNewGroup = true;
......@@ -972,7 +964,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor);
doExecuteFinalMergeRv(pOperator, pOperator->numOfOutput, pBlock);
doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pBlock);
savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData);
handleData = true;
}
......
......@@ -20,7 +20,7 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "taos.h"
#include "tscSubquery.h"
......@@ -71,7 +71,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
numOfRows = numOfRows + tscGetNumOfTags(pMeta);
}
tscInitResObjForLocalQuery(pSql, totalNumOfRows, rowLen);
pSql->res.pMerger = tscInitResObjForLocalQuery(totalNumOfRows, rowLen, pSql->self);
tscInitResForMerge(&pSql->res);
SSchema *pSchema = tscGetTableSchema(pMeta);
for (int32_t i = 0; i < numOfRows; ++i) {
......@@ -433,7 +435,8 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
if (strlen(ddl) == 0) {
}
tscInitResObjForLocalQuery(pSql, numOfRows, rowLen);
pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self);
tscInitResForMerge(&pSql->res);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows;
......@@ -882,7 +885,8 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
pSql->res.pMerger = tscInitResObjForLocalQuery(1, (int32_t)valueLength, pSql->self);
tscInitResForMerge(&pSql->res);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
......
This diff is collapsed.
......@@ -48,6 +48,7 @@ typedef struct SMultiTbStmt {
bool nameSet;
bool tagSet;
uint64_t currentUid;
char *sqlstr;
uint32_t tbNum;
SStrToken tbname;
SStrToken stbname;
......@@ -1085,7 +1086,7 @@ static int insertStmtExecute(STscStmt* stmt) {
fillTablesColumnsNull(stmt->pSql);
int code = tscMergeTableDataBlocks(stmt->pSql, false);
int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
......@@ -1185,7 +1186,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
fillTablesColumnsNull(pStmt->pSql);
if ((code = tscMergeTableDataBlocks(pStmt->pSql, false)) != TSDB_CODE_SUCCESS) {
if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
return code;
}
......@@ -1203,7 +1204,6 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
return pStmt->pSql->res.code;
}
int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
SSqlCmd *pCmd = &pSql->cmd;
int32_t ret = TSDB_CODE_SUCCESS;
......@@ -1213,7 +1213,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
}
int32_t index = 0;
SStrToken sToken = tStrGetToken(pCmd->curSql, &index, false);
SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
......@@ -1232,29 +1232,29 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tagSet = true;
sToken = tStrGetToken(pCmd->curSql, &index, false);
if (sToken.n > 0 && sToken.type == TK_VALUES) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
return TSDB_CODE_SUCCESS;
}
if (sToken.n <= 0 || sToken.type != TK_USING) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z);
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z);
}
pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_LP) {
return TSDB_CODE_TSC_INVALID_OPERATION;
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));
......@@ -1262,7 +1262,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
int32_t loopCont = 1;
while (loopCont) {
sToken = tStrGetToken(pCmd->curSql, &index, false);
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
......@@ -1285,20 +1285,18 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
sToken = tStrGetToken(pCmd->curSql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_VALUES) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
pStmt->mtb.values = sToken;
}
return TSDB_CODE_SUCCESS;
}
int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) {
size_t tagNum = taosArrayGetSize(pStmt->mtb.tags);
size_t size = 1048576;
......@@ -1373,14 +1371,17 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
break;
}
free(pSql->sqlstr);
if (pStmt->mtb.sqlstr == NULL) {
pStmt->mtb.sqlstr = pSql->sqlstr;
} else {
tfree(pSql->sqlstr);
}
pSql->sqlstr = str;
return TSDB_CODE_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// interface functions
......@@ -1468,7 +1469,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
pSql->cmd.numOfParams = 0;
pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
registerSqlObj(pSql);
......@@ -1561,11 +1562,10 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
}
pStmt->mtb.nameSet = true;
pStmt->mtb.tagSet = true;
tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pSql->cmd.numOfParams = 0;
pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
......@@ -1634,6 +1634,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags);
tfree(pStmt->mtb.sqlstr);
}
}
......@@ -1851,8 +1852,8 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql;
SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->numOfParams;
SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->insertParam.numOfParams;
return TSDB_CODE_SUCCESS;
} else {
SNormalStmt* normal = &pStmt->normal;
......
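The prepared-statement changes above switch token parsing to pCmd->insertParam.sql and accept either a VALUES keyword or an opening parenthesis after the tag list, so a statement such as "insert into ? using stb tags(?) values(?,?)" can auto-create child tables at bind time and now reports syntax errors with dedicated messages. The following is a minimal client-side sketch of that flow, not part of the patch; the connection parameters, the super table stb (ts timestamp, current float) tags (groupid int) and the child table name d1001 are assumptions for illustration, and only the execute step is error-checked.

#include <stdio.h>
#include <string.h>
#include <taos.h>

int main(void) {
  // Assumed environment: a local taosd with database "test" containing
  //   create table stb (ts timestamp, current float) tags (groupid int);
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  TAOS_STMT  *stmt = taos_stmt_init(taos);
  const char *sql  = "insert into ? using stb tags(?) values(?,?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  // Tag value for the auto-created child table "d1001".
  int       groupid = 2;
  TAOS_BIND tag     = {0};
  tag.buffer_type   = TSDB_DATA_TYPE_INT;
  tag.buffer        = &groupid;
  tag.buffer_length = sizeof(groupid);
  tag.length        = &tag.buffer_length;
  taos_stmt_set_tbname_tags(stmt, "d1001", &tag);

  // One row: (timestamp in ms, current).
  int64_t   ts      = 1600000000000LL;
  float     current = 10.5f;
  TAOS_BIND row[2]  = {{0}, {0}};
  row[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  row[0].buffer        = &ts;
  row[0].buffer_length = sizeof(ts);
  row[0].length        = &row[0].buffer_length;
  row[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  row[1].buffer        = &current;
  row[1].buffer_length = sizeof(current);
  row[1].length        = &row[1].buffer_length;

  taos_stmt_bind_param(stmt, row);
  taos_stmt_add_batch(stmt);
  if (taos_stmt_execute(stmt) != 0) {
    fprintf(stderr, "stmt execute failed\n");
  }

  taos_stmt_close(stmt);
  taos_close(taos);
  return 0;
}

Each taos_stmt_* call returns 0 on success; a production caller would check every return value rather than only the execute step.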
......@@ -28,7 +28,7 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "tstrbuild.h"
#include "ttoken.h"
......@@ -91,6 +91,7 @@ static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCm
static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding);
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem);
......@@ -196,7 +197,7 @@ static bool validateDebugFlag(int32_t v) {
* is not needed in the final error message.
*/
static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL);
return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
}
static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
......@@ -993,6 +994,59 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
// The following part is used to check for the invalid query expression.
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
}
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
const char* msg1 = "invalid column name";
const char* msg3 = "not support state_window with group by ";
const char* msg4 = "function not support for super table query";
SStrToken *col = &(pSqlNode->windowstateVal.col) ;
if (col->z == NULL || col->n <= 0) {
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->groupbyExpr.numOfGroupCols = 1;
//TODO(dengyihao): check tag column
if (isStable) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
if (pGroupExpr->columnInfo == NULL) {
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
pQueryInfo->stateWindow = true;
return TSDB_CODE_SUCCESS;
}
int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) {
const char* msg1 = "gap should be fixed time window";
......@@ -1027,11 +1081,17 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pQueryInfo->sessionWindow.gap == 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
......@@ -3167,6 +3227,9 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true;
}
}
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
return false;
......@@ -6472,7 +6535,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by not allowed on projection query";
const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
......@@ -6488,6 +6551,9 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
}
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
// check if all the tags prj columns belongs to the group by columns
......@@ -6931,7 +6997,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (!findColumnIndex) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z);
return tscInvalidOperationMsg(pCmd->payload, "invalid tag name", sToken->z);
}
}
} else {
......@@ -7074,6 +7140,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
......@@ -7747,6 +7814,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg1 = "point interpolation query needs timestamp";
const char* msg2 = "too many tables in from clause";
const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg9 = "only tag query not compatible with normal column filter";
int32_t code = TSDB_CODE_SUCCESS;
......@@ -7788,6 +7856,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// validate the query filter condition info
if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
......@@ -7799,6 +7868,30 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
}
// validate the interval info
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
if (isTimeWindowQuery(pQueryInfo)) {
// check if the first column of the nest query result is timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
// set order by info
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
pQueryInfo->command = TSDB_SQL_SELECT;
......@@ -7855,7 +7948,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// set order by info
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) !=
TSDB_CODE_SUCCESS) {
......@@ -7896,6 +7992,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
* transfer sql functions that need secondary merge into another format
* in dealing with super table queries such as: count/first/last
*/
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
......@@ -7903,10 +8003,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
// no result due to invalid query time range
if (pQueryInfo->window.skey > pQueryInfo->window.ekey) {
pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
......@@ -7950,11 +8046,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pCmd, pQueryInfo, 0);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
SExprInfo** p = NULL;
int32_t numOfExpr = 0;
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr);
if (pQueryInfo->exprList1 == NULL) {
pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
}
......
......@@ -15,15 +15,15 @@
#include "os.h"
#include "tcmdtype.h"
#include "tlockfree.h"
#include "trpc.h"
#include "tscLocalMerge.h"
#include "tscGlobalmerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "tlockfree.h"
#include "qPlan.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
......@@ -857,6 +857,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->simpleAgg = query.simpleAgg;
pQueryMsg->pointInterpQuery = query.pointInterpQuery;
pQueryMsg->needReverseScan = query.needReverseScan;
pQueryMsg->stateWindow = query.stateWindow;
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->sqlstrLen = htonl(sqlLen);
......@@ -1656,7 +1657,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
return code;
}
if (pRes->pLocalMerger == NULL) { // no result from subquery, so abort here directly.
if (pRes->pMerger == NULL) { // no result from subquery, so abort here directly.
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
return code;
}
......@@ -1673,15 +1674,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
taosArrayPush(group, &tableKeyInfo);
taosArrayPush(tableGroupInfo.pGroupList, &group);
// todo remove it
SExprInfo* list = calloc(tscNumOfExprs(pQueryInfo), sizeof(SExprInfo));
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExprInfo = tscExprGet(pQueryInfo, i);
list[i] = *pExprInfo;
}
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, list, &tableGroupInfo, NULL, NULL, pRes->pLocalMerger, MERGE_STAGE);
tfree(list);
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE);
}
uint64_t localQueryId = 0;
......@@ -2534,8 +2527,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
// tag data exists
if (autocreate && pSql->cmd.tagData.dataLen != 0) {
pMsg = serializeTagData(&pSql->cmd.tagData, pMsg);
if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) {
pMsg = serializeTagData(&pSql->cmd.insertParam.tagData, pMsg);
}
pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
......
......@@ -627,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL;
if (len > 0) {
z = strstr(pCmd->payload, "invalid SQL");
z = strstr(pCmd->payload, "invalid operation");
if (z == NULL) {
z = strstr(pCmd->payload, "syntax error");
}
......
......@@ -13,7 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <tschemautil.h>
#include "os.h"
#include "taosmsg.h"
#include "tscLog.h"
......
......@@ -21,7 +21,7 @@
#include "tcompare.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tschemautil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "qUdf.h"
#include "qUtil.h"
......@@ -2450,7 +2450,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
assert(pState->numOfSub > 0);
int32_t ret = tscLocalReducerEnvCreate(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
if (ret != 0) {
pRes->code = ret;
tscAsyncResultOnError(pSql);
......@@ -2463,7 +2463,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
if (pSql->pSubs == NULL) {
tfree(pSql->pSubs);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc,pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub);
tscAsyncResultOnError(pSql);
return ret;
......@@ -2530,13 +2530,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code; // free all allocated resource
}
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pState->numOfSub);
tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code;
}
......@@ -2701,7 +2701,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tstrerror(pParentSql->res.code));
// release allocated resource
tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor,
pState->numOfSub);
tscFreeRetrieveSup(pSql);
......@@ -2776,7 +2776,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo);
code = tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pLocalMerger, pParentSql->self);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) {
......@@ -3525,9 +3525,8 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
// todo remove pExprs
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo,
SOperatorInfo* pSourceOperator, char* sql, void* merger, int32_t stage) {
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
char* sql, void* merger, int32_t stage) {
assert(pQueryInfo != NULL);
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {
......
This diff is collapsed.
......@@ -16,13 +16,13 @@
*/
package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import com.taosdata.jdbc.utils.TaosInfo;
/**
* JNI connector
*/
......@@ -276,23 +276,14 @@ public class TSDBJNIConnector {
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
Long stmt = 0L;
try {
stmt = prepareStmtImp(sql.getBytes(), this.taos);
} catch (Exception e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
}
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
Long stmt = prepareStmtImp(sql.getBytes(), this.taos);
if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL);
} else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
}
if (stmt == TSDBConstants.JNI_SQL_NULL) {
} else if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
}
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
} else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}
......
......@@ -10,8 +10,15 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync)
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd taos_static)
......
......@@ -476,6 +476,7 @@ typedef struct {
bool simpleAgg;
bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan
bool stateWindow; // state window flag
STimeWindow window;
int32_t numOfTables;
......
......@@ -141,73 +141,74 @@
#define TK_VARIABLE 122
#define TK_INTERVAL 123
#define TK_SESSION 124
#define TK_FILL 125
#define TK_SLIDING 126
#define TK_ORDER 127
#define TK_BY 128
#define TK_ASC 129
#define TK_DESC 130
#define TK_GROUP 131
#define TK_HAVING 132
#define TK_LIMIT 133
#define TK_OFFSET 134
#define TK_SLIMIT 135
#define TK_SOFFSET 136
#define TK_WHERE 137
#define TK_NOW 138
#define TK_RESET 139
#define TK_QUERY 140
#define TK_SYNCDB 141
#define TK_ADD 142
#define TK_COLUMN 143
#define TK_TAG 144
#define TK_CHANGE 145
#define TK_SET 146
#define TK_KILL 147
#define TK_CONNECTION 148
#define TK_STREAM 149
#define TK_COLON 150
#define TK_ABORT 151
#define TK_AFTER 152
#define TK_ATTACH 153
#define TK_BEFORE 154
#define TK_BEGIN 155
#define TK_CASCADE 156
#define TK_CLUSTER 157
#define TK_CONFLICT 158
#define TK_COPY 159
#define TK_DEFERRED 160
#define TK_DELIMITERS 161
#define TK_DETACH 162
#define TK_EACH 163
#define TK_END 164
#define TK_EXPLAIN 165
#define TK_FAIL 166
#define TK_FOR 167
#define TK_IGNORE 168
#define TK_IMMEDIATE 169
#define TK_INITIALLY 170
#define TK_INSTEAD 171
#define TK_MATCH 172
#define TK_KEY 173
#define TK_OF 174
#define TK_RAISE 175
#define TK_REPLACE 176
#define TK_RESTRICT 177
#define TK_ROW 178
#define TK_STATEMENT 179
#define TK_TRIGGER 180
#define TK_VIEW 181
#define TK_SEMI 182
#define TK_NONE 183
#define TK_PREV 184
#define TK_LINEAR 185
#define TK_IMPORT 186
#define TK_TBNAME 187
#define TK_JOIN 188
#define TK_INSERT 189
#define TK_INTO 190
#define TK_VALUES 191
#define TK_STATE_WINDOW 125
#define TK_FILL 126
#define TK_SLIDING 127
#define TK_ORDER 128
#define TK_BY 129
#define TK_ASC 130
#define TK_DESC 131
#define TK_GROUP 132
#define TK_HAVING 133
#define TK_LIMIT 134
#define TK_OFFSET 135
#define TK_SLIMIT 136
#define TK_SOFFSET 137
#define TK_WHERE 138
#define TK_NOW 139
#define TK_RESET 140
#define TK_QUERY 141
#define TK_SYNCDB 142
#define TK_ADD 143
#define TK_COLUMN 144
#define TK_TAG 145
#define TK_CHANGE 146
#define TK_SET 147
#define TK_KILL 148
#define TK_CONNECTION 149
#define TK_STREAM 150
#define TK_COLON 151
#define TK_ABORT 152
#define TK_AFTER 153
#define TK_ATTACH 154
#define TK_BEFORE 155
#define TK_BEGIN 156
#define TK_CASCADE 157
#define TK_CLUSTER 158
#define TK_CONFLICT 159
#define TK_COPY 160
#define TK_DEFERRED 161
#define TK_DELIMITERS 162
#define TK_DETACH 163
#define TK_EACH 164
#define TK_END 165
#define TK_EXPLAIN 166
#define TK_FAIL 167
#define TK_FOR 168
#define TK_IGNORE 169
#define TK_IMMEDIATE 170
#define TK_INITIALLY 171
#define TK_INSTEAD 172
#define TK_MATCH 173
#define TK_KEY 174
#define TK_OF 175
#define TK_RAISE 176
#define TK_REPLACE 177
#define TK_RESTRICT 178
#define TK_ROW 179
#define TK_STATEMENT 180
#define TK_TRIGGER 181
#define TK_VIEW 182
#define TK_SEMI 183
#define TK_NONE 184
#define TK_PREV 185
#define TK_LINEAR 186
#define TK_IMPORT 187
#define TK_TBNAME 188
#define TK_JOIN 189
#define TK_INSERT 190
#define TK_INTO 191
#define TK_VALUES 192
#define TK_SPACE 300
#define TK_COMMENT 301
......
......@@ -11,10 +11,17 @@ IF (TD_LINUX)
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
ADD_EXECUTABLE(shell ${SRC})
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(shell taos_static lua)
TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC})
ELSE ()
TARGET_LINK_LIBRARIES(shell taos lua)
TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC})
ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
......
......@@ -55,14 +55,21 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC})
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson)
TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC})
ENDIF ()
ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(. SRC)
......@@ -71,7 +78,7 @@ ELSEIF (TD_WINDOWS)
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua})
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF ()
ELSEIF (TD_DARWIN)
# missing a few dependencies, such as <argp.h>
......
......@@ -1063,6 +1063,13 @@ static SDbCfg mnodeGetAlterDbOption(SDbObj *pDb, SAlterDbMsg *pAlter) {
newCfg.partitions = partitions;
}
// community version can only change daysToKeep
// but enterprise version can change all daysToKeep options
#ifndef _STORAGE
newCfg.daysToKeep1 = newCfg.daysToKeep;
newCfg.daysToKeep2 = newCfg.daysToKeep;
#endif
return newCfg;
}
......
......@@ -22,6 +22,10 @@
extern "C" {
#endif
#ifdef TD_JEMALLOC_ENABLED
#include <jemalloc/jemalloc.h>
#endif
typedef enum {
TAOS_ALLOC_MODE_DEFAULT = 0,
TAOS_ALLOC_MODE_RANDOM_FAIL = 1,
......
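With TD_JEMALLOC_ENABLED defined, the block above makes <jemalloc/jemalloc.h> visible wherever this header is included, so jemalloc's mallctl() introspection interface is available alongside the replaced malloc/free. The sketch below reads the allocator version and the currently allocated byte count; it assumes the default, unprefixed jemalloc build produced by the ExternalProject configure step earlier in this diff, and is illustrative only.

#include <stdio.h>
#ifdef TD_JEMALLOC_ENABLED
#include <jemalloc/jemalloc.h>
#endif

// Print basic jemalloc information when the build links against jemalloc;
// fall back to a note when the system allocator is in use.
static void dumpAllocatorInfo(void) {
#ifdef TD_JEMALLOC_ENABLED
  const char *version = NULL;
  size_t      len     = sizeof(version);
  if (mallctl("version", &version, &len, NULL, 0) == 0) {
    printf("jemalloc version: %s\n", version);
  }

  size_t allocated = 0;
  len = sizeof(allocated);
  if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0) {
    printf("bytes currently allocated: %zu\n", allocated);
  }
#else
  printf("built with the system allocator\n");
#endif
}

Note that stats.allocated is only refreshed when the "epoch" control is bumped, so the value read here may lag the true figure slightly.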
......@@ -158,7 +158,7 @@ static bool restProcessUdfRequest(HttpContext* pContext) {
}
char* sql = pContext->parser->body.str;
int len = pContext->parser->body.size;
int len = pContext->parser->body.pos;
if (sql == NULL) {
httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_SQL_INPUT);
return false;
......
......@@ -22,6 +22,7 @@
#include "qFill.h"
#include "qResultbuf.h"
#include "qSqlparser.h"
#include "qTableMeta.h"
#include "qTsbuf.h"
#include "query.h"
#include "taosdef.h"
......@@ -71,14 +72,6 @@ typedef struct SResultRowPool {
SArray* pData; // SArray<void*>
} SResultRowPool;
typedef struct SGroupbyExpr {
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols; // todo remove it
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SGroupbyExpr;
typedef struct SResultRow {
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
int32_t offset:29; // row index in buffer page
......@@ -197,6 +190,7 @@ typedef struct SQueryAttr {
bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan
bool distinctTag; // distinct tag query
bool stateWindow; // window State on sub/normal table
int32_t interBufSize; // intermediate buffer sizse
int32_t havingNum; // having expr number
......@@ -217,7 +211,7 @@ typedef struct SQueryAttr {
int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query.
int32_t maxTableColumnWidth;
int32_t tagLen; // tag value length of current query
SGroupbyExpr* pGroupbyExpr;
SGroupbyExpr *pGroupbyExpr;
SExprInfo* pExpr1;
SExprInfo* pExpr2;
......@@ -306,6 +300,7 @@ enum OPERATOR_TYPE_E {
OP_Filter = 19,
OP_Distinct = 20,
OP_Join = 21,
OP_StateWindow = 22,
};
typedef struct SOperatorInfo {
......@@ -471,6 +466,16 @@ typedef struct SSWindowOperatorInfo {
int32_t start; // start row index
} SSWindowOperatorInfo;
typedef struct SStateWindowOperatorInfo {
SOptrBasicInfo binfo;
STimeWindow curWindow; // current time window
int32_t numOfRows; // number of rows
int32_t colIndex; // index of the state column in the input block
int32_t start; // start row index
char* prevData; // previous data
} SStateWindowOperatorInfo;
typedef struct SDistinctOperatorInfo {
SHashObj *pSet;
SSDataBlock *pRes;
......@@ -479,10 +484,10 @@ typedef struct SDistinctOperatorInfo {
int64_t outputCapacity;
} SDistinctOperatorInfo;
struct SLocalMerger;
struct SGlobalMerger;
typedef struct SMultiwayMergeInfo {
struct SLocalMerger *pMerge;
struct SGlobalMerger *pMerge;
SOptrBasicInfo binfo;
int32_t bufCapacity;
int64_t seed;
......@@ -522,6 +527,7 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
int32_t numOfRows, void* merger, bool groupMix);
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
......@@ -565,6 +571,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId);
void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf);
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win);
int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);
bool isQueryKilled(SQInfo *pQInfo);
......
......@@ -16,6 +16,8 @@
#ifndef TDENGINE_QPLAN_H
#define TDENGINE_QPLAN_H
#include "qExecutor.h"
struct SQueryInfo;
typedef struct SQueryNodeBasicInfo {
......
......@@ -89,6 +89,10 @@ typedef struct SSessionWindowVal {
SStrToken gap;
} SSessionWindowVal;
typedef struct SWindowStateVal {
SStrToken col;
} SWindowStateVal;
struct SRelationInfo;
typedef struct SSqlNode {
......@@ -100,6 +104,7 @@ typedef struct SSqlNode {
SArray *fillType; // fill type[optional], SArray<tVariantListItem>
SIntervalVal interval; // (interval, interval_offset) [optional]
SSessionWindowVal sessionVal; // session window [optional]
SWindowStateVal windowstateVal; // window_state(col) [optional]
SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
......@@ -142,7 +147,7 @@ typedef struct SCreateTableSql {
} colInfo;
SArray *childTableInfo; // SArray<SCreatedTableInfo>
SSqlNode *pSelect;
SSqlNode *pSelect;
} SCreateTableSql;
typedef struct SAlterTableInfo {
......@@ -268,7 +273,6 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
//SRelationInfo *setSubquery(SRelationInfo* pFromInfo, SRelElementPair* p);
void *destroyRelationInfo(SRelationInfo* pFromInfo);
SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias);
......@@ -286,7 +290,7 @@ SArray *tSqlExprListAppend(SArray *pList, tSqlExpr *pNode, SStrToken *pDistinc
void tSqlExprListDestroy(SArray *pList);
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SWindowStateVal *pw,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
......
#ifndef TDENGINE_QTABLEUTIL_H
#define TDENGINE_QTABLEUTIL_H
#include "tsdb.h" //todo tsdb should not be here
#include "qSqlparser.h"
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SCond {
uint64_t uid;
int32_t len; // length of tag query condition data
char * cond;
} SCond;
typedef struct SJoinNode {
uint64_t uid;
int16_t tagColId;
SArray* tsJoin;
SArray* tagJoin;
} SJoinNode;
typedef struct SJoinInfo {
bool hasJoin;
SJoinNode *joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
typedef struct STagCond {
// relation between tbname list and query condition, including : TK_AND or TK_OR
int16_t relType;
// tbname query condition, only support tbname query condition on one table
SCond tbnameCond;
// join condition, only support two tables join currently
SJoinInfo joinInfo;
// for different tables, the query conditions must be separated
SArray *pCond;
} STagCond;
typedef struct SGroupbyExpr {
int16_t tableIndex;
SArray* columnInfo; // SArray<SColIndex>, group by columns information
int16_t numOfGroupCols; // todo remove it
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SGroupbyExpr;
typedef struct STableComInfo {
uint8_t numOfTags;
uint8_t precision;
int16_t numOfColumns;
int32_t rowSize;
} STableComInfo;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
/*
* 1. keep the vgroup index during the multi-vnode super table projection query
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
struct SQInfo; // global merge operator
struct SQueryAttr; // query object
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
STimeWindow window; // the whole query time window
SInterval interval; // tumble time window
SSessionWindow sessionWindow; // session time window
SGroupbyExpr groupbyExpr; // groupby tags info
SArray * colList; // SArray<SColumn*>
SFieldInfo fieldsInfo;
SArray * exprList; // SArray<SExprInfo*>
SArray * exprList1; // final exprlist in case of arithmetic expression exists
SLimitVal limit;
SLimitVal slimit;
STagCond tagCond;
SOrderVal order;
int16_t fillType; // final result fill type
int16_t numOfTables;
STableMetaInfo **pTableMetaInfo;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
SArray *pUdfInfo;
struct SQInfo *pQInfo; // global merge operator
struct SQueryAttr *pQueryAttr; // query object
struct SQueryInfo *sibling; // sibling
SArray *pUpstream; // SArray<struct SQueryInfo>
struct SQueryInfo *pDownstream;
int32_t havingFieldNum;
bool stableQuery;
bool groupbyColumn;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
bool hasFilter;
bool onlyTagQuery;
bool orderProjectQuery;
bool stateWindow;
bool globalMerge;
} SQueryInfo;
/**
* get the number of tags of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
/**
* get the number of columns of this table
* @param pTableMeta
* @return
*/
int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
/**
* get the basic info of this table
* @param pTableMeta
* @return
*/
STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
/**
* get the schema
* @param pTableMeta
* @return
*/
SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
/**
* get the tag schema
* @param pMeta
* @return
*/
SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
/**
* get the column schema according to the column index
* @param pMeta
* @param colIndex
* @return
*/
SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
/**
* get the column schema according to the column id
* @param pTableMeta
* @param colId
* @return
*/
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* create the table meta from the msg
* @param pTableMetaMsg
* @param size size of the table meta
* @return
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
#endif // TDENGINE_QTABLEUTIL_H
This diff is collapsed. (several additional files omitted)
......@@ -141,6 +141,7 @@ static SKeyword keywordTable[] = {
{"VARIABLE", TK_VARIABLE},
{"INTERVAL", TK_INTERVAL},
{"SESSION", TK_SESSION},
{"STATE_WINDOW", TK_STATE_WINDOW},
{"FILL", TK_FILL},
{"SLIDING", TK_SLIDING},
{"ORDER", TK_ORDER},
......
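Together with the parser and executor changes earlier in this diff, the new STATE_WINDOW keyword enables queries of the form SELECT ... FROM tb STATE_WINDOW(col), which opens a new window each time the value of the state column changes; per validateStateWindowNode above, the column must be a normal column, not of timestamp, float or double type, and super tables are rejected. The sketch below runs such a query through the C client; the database test, the table d1001 and its columns are assumptions used only for illustration.

#include <stdio.h>
#include <taos.h>

int main(void) {
  // Assumed schema: create table d1001 (ts timestamp, current float, status int);
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  // Aggregate over each run of identical 'status' values.
  const char *sql = "select first(ts), count(*), avg(current) "
                    "from d1001 state_window(status)";
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    int         numFields = taos_num_fields(res);
    TAOS_FIELD *fields    = taos_fetch_fields(res);
    TAOS_ROW    row;
    char        line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, numFields);  // one output row per state window
      printf("%s\n", line);
    }
  }

  taos_free_result(res);
  taos_close(taos);
  return 0;
}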
This diff is collapsed. (remaining files omitted)