taosdata / TDengine

Commit 7d5430b3
Authored Nov 26, 2021 by wmmhello

Merge branch 'master' into fix/TD-11074-master

Parents: 0108f619, 8ecbd51c

Showing 14 changed files with 262 additions and 40 deletions (+262 -40)
packaging/release.sh                                   +6    -1
packaging/tools/makeclient_kh.sh                       +1    -0
packaging/tools/makepkg_kh.sh                          +1    -1
src/client/src/tscUtil.c                               +1    -1
src/kit/shell/src/shellEngine.c                        +8    -4
src/kit/taosdemo/taosdemo.c                            +42   -2
src/plugins/http/src/httpServer.c                      +10   -4
src/plugins/monitor/src/monMain.c                      +4    -0
src/query/src/qExecutor.c                              +0    -3
tests/pytest/functions/function_count_last_stab.py     +12   -12
tests/pytest/query/nestedQuery/nestedQuery.py          +118  -6
tests/pytest/query/queryStateWindow.py                 +42   -2
tests/script/general/parser/col_arithmetic_query.sim   +4    -4
tests/script/general/parser/nestquery.sim              +13   -0
packaging/release.sh

@@ -385,9 +385,14 @@ if [[ "$dbName" == "kh" ]]; then
   # src/kit/shell/src/shellEngine.c
   sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c
   sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c
-  sed -i "s/\"taos> \"/\"kh> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+  sed -i "s/taos connect failed/kh connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+  sed -i "s/\"taos> \"/\"khclient> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
   sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
   sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+  # src/rpc/src/rpcMain.c
+  sed -i "s/taos connections/kh connections/g" ${top_dir}/src/rpc/src/rpcMain.c
+  # src/plugins/monitor/src/monMain.c
+  sed -i "s/taosd is quiting/khserver is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c
 fi
packaging/tools/makeclient_kh.sh

@@ -63,6 +63,7 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghis
 sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
 sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
 sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/TAOS/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
 
 mkdir -p ${install_dir}/bin
 if [ "$osType" != "Darwin" ]; then
packaging/tools/makepkg_kh.sh

@@ -74,7 +74,6 @@ if [ "$verMode" == "cluster" ]; then
     cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
     rm -rf ${install_dir}/nginxd/png
-    # replace the OEM name, add by yangzy@2021-09-22
     sed -i -e 's/www.taosdata.com/www.wellintech.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
     sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
     sed -i -e 's/TAOS Data/Wellintech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")

@@ -99,6 +98,7 @@ sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
 sed -i "s/support@taosdata.com/support@wellintech.com/g" ${install_dir}/cfg/kinghistorian.cfg
 sed -i "s/taos client/khclient/g" ${install_dir}/cfg/kinghistorian.cfg
 sed -i "s/taosd/khserver/g" ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/TAOS/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
 
 cd ${install_dir}
 tar -zcv -f kinghistorian.tar.gz * --remove-files || :
src/client/src/tscUtil.c

@@ -4606,7 +4606,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI
       }
     }
 
-    pse->colInfo.flag = TSDB_COL_NORMAL;
+    pse->colInfo.flag = pSource->base.colInfo.flag;  // TSDB_COL_NORMAL;
     pse->resType  = pSource->base.resType;
     pse->resBytes = pSource->base.resBytes;
     strncpy(pse->colInfo.name, pSource->base.aliasName, tListLen(pse->colInfo.name));
src/kit/shell/src/shellEngine.c

@@ -53,11 +53,11 @@ char CONTINUE_PROMPT[] = " -> ";
 int      prompt_size = 7;
 #elif (_TD_KH_ == true)
 char     CLIENT_VERSION[] = "Welcome to the KingHistorian shell from %s, Client Version:%s\n"
-                            "Copyright (c) 2021 by Hanatech, Inc. All rights reserved.\n\n";
-char     PROMPT_HEADER[] = "kh> ";
-char     CONTINUE_PROMPT[] = " -> ";
-int      prompt_size = 4;
+                            "Copyright (c) 2021 by Wellintech, Inc. All rights reserved.\n\n";
+char     PROMPT_HEADER[] = "khclient> ";
+char     CONTINUE_PROMPT[] = "       -> ";
+int      prompt_size = 10;
 #else
 char     CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
                             "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";

@@ -118,7 +118,11 @@ TAOS *shellInit(SShellArguments *_args) {
     }
   }
 
   if (con == NULL) {
+#ifdef _TD_KH_
+    printf("kh connect failed, reason: %s.\n\n", tstrerror(terrno));
+#else
     printf("taos connect failed, reason: %s.\n\n", tstrerror(terrno));
+#endif
     fflush(stdout);
     return con;
   }
src/kit/taosdemo/taosdemo.c

@@ -4956,21 +4956,27 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
         if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "INT", strlen("INT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT;
+            superTbls->columns[c].dataLen = sizeof(int);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "TINYINT", strlen("TINYINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT;
+            superTbls->columns[c].dataLen = sizeof(char);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "SMALLINT", strlen("SMALLINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT;
+            superTbls->columns[c].dataLen = sizeof(int16_t);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "BIGINT", strlen("BIGINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT;
+            superTbls->columns[c].dataLen = sizeof(int64_t);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "FLOAT", strlen("FLOAT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT;
+            superTbls->columns[c].dataLen = sizeof(float);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "DOUBLE", strlen("DOUBLE"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE;
+            superTbls->columns[c].dataLen = sizeof(double);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "BINARY", strlen("BINARY"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY;

@@ -4980,21 +4986,27 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "BOOL", strlen("BOOL"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL;
+            superTbls->columns[c].dataLen = sizeof(char);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "TIMESTAMP", strlen("TIMESTAMP"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+            superTbls->columns[c].dataLen = sizeof(int64_t);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "UTINYINT", strlen("UTINYINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT;
+            superTbls->columns[c].dataLen = sizeof(char);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "USMALLINT", strlen("USMALLINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT;
+            superTbls->columns[c].dataLen = sizeof(uint16_t);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "UINT", strlen("UINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT;
+            superTbls->columns[c].dataLen = sizeof(uint32_t);
         } else if (0 == strncasecmp(superTbls->columns[c].dataType,
                     "UBIGINT", strlen("UBIGINT"))) {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT;
+            superTbls->columns[c].dataLen = sizeof(uint64_t);
         } else {
             superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL;
         }

@@ -8555,7 +8567,7 @@ static int parseSamplefileToStmtBatch(
             case TSDB_DATA_TYPE_BINARY:
             case TSDB_DATA_TYPE_NCHAR:
                 tmpP = calloc(1, MAX_SAMPLES *
-                        (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)));
+                        (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth) + 1));
                 assert(tmpP);
                 *(uintptr_t*)(sampleBindBatchArray+sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
                 break;

@@ -12016,7 +12028,35 @@ static void setParaFromArg() {
             tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
                     type, min(DATATYPE_BUFF_LEN, strlen(type) + 1));
         } else {
-            g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth;
+            switch (g_Dbs.db[0].superTbls[0].columns[i].data_type) {
+                case TSDB_DATA_TYPE_BOOL:
+                case TSDB_DATA_TYPE_UTINYINT:
+                case TSDB_DATA_TYPE_TINYINT:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(char);
+                    break;
+                case TSDB_DATA_TYPE_SMALLINT:
+                case TSDB_DATA_TYPE_USMALLINT:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(int16_t);
+                    break;
+                case TSDB_DATA_TYPE_INT:
+                case TSDB_DATA_TYPE_UINT:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(int32_t);
+                    break;
+                case TSDB_DATA_TYPE_TIMESTAMP:
+                case TSDB_DATA_TYPE_BIGINT:
+                case TSDB_DATA_TYPE_UBIGINT:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(int64_t);
+                    break;
+                case TSDB_DATA_TYPE_FLOAT:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(float);
+                    break;
+                case TSDB_DATA_TYPE_DOUBLE:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(double);
+                    break;
+                default:
+                    g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth;
+                    break;
+            }
             tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
                     dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1));
         }
src/plugins/http/src/httpServer.c

@@ -53,7 +53,7 @@ static void httpStopThread(HttpThread *pThread) {
       break;
     }
   } while (0);
-  if (r) {
+  if (r && taosCheckPthreadValid(pThread->thread)) {
     pthread_cancel(pThread->thread);
   }
 #else

@@ -63,15 +63,21 @@ static void httpStopThread(HttpThread *pThread) {
     httpError("%s, failed to create eventfd, will call pthread_cancel instead, which may result in data corruption: %s",
               pThread->label, strerror(errno));
     pThread->stop = true;
-    pthread_cancel(pThread->thread);
+    if (taosCheckPthreadValid(pThread->thread)) {
+      pthread_cancel(pThread->thread);
+    }
   } else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
     httpError("%s, failed to call epoll_ctl, will call pthread_cancel instead, which may result in data corruption: %s",
               pThread->label, strerror(errno));
-    pthread_cancel(pThread->thread);
+    if (taosCheckPthreadValid(pThread->thread)) {
+      pthread_cancel(pThread->thread);
+    }
   }
 #endif // __APPLE__
 
-  pthread_join(pThread->thread, NULL);
+  if (taosCheckPthreadValid(pThread->thread)) {
+    pthread_join(pThread->thread, NULL);
+  }
 
 #ifdef __APPLE__
   if (sv[0] != -1) {
src/plugins/monitor/src/monMain.c

@@ -123,7 +123,11 @@ static void *monThreadFunc(void *param) {
     if (tsMonitor.quiting) {
       tsMonitor.state = MON_STATE_NOT_INIT;
+#ifdef _TD_KH_
+      monInfo("monitor thread will quit, for khserver is quiting");
+#else
       monInfo("monitor thread will quit, for taosd is quiting");
+#endif
       break;
     } else {
       taosGetDisk();
src/query/src/qExecutor.c

@@ -1713,9 +1713,6 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
   int32_t num = 0;
   for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
     char* val = ((char*)pColInfoData->pData) + bytes * j;
-    if (isNull(val, type)) {
-      continue;
-    }
     // Compare with the previous row of this column, and do not set the output buffer again if they are identical.
     if (pInfo->prevData == NULL) {
tests/pytest/functions/function_count_last_stab.py

@@ -42,22 +42,22 @@ class TDTestCase:
                 % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
 
         tdSql.query("select count(*),last(*) from stest group by col1")
-        tdSql.checkRows(10)
-        tdSql.checkData(0, 0, 1)
-        tdSql.checkData(1, 2, 2)
-        tdSql.checkData(1, 3, 1)
+        tdSql.checkRows(11)
+        tdSql.checkData(1, 0, 1)
+        tdSql.checkData(2, 2, 2)
+        tdSql.checkData(2, 3, 1)
 
         tdSql.query("select count(*),last(*) from stest group by col2")
-        tdSql.checkRows(1)
-        tdSql.checkData(0, 0, 10)
-        tdSql.checkData(0, 2, 10)
-        tdSql.checkData(0, 3, 1)
+        tdSql.checkRows(2)
+        tdSql.checkData(1, 0, 10)
+        tdSql.checkData(1, 2, 10)
+        tdSql.checkData(1, 3, 1)
 
         tdSql.query("select count(*),last(ts,stest.*) from stest group by col1")
-        tdSql.checkRows(10)
-        tdSql.checkData(0, 0, 1)
-        tdSql.checkData(0, 2, "2018-09-17 09:00:00")
-        tdSql.checkData(1, 4, 1)
+        tdSql.checkRows(11)
+        tdSql.checkData(1, 0, 1)
+        tdSql.checkData(1, 2, "2018-09-17 09:00:00")
+        tdSql.checkData(2, 4, 1)
tests/pytest/query/nestedQuery/nestedQuery.py

@@ -470,9 +470,9 @@ class TDTestCase:
         #inter && calc_aggregate_all\calc_aggregate_regular\calc_select_all
         interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ',
-                    'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ',
-                    'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ','interval(10a)',
-                    'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ','interval(100a,30a)']
+                    'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(100a) ',
+                    'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ','interval(100a)',
+                    'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,100a) ','interval(100a,30a)']
 
         #1 select * from (select column form regular_table where <\>\in\and\or order by)
         tdSql.query("select 1-1 from table_0;")

@@ -488,11 +488,10 @@ class TDTestCase:
             tdLog.info(len(sql))
             tdSql.query(sql)
             #tdSql.checkData(0,0,'2020-09-13 20:26:40.000')
             tdSql.checkRows(6*self.num)
 
         #1 outer union not support
-        dcDB = self.dropandcreateDB(random.randint(1,3))
+        # dcDB = self.dropandcreateDB(random.randint(1,3))
         tdSql.query("select 1-2 from table_0;")
         for i in range(self.fornum):
             sql = "select ts , * from ( select "

@@ -2114,6 +2113,119 @@ class TDTestCase:
             tdLog.info(len(sql))
             tdSql.query(sql)
 
+        tdSql.query("select 21-1 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from regular_table_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += " where ts >now-10h and ts < now+10h "
+            sql += "%s " % random.choice(interval_sliding)
+            sql += "%s " % random.choice(fill_where)
+            sql += "%s ;" % random.choice(limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-2 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from table_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += " where ts >now-10h and ts < now+10h "
+            sql += "%s " % random.choice(interval_sliding)
+            sql += "%s " % random.choice(fill_where)
+            sql += "%s ;" % random.choice([limit_where[2], limit_where[3]])
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-3 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from stable_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += " where ts >now-10h and ts < now+10h "
+            sql += "%s " % random.choice(interval_sliding)
+            sql += "%s " % random.choice(fill_where)
+            sql += "%s ;" % random.choice(limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-4 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from regular_table_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += "group by ts "
+            sql += "%s ;" % random.choice(limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-5 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from table_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += "group by ts "
+            sql += "%s ;" % random.choice([limit_where[2], limit_where[3]])
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-6 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from stable_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s ) " % random.choice(interval_sliding)
+            sql += "group by ts "
+            sql += "%s ;" % random.choice(limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
+        tdSql.query("select 21-7 from table_0;")
+        for i in range(self.fornum):
+            sql = "select avg(res1),min(res2),max(res3) from ( select "
+            sql += "%s res1, " % random.choice(calc_aggregate_all)
+            sql += "%s res2," % random.choice(calc_aggregate_all)
+            sql += "%s res3 " % random.choice(calc_aggregate_all)
+            sql += " from stable_1 t1 "
+            sql += " where %s " % random.choice(q_where)
+            sql += " %s " % random.choice(interval_sliding)
+            sql += " %s ) " % random.choice(group_where)
+            sql += "group by ts "
+            sql += "%s ;" % random.choice(limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+
         # error
         #1 select * from (select * from (select * form regular_table where <\>\in\and\or order by limit ))
         tdSql.query("select 1-1 from table_1;")
tests/pytest/query/queryStateWindow.py

@@ -42,8 +42,12 @@ class TDTestCase:
         tdSql.execute(
             "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")
 
-        for i in range(self.rowNum):
-            tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1))
+        for i in range(10):
+            sql = "insert into dev_002(ts, t1) values"
+            batch = int(self.rowNum/10)
+            for j in range(batch):
+                sql += "(%d, %d)" % (self.ts + batch*i + j, batch*i + j)
+            tdSql.execute(sql)
 
         tdSql.query("select count(ts) from dev_001 state_window(t1)")
         tdSql.checkRows(3)

@@ -101,6 +105,42 @@ class TDTestCase:
         tdSql.error("select count(*) from dev_001 state_window(t10)")
         tdSql.error("select count(*) from dev_001 state_window(tag2)")
 
+        # TS-537
+        tdLog.info("case for TS-537")
+        tdSql.execute("create stable stb (ts timestamp, c1 int, c2 float) tags(tg1 int)")
+        tdSql.execute("CREATE TABLE IF NOT EXISTS db.tb1 USING db.stb TAGS (1)")
+        sql = "insert into tb1 values(1635398806734, 1, 1.000000)(1635398810062, 1, 2.000000)(1635398811528, 1, 3.000000)(1635398813301, 1, 4.000000)"
+        sql += "(1635398818507, 2, 1.000000)(1635398823464, 2, 1.000000)(1635398825150, 2, 1.000000)(1635398826453, 2, 1.000000)(1635399123037, 2, 2.000000)"
+        sql += "(1635399125335, 2, 2.000000)(1635399126292, 2, 2.000000)(1635399127288, 2, 2.000000)(1635399129361, 2, 2.000000)(1635399133331, 1, 2.000000)"
+        sql += "(1635399134179, 1, 2.000000)(1635399134909, 1, 2.000000)(1635399135617, 1, 2.000000)(1635399136372, 1, 2.000000)"
+        tdSql.execute(sql)
+
+        tdSql.query("select * from (select first(ts), count(*), c1 from db.tb1 state_window(c1))")
+        tdSql.checkRows(3)
+        tdSql.checkData(0, 1, 4)
+        tdSql.checkData(0, 2, 1)
+        tdSql.checkData(1, 1, 9)
+        tdSql.checkData(1, 2, 2)
+        tdSql.checkData(2, 1, 5)
+        tdSql.checkData(2, 2, 1)
+
+        tdSql.query("select fts, cnt, c1 from (select first(ts) fts, count(*) cnt, c1 from db.tb1 state_window(c1))")
+        tdSql.checkRows(3)
+        tdSql.checkData(0, 1, 4)
+        tdSql.checkData(0, 2, 1)
+        tdSql.checkData(1, 1, 9)
+        tdSql.checkData(1, 2, 2)
+        tdSql.checkData(2, 1, 5)
+        tdSql.checkData(2, 2, 1)
+
+        tdSql.query("select * from (select first(ts) fts, count(*) cnt, c1 from db.tb1 state_window(c1))")
+        tdSql.checkRows(3)
+        tdSql.checkData(0, 1, 4)
+        tdSql.checkData(0, 2, 1)
+        tdSql.checkData(1, 1, 9)
+        tdSql.checkData(1, 2, 2)
+        tdSql.checkData(2, 1, 5)
+        tdSql.checkData(2, 2, 1)
+
     def stop(self):
         tdSql.close()
tests/script/general/parser/col_arithmetic_query.sim

@@ -553,19 +553,19 @@ endi
 sql_error select first(c6) - last(c6) *12 / count(*) from $stb group by c3;
 sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5;
-if $rows != 10 then
+if $rows != 11 then
   return -1
 endi
-if $data00 != 0.000000000 then
+if $data00 != -0.002160000 then
   return -1
 endi
-if $data10 != 0.997600000 then
+if $data10 != 0.000000000 then
   return -1
 endi
-if $data90 != 8.978400000 then
+if $data90 != 7.980800000 then
   return -1
 endi
tests/script/general/parser/nestquery.sim

@@ -884,5 +884,18 @@ if $data00 != 24 then
   return -1
 endi
 
+sql select sum(a)/sum(b) from meters where ts >= '2021-09-30 15:00:00.000' and ts <= '2021-09-30 15:00:05.000' interval(1s) fill(null) group by area order by area;
+if $rows != 12 then
+  return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data02 != 0 then
+  return -1
+endi
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT