taosdata / TDengine
Commit 0e246a7f

Merge branch 'master' into test/TD-3295

Authored by huili on Apr 08, 2021; committed by GitHub on Apr 08, 2021
Parents: 91a60312, 2d9ae699
Showing 5 changed files with 92 additions and 15 deletions (+92 −15)
Jenkinsfile                                    +1   −1
src/kit/taosdemo/taosdemo.c                    +15  −12
src/mnode/src/mnodeSdb.c                       +7   −1
tests/pytest/fulltest.sh                       +1   −1
tests/pytest/query/queryStddevWithGroupby.py   +68  −0
Jenkinsfile

@@ -82,9 +82,9 @@ def pre_test(){
       }
     }
     sh '''
     cd ${WK}
     git pull >/dev/null
     export TZ=Asia/Harbin
     date
     git clean -dfx
...
src/kit/taosdemo/taosdemo.c
@@ -2573,10 +2573,7 @@ static void* createTable(void *sarg)
   int64_t  lastPrintTime = taosGetTimestampMs();
 
   int buff_len;
-  if (superTblInfo)
-    buff_len = superTblInfo->maxSqlLen;
-  else
-    buff_len = BUFFER_SIZE;
+  buff_len = BUFFER_SIZE / 8;
 
   char *buffer = calloc(buff_len, 1);
   if (buffer == NULL) {

@@ -2624,7 +2621,7 @@ static void* createTable(void *sarg)
       return NULL;
     }
     len += snprintf(buffer + len,
-            superTblInfo->maxSqlLen - len,
+            buff_len - len,
             "if not exists %s.%s%d using %s.%s tags %s ",
             winfo->db_name, superTblInfo->childTblPrefix,
             i, winfo->db_name,

@@ -2632,7 +2629,7 @@ static void* createTable(void *sarg)
     free(tagsValBuf);
     batchNum++;
     if ((batchNum < superTblInfo->batchCreateTableNum)
-            && ((superTblInfo->maxSqlLen - len)
+            && ((buff_len - len)
             >= (superTblInfo->lenOfTagOfOneRow + 256))) {
       continue;
    }
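These three hunks make createTable() size its SQL batch buffer as a fixed BUFFER_SIZE / 8 allocation instead of the per-super-table maxSqlLen, and bound every snprintf and every batch-continuation check by the space left in that buffer. A minimal standalone sketch of the same bounded-append pattern; the BUFFER_SIZE value and table names below are illustrative, not taken from taosdemo.c:

#include <stdio.h>
#include <stdlib.h>

#define BUFFER_SIZE 65536   /* illustrative; taosdemo.c defines its own value */

int main(void) {
  int   buff_len = BUFFER_SIZE / 8;
  char *buffer   = calloc(buff_len, 1);        /* zero-filled SQL batch buffer */
  if (buffer == NULL) return 1;

  int len = snprintf(buffer, buff_len, "create table ");
  for (int i = 0; i < 1000; i++) {
    /* stop batching when the remaining space may not hold another clause */
    if (buff_len - len < 256) break;
    len += snprintf(buffer + len, buff_len - len,
                    "if not exists db.tb%d using db.stb tags (%d) ", i, i);
  }
  printf("%d bytes of SQL built\n", len);
  free(buffer);
  return 0;
}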
@@ -3479,9 +3476,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
   if (childTblExists
           && childTblExists->type == cJSON_String
           && childTblExists->valuestring != NULL) {
-    if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) {
+    if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
+            && (g_Dbs.db[i].drop == false)) {
       g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
-    } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) {
+    } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
+            || (g_Dbs.db[i].drop == true))) {
       g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
     } else {
       g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
@@ -3527,18 +3526,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     }
 
     cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit");
-    if (childTbl_limit) {
+    if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
+            && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
       if (childTbl_limit->type != cJSON_Number) {
         printf("ERROR: failed to read json, childtable_limit\n");
         goto PARSE_OVER;
       }
       g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
     } else {
-      g_Dbs.db[i].superTbls[j].childTblLimit = -1;    // select ... limit -1 means all query result
+      g_Dbs.db[i].superTbls[j].childTblLimit = -1;    // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid.
     }
 
     cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset");
-    if (childTbl_offset) {
+    if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
+            && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
       if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) {
         printf("ERROR: failed to read json, childtable_offset\n");
         goto PARSE_OVER;
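Taken together, these two hunks tie the child-table bookkeeping to the database's drop setting: child_table_exists = "yes" only counts when the database is kept, and childtable_limit / childtable_offset are only honored when drop is off and the child tables already exist. A hedged restatement of the first decision as a standalone helper; the enum values match the diff, but the function name and signature are illustrative, not taosdemo.c's:

#include <assert.h>
#include <stdbool.h>
#include <strings.h>   /* strncasecmp */

typedef enum { TBL_NO_EXISTS = 0, TBL_ALREADY_EXISTS = 1 } tbl_exists_t;

/* Mirrors the widened condition: "yes" is only trusted when the database will
 * not be dropped; "no", anything else, or drop == true means recreate. */
static tbl_exists_t resolve_child_tbl_exists(const char *valuestring, bool drop) {
  if ((0 == strncasecmp(valuestring, "yes", 3)) && (drop == false)) {
    return TBL_ALREADY_EXISTS;
  }
  return TBL_NO_EXISTS;
}

int main(void) {
  assert(resolve_child_tbl_exists("yes", false) == TBL_ALREADY_EXISTS);
  assert(resolve_child_tbl_exists("yes", true)  == TBL_NO_EXISTS);   /* drop wins */
  assert(resolve_child_tbl_exists("no",  false) == TBL_NO_EXISTS);
  return 0;
}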
@@ -5170,7 +5171,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
           && (superTblInfo->childTblOffset >= 0)) {
-    if (superTblInfo->childTblLimit < 0) {
+    if ((superTblInfo->childTblLimit < 0)
+            || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+            > (superTblInfo->childTblCount))) {
       superTblInfo->childTblLimit =
           superTblInfo->childTblCount - superTblInfo->childTblOffset;
     }
...
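The last taosdemo.c hunk widens the guard so the limit is recomputed not only when it is negative but also when offset + limit would run past the number of existing child tables. A small sketch of that clamping logic with the struct fields folded into plain parameters; the helper name and signature are illustrative, not taosdemo.c's:

#include <stdint.h>
#include <stdio.h>

/* Clamp limit so that offset + limit never exceeds the child-table count;
 * a negative limit means "everything after offset". */
static int64_t clamp_child_tbl_limit(int64_t count, int64_t offset, int64_t limit) {
  if ((limit < 0) || ((offset + limit) > count)) {
    limit = count - offset;
  }
  return limit;
}

int main(void) {
  printf("%lld\n", (long long)clamp_child_tbl_limit(100, 90, -1));  /* 10: all remaining */
  printf("%lld\n", (long long)clamp_child_tbl_limit(100, 90, 50));  /* 10: clamped       */
  printf("%lld\n", (long long)clamp_child_tbl_limit(100, 10, 20));  /* 20: unchanged     */
  return 0;
}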
src/mnode/src/mnodeSdb.c
@@ -315,6 +315,10 @@ void sdbUpdateAsync() {
   taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr);
 }
 
+static int node_cmp(const void *l, const void *r) {
+  return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId;
+}
+
 int32_t sdbUpdateSync(void *pMnodes) {
   SMInfos *pMinfos = pMnodes;
   if (!mnodeIsRunning()) {
@@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) {
     return TSDB_CODE_SUCCESS;
   }
 
+  qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp);
+
   sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica);
   for (int32_t i = 0; i < syncCfg.replica; ++i) {
     sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn,
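mnodeSdb.c now sorts the mnode sync node list by nodeId with a plain qsort comparator before logging it, so the replica list is reported in a deterministic order. The same idiom in isolation; the struct below is a stand-in that only carries the fields this example needs, not the real SNodeInfo definition:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for SNodeInfo: only nodeId matters to the comparator. */
typedef struct { int32_t nodeId; char nodeFqdn[64]; } SNodeInfo;

static int node_cmp(const void *l, const void *r) {
  return ((const SNodeInfo *)l)->nodeId - ((const SNodeInfo *)r)->nodeId;
}

int main(void) {
  SNodeInfo nodes[] = {{3, "node-c"}, {1, "node-a"}, {2, "node-b"}};
  qsort(nodes, sizeof(nodes) / sizeof(nodes[0]), sizeof(nodes[0]), node_cmp);
  for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); ++i) {
    printf("mnode:%d %s\n", nodes[i].nodeId, nodes[i].nodeFqdn);  /* prints 1, 2, 3 */
  }
  return 0;
}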
tests/pytest/fulltest.sh
@@ -218,7 +218,7 @@ python3 ./test.py -f query/query1970YearsAf.py
 python3 ./test.py -f query/bug3351.py
 python3 ./test.py -f query/bug3375.py
 python3 ./test.py -f query/queryJoin10tables.py
+python3 ./test.py -f query/queryStddevWithGroupby.py
 #stream
 python3 ./test.py -f stream/metric_1.py
...
tests/pytest/query/queryStddevWithGroupby.py (new file, mode 0 → 100644)
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def querysqls(self):
        tdSql.query("select stddev(c1) from t10 group by c1")
        tdSql.checkRows(6)
        tdSql.checkData(0, 0, 0)
        tdSql.checkData(1, 0, 0)
        tdSql.checkData(2, 0, 0)
        tdSql.checkData(3, 0, 0)
        tdSql.checkData(4, 0, 0)
        tdSql.checkData(5, 0, 0)
        tdSql.query("select stddev(c2) from t10")
        tdSql.checkData(0, 0, 0.5)

    def run(self):
        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db keep 36500")
        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step1:create table && insert data")
        tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)")
        tdSql.execute("create table t10 using stb1 tags(1)")
        tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)")
        tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)")
        tdSql.execute("insert into t10 values (0, 4,1)")
        tdSql.execute("insert into t10 values (now-18725d, 1,2)")
        tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)")
        tdSql.execute("insert into t10 values (now+1d,6,2)")

        tdLog.printNoPrefix("==========step2:query and check")
        self.querysqls()

        tdLog.printNoPrefix("==========step3:after wal,check again")
        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)
        self.querysqls()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file