Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
85847473
TDengine
项目概览
taosdata
/
TDengine
11 个月 前同步成功
通知
1179
Star
22014
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
85847473
编写于
6月 15, 2022
作者:
H
Hongze Cheng
浏览文件
操作
浏览文件
下载
差异文件
Merge branch '3.0' of
https://github.com/taosdata/TDengine
into feat/tsdb_refact
上级
555469c7
ddaf6164
变更
43
隐藏空白更改
内联
并排
Showing
43 changed file
with
1670 addition
and
1021 deletion
+1670
-1021
include/common/tmsg.h
include/common/tmsg.h
+4
-3
include/util/taoserror.h
include/util/taoserror.h
+10
-6
source/client/src/clientSml.c
source/client/src/clientSml.c
+28
-28
source/common/src/systable.c
source/common/src/systable.c
+6
-6
source/common/src/tmsg.c
source/common/src/tmsg.c
+20
-8
source/dnode/mnode/impl/src/mndDb.c
source/dnode/mnode/impl/src/mndDb.c
+7
-7
source/dnode/vnode/CMakeLists.txt
source/dnode/vnode/CMakeLists.txt
+1
-1
source/dnode/vnode/src/inc/sma.h
source/dnode/vnode/src/inc/sma.h
+12
-22
source/dnode/vnode/src/inc/vnodeInt.h
source/dnode/vnode/src/inc/vnodeInt.h
+3
-2
source/dnode/vnode/src/meta/metaEntry.c
source/dnode/vnode/src/meta/metaEntry.c
+1
-1
source/dnode/vnode/src/sma/sma.c
source/dnode/vnode/src/sma/sma.c
+206
-0
source/dnode/vnode/src/sma/smaEnv.c
source/dnode/vnode/src/sma/smaEnv.c
+22
-153
source/dnode/vnode/src/sma/smaOpen.c
source/dnode/vnode/src/sma/smaOpen.c
+3
-1
source/dnode/vnode/src/sma/smaRollup.c
source/dnode/vnode/src/sma/smaRollup.c
+1
-1
source/dnode/vnode/src/sma/smaTimeRange.c
source/dnode/vnode/src/sma/smaTimeRange.c
+16
-16
source/dnode/vnode/src/sma/smaTimeRange2.c
source/dnode/vnode/src/sma/smaTimeRange2.c
+0
-170
source/dnode/vnode/src/tq/tqSink.c
source/dnode/vnode/src/tq/tqSink.c
+1
-4
source/dnode/vnode/src/vnd/vnodeSvr.c
source/dnode/vnode/src/vnd/vnodeSvr.c
+1
-1
source/dnode/vnode/test/tsdbSmaTest.cpp
source/dnode/vnode/test/tsdbSmaTest.cpp
+1
-1
source/libs/parser/src/parAstCreater.c
source/libs/parser/src/parAstCreater.c
+4
-4
source/libs/parser/test/parInitialCTest.cpp
source/libs/parser/test/parInitialCTest.cpp
+1
-1
source/libs/planner/src/planSpliter.c
source/libs/planner/src/planSpliter.c
+177
-107
source/libs/planner/test/planGroupByTest.cpp
source/libs/planner/test/planGroupByTest.cpp
+2
-0
source/libs/planner/test/planIntervalTest.cpp
source/libs/planner/test/planIntervalTest.cpp
+2
-0
source/libs/planner/test/planJoinTest.cpp
source/libs/planner/test/planJoinTest.cpp
+6
-0
source/libs/planner/test/planOrderByTest.cpp
source/libs/planner/test/planOrderByTest.cpp
+2
-0
source/libs/planner/test/planOtherTest.cpp
source/libs/planner/test/planOtherTest.cpp
+7
-0
source/libs/planner/test/planTestUtil.h
source/libs/planner/test/planTestUtil.h
+4
-0
source/libs/sync/src/syncMain.c
source/libs/sync/src/syncMain.c
+1
-3
source/libs/sync/src/syncRespMgr.c
source/libs/sync/src/syncRespMgr.c
+14
-0
source/libs/sync/src/syncUtil.c
source/libs/sync/src/syncUtil.c
+26
-1
source/libs/transport/src/transCli.c
source/libs/transport/src/transCli.c
+19
-17
source/libs/transport/src/transSvr.c
source/libs/transport/src/transSvr.c
+8
-8
source/libs/transport/test/transUT.cpp
source/libs/transport/test/transUT.cpp
+23
-22
source/util/src/terror.c
source/util/src/terror.c
+4
-1
tests/script/jenkins/basic.txt
tests/script/jenkins/basic.txt
+1
-1
tests/script/tsim/testsuit.sim
tests/script/tsim/testsuit.sim
+1
-1
tests/system-test/1-insert/create_retentions.py
tests/system-test/1-insert/create_retentions.py
+254
-0
tests/system-test/2-query/explain.py
tests/system-test/2-query/explain.py
+357
-0
tests/system-test/2-query/histogram.py
tests/system-test/2-query/histogram.py
+12
-374
tests/system-test/2-query/hyperloglog.py
tests/system-test/2-query/hyperloglog.py
+7
-49
tests/system-test/2-query/leastsquares.py
tests/system-test/2-query/leastsquares.py
+392
-0
tests/system-test/fulltest.sh
tests/system-test/fulltest.sh
+3
-1
未找到文件。
include/common/tmsg.h
浏览文件 @
85847473
...
...
@@ -2427,6 +2427,7 @@ typedef struct {
static
FORCE_INLINE
void
tDestroyTSma
(
STSma
*
pSma
)
{
if
(
pSma
)
{
taosMemoryFreeClear
(
pSma
->
dstTbName
);
taosMemoryFreeClear
(
pSma
->
expr
);
taosMemoryFreeClear
(
pSma
->
tagsFilter
);
}
...
...
@@ -2455,7 +2456,7 @@ int32_t tEncodeSVCreateTSmaReq(SEncoder* pCoder, const SVCreateTSmaReq* pReq);
int32_t
tDecodeSVCreateTSmaReq
(
SDecoder
*
pCoder
,
SVCreateTSmaReq
*
pReq
);
int32_t
tEncodeTSma
(
SEncoder
*
pCoder
,
const
STSma
*
pSma
);
int32_t
tDecodeTSma
(
SDecoder
*
pCoder
,
STSma
*
pSma
);
int32_t
tDecodeTSma
(
SDecoder
*
pCoder
,
STSma
*
pSma
,
bool
deepCopy
);
static
int32_t
tEncodeTSmaWrapper
(
SEncoder
*
pEncoder
,
const
STSmaWrapper
*
pReq
)
{
if
(
tEncodeI32
(
pEncoder
,
pReq
->
number
)
<
0
)
return
-
1
;
...
...
@@ -2465,10 +2466,10 @@ static int32_t tEncodeTSmaWrapper(SEncoder* pEncoder, const STSmaWrapper* pReq)
return
0
;
}
static
int32_t
tDecodeTSmaWrapper
(
SDecoder
*
pDecoder
,
STSmaWrapper
*
pReq
)
{
static
int32_t
tDecodeTSmaWrapper
(
SDecoder
*
pDecoder
,
STSmaWrapper
*
pReq
,
bool
deepCopy
)
{
if
(
tDecodeI32
(
pDecoder
,
&
pReq
->
number
)
<
0
)
return
-
1
;
for
(
int32_t
i
=
0
;
i
<
pReq
->
number
;
++
i
)
{
tDecodeTSma
(
pDecoder
,
pReq
->
tSma
+
i
);
tDecodeTSma
(
pDecoder
,
pReq
->
tSma
+
i
,
deepCopy
);
}
return
0
;
}
...
...
include/util/taoserror.h
浏览文件 @
85847473
...
...
@@ -686,12 +686,15 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
//tsma
#define TSDB_CODE_TSMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x3100)
#define TSDB_CODE_TSMA_NO_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x3101)
#define TSDB_CODE_TSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3102)
#define TSDB_CODE_TSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3103)
#define TSDB_CODE_TSMA_NO_INDEX_IN_CACHE TAOS_DEF_ERROR_CODE(0, 0x3104)
#define TSDB_CODE_TSMA_RM_SKEY_IN_HASH TAOS_DEF_ERROR_CODE(0, 0x3105)
#define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
#define TSDB_CODE_TSMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x3101)
#define TSDB_CODE_TSMA_NO_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x3102)
#define TSDB_CODE_TSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3103)
#define TSDB_CODE_TSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3104)
#define TSDB_CODE_TSMA_INVALID_PTR TAOS_DEF_ERROR_CODE(0, 0x3105)
#define TSDB_CODE_TSMA_INVALID_PARA TAOS_DEF_ERROR_CODE(0, 0x3106)
#define TSDB_CODE_TSMA_NO_INDEX_IN_CACHE TAOS_DEF_ERROR_CODE(0, 0x3107)
//rsma
#define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150)
...
...
@@ -700,6 +703,7 @@ int32_t* taosGetErrno();
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
#ifdef __cplusplus
}
#endif
...
...
source/client/src/clientSml.c
浏览文件 @
85847473
...
...
@@ -2355,34 +2355,34 @@ static int smlProcess(SSmlHandle *info, char *lines[], int numLines) {
}
static
int32_t
isSchemalessDb
(
STscObj
*
taos
,
SRequestObj
*
request
)
{
SCatalog
*
catalog
=
NULL
;
int32_t
code
=
catalogGetHandle
(((
STscObj
*
)
taos
)
->
pAppInfo
->
clusterId
,
&
catalog
);
if
(
code
!=
TSDB_CODE_SUCCESS
)
{
uError
(
"SML get catalog error %d"
,
code
);
return
code
;
}
SName
name
;
tNameSetDbName
(
&
name
,
taos
->
acctId
,
taos
->
db
,
strlen
(
taos
->
db
));
char
dbFname
[
TSDB_DB_FNAME_LEN
]
=
{
0
};
tNameGetFullDbName
(
&
name
,
dbFname
);
SDbCfgInfo
pInfo
=
{
0
};
SRequestConnInfo
conn
=
{
0
};
conn
.
pTrans
=
taos
->
pAppInfo
->
pTransporter
;
conn
.
requestId
=
request
->
requestId
;
conn
.
requestObjRefId
=
request
->
self
;
conn
.
mgmtEps
=
getEpSet_s
(
&
taos
->
pAppInfo
->
mgmtEp
);
code
=
catalogGetDBCfg
(
catalog
,
&
conn
,
dbFname
,
&
pInfo
);
if
(
code
!=
TSDB_CODE_SUCCESS
)
{
return
code
;
}
taosArrayDestroy
(
pInfo
.
pRetensions
);
if
(
!
pInfo
.
schemaless
)
{
return
TSDB_CODE_SML_INVALID_DB_CONF
;
}
//
SCatalog *catalog = NULL;
//
int32_t code = catalogGetHandle(((STscObj *)taos)->pAppInfo->clusterId, &catalog);
//
if (code != TSDB_CODE_SUCCESS) {
//
uError("SML get catalog error %d", code);
//
return code;
//
}
//
//
SName name;
//
tNameSetDbName(&name, taos->acctId, taos->db, strlen(taos->db));
//
char dbFname[TSDB_DB_FNAME_LEN] = {0};
//
tNameGetFullDbName(&name, dbFname);
//
SDbCfgInfo pInfo = {0};
//
//
SRequestConnInfo conn = {0};
//
conn.pTrans = taos->pAppInfo->pTransporter;
//
conn.requestId = request->requestId;
//
conn.requestObjRefId = request->self;
//
conn.mgmtEps = getEpSet_s(&taos->pAppInfo->mgmtEp);
//
//
code = catalogGetDBCfg(catalog, &conn, dbFname, &pInfo);
//
if (code != TSDB_CODE_SUCCESS) {
//
return code;
//
}
//
taosArrayDestroy(pInfo.pRetensions);
//
//
if (!pInfo.schemaless) {
//
return TSDB_CODE_SML_INVALID_DB_CONF;
//
}
return
TSDB_CODE_SUCCESS
;
}
...
...
source/common/src/systable.c
浏览文件 @
85847473
...
...
@@ -91,8 +91,8 @@ static const SSysDbTableSchema userDBSchema[] = {
{.
name
=
"precision"
,
.
bytes
=
2
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
{.
name
=
"single_stable_model"
,
.
bytes
=
1
,
.
type
=
TSDB_DATA_TYPE_BOOL
},
{.
name
=
"status"
,
.
bytes
=
10
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
// {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.
name
=
"reten
s
ion"
,
.
bytes
=
60
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
// {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.
name
=
"reten
t
ion"
,
.
bytes
=
60
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
// {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update
};
...
...
@@ -137,7 +137,7 @@ static const SSysDbTableSchema streamSchema[] = {
{.
name
=
"target_table"
,
.
bytes
=
SYSTABLE_SCH_TABLE_NAME_LEN
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
{.
name
=
"watermark"
,
.
bytes
=
8
,
.
type
=
TSDB_DATA_TYPE_BIGINT
},
{.
name
=
"trigger"
,
.
bytes
=
4
,
.
type
=
TSDB_DATA_TYPE_INT
},
};
};
static
const
SSysDbTableSchema
userTblsSchema
[]
=
{
{.
name
=
"table_name"
,
.
bytes
=
SYSTABLE_SCH_TABLE_NAME_LEN
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
...
...
@@ -221,7 +221,9 @@ static const SSysDbTableSchema transSchema[] = {
{.
name
=
"db"
,
.
bytes
=
SYSTABLE_SCH_DB_NAME_LEN
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
{.
name
=
"failed_times"
,
.
bytes
=
4
,
.
type
=
TSDB_DATA_TYPE_INT
},
{.
name
=
"last_exec_time"
,
.
bytes
=
8
,
.
type
=
TSDB_DATA_TYPE_TIMESTAMP
},
{.
name
=
"last_action_info"
,
.
bytes
=
(
TSDB_TRANS_ERROR_LEN
-
1
)
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
{.
name
=
"last_action_info"
,
.
bytes
=
(
TSDB_TRANS_ERROR_LEN
-
1
)
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
};
static
const
SSysDbTableSchema
configSchema
[]
=
{
...
...
@@ -314,8 +316,6 @@ static const SSysDbTableSchema querySchema[] = {
{.
name
=
"sql"
,
.
bytes
=
TSDB_SHOW_SQL_LEN
+
VARSTR_HEADER_SIZE
,
.
type
=
TSDB_DATA_TYPE_VARCHAR
},
};
static
const
SSysTableMeta
perfsMeta
[]
=
{
{
TSDB_PERFS_TABLE_CONNECTIONS
,
connectionsSchema
,
tListLen
(
connectionsSchema
)},
{
TSDB_PERFS_TABLE_QUERIES
,
querySchema
,
tListLen
(
querySchema
)},
...
...
source/common/src/tmsg.c
浏览文件 @
85847473
...
...
@@ -2653,7 +2653,7 @@ int32_t tSerializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
}
int32_t
numOfIndex
=
taosArrayGetSize
(
pRsp
->
pIndexRsp
);
if
(
tEncodeI32
(
&
encoder
,
numOfIndex
)
<
0
)
return
-
1
;
if
(
tEncodeI32
(
&
encoder
,
numOfIndex
)
<
0
)
return
-
1
;
for
(
int32_t
i
=
0
;
i
<
numOfIndex
;
++
i
)
{
STableIndexRsp
*
pIndexRsp
=
taosArrayGet
(
pRsp
->
pIndexRsp
,
i
);
if
(
tEncodeCStr
(
&
encoder
,
pIndexRsp
->
tbName
)
<
0
)
return
-
1
;
...
...
@@ -2738,7 +2738,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
}
taosArrayPush
(
pRsp
->
pIndexRsp
,
&
tableIndexRsp
);
}
tEndDecode
(
&
decoder
);
tDecoderClear
(
&
decoder
);
...
...
@@ -4000,7 +4000,7 @@ int32_t tEncodeTSma(SEncoder *pCoder, const STSma *pSma) {
return
0
;
}
int32_t
tDecodeTSma
(
SDecoder
*
pCoder
,
STSma
*
pSma
)
{
int32_t
tDecodeTSma
(
SDecoder
*
pCoder
,
STSma
*
pSma
,
bool
deepCopy
)
{
if
(
tDecodeI8
(
pCoder
,
&
pSma
->
version
)
<
0
)
return
-
1
;
if
(
tDecodeI8
(
pCoder
,
&
pSma
->
intervalUnit
)
<
0
)
return
-
1
;
if
(
tDecodeI8
(
pCoder
,
&
pSma
->
slidingUnit
)
<
0
)
return
-
1
;
...
...
@@ -4012,17 +4012,30 @@ int32_t tDecodeTSma(SDecoder *pCoder, STSma *pSma) {
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
indexUid
)
<
0
)
return
-
1
;
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
tableUid
)
<
0
)
return
-
1
;
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
dstTbUid
)
<
0
)
return
-
1
;
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
dstTbName
)
<
0
)
return
-
1
;
if
(
deepCopy
)
{
if
(
tDecodeCStrAlloc
(
pCoder
,
&
pSma
->
dstTbName
)
<
0
)
return
-
1
;
}
else
{
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
dstTbName
)
<
0
)
return
-
1
;
}
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
interval
)
<
0
)
return
-
1
;
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
offset
)
<
0
)
return
-
1
;
if
(
tDecodeI64
(
pCoder
,
&
pSma
->
sliding
)
<
0
)
return
-
1
;
if
(
pSma
->
exprLen
>
0
)
{
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
expr
)
<
0
)
return
-
1
;
if
(
deepCopy
)
{
if
(
tDecodeCStrAlloc
(
pCoder
,
&
pSma
->
expr
)
<
0
)
return
-
1
;
}
else
{
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
expr
)
<
0
)
return
-
1
;
}
}
else
{
pSma
->
expr
=
NULL
;
}
if
(
pSma
->
tagsFilterLen
>
0
)
{
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
tagsFilter
)
<
0
)
return
-
1
;
if
(
deepCopy
)
{
if
(
tDecodeCStrAlloc
(
pCoder
,
&
pSma
->
tagsFilter
)
<
0
)
return
-
1
;
}
else
{
if
(
tDecodeCStr
(
pCoder
,
&
pSma
->
tagsFilter
)
<
0
)
return
-
1
;
}
}
else
{
pSma
->
tagsFilter
=
NULL
;
}
...
...
@@ -4045,7 +4058,7 @@ int32_t tEncodeSVCreateTSmaReq(SEncoder *pCoder, const SVCreateTSmaReq *pReq) {
int32_t
tDecodeSVCreateTSmaReq
(
SDecoder
*
pCoder
,
SVCreateTSmaReq
*
pReq
)
{
if
(
tStartDecode
(
pCoder
)
<
0
)
return
-
1
;
tDecodeTSma
(
pCoder
,
pReq
);
tDecodeTSma
(
pCoder
,
pReq
,
false
);
tEndDecode
(
pCoder
);
return
0
;
...
...
@@ -4879,4 +4892,3 @@ int32_t tDecodeSTqOffset(SDecoder *pDecoder, STqOffset *pOffset) {
if
(
tDecodeCStrTo
(
pDecoder
,
pOffset
->
subKey
)
<
0
)
return
-
1
;
return
0
;
}
source/dnode/mnode/impl/src/mndDb.c
浏览文件 @
85847473
...
...
@@ -183,12 +183,12 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) {
pDb
->
cfg
.
pRetensions
=
taosArrayInit
(
pDb
->
cfg
.
numOfRetensions
,
sizeof
(
SRetention
));
if
(
pDb
->
cfg
.
pRetensions
==
NULL
)
goto
_OVER
;
for
(
int32_t
i
=
0
;
i
<
pDb
->
cfg
.
numOfRetensions
;
++
i
)
{
SRetention
reten
s
ion
=
{
0
};
SDB_GET_INT64
(
pRaw
,
dataPos
,
&
reten
s
ion
.
freq
,
_OVER
)
SDB_GET_INT64
(
pRaw
,
dataPos
,
&
reten
s
ion
.
keep
,
_OVER
)
SDB_GET_INT8
(
pRaw
,
dataPos
,
&
reten
s
ion
.
freqUnit
,
_OVER
)
SDB_GET_INT8
(
pRaw
,
dataPos
,
&
reten
s
ion
.
keepUnit
,
_OVER
)
if
(
taosArrayPush
(
pDb
->
cfg
.
pRetensions
,
&
reten
s
ion
)
==
NULL
)
{
SRetention
reten
t
ion
=
{
0
};
SDB_GET_INT64
(
pRaw
,
dataPos
,
&
reten
t
ion
.
freq
,
_OVER
)
SDB_GET_INT64
(
pRaw
,
dataPos
,
&
reten
t
ion
.
keep
,
_OVER
)
SDB_GET_INT8
(
pRaw
,
dataPos
,
&
reten
t
ion
.
freqUnit
,
_OVER
)
SDB_GET_INT8
(
pRaw
,
dataPos
,
&
reten
t
ion
.
keepUnit
,
_OVER
)
if
(
taosArrayPush
(
pDb
->
cfg
.
pRetensions
,
&
reten
t
ion
)
==
NULL
)
{
goto
_OVER
;
}
}
...
...
@@ -1382,7 +1382,7 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
char
*
status
=
"ready"
;
if
(
objStatus
==
SDB_STATUS_CREATING
)
status
=
"creating"
;
if
(
objStatus
==
SDB_STATUS_DROPPING
)
status
=
"dropping"
;
char
statusB
[
24
]
=
{
0
};
char
statusB
[
24
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
statusB
,
status
,
strlen
(
status
));
if
(
sysDb
)
{
...
...
source/dnode/vnode/CMakeLists.txt
浏览文件 @
85847473
...
...
@@ -31,7 +31,7 @@ target_sources(
"src/sma/smaEnv.c"
"src/sma/smaOpen.c"
"src/sma/smaRollup.c"
"src/sma/smaTimeRange
2
.c"
"src/sma/smaTimeRange.c"
# tsdb
"src/tsdb/tsdbCommit.c"
...
...
source/dnode/vnode/src/inc/sma.h
浏览文件 @
85847473
...
...
@@ -38,8 +38,6 @@ typedef struct SSmaStatItem SSmaStatItem;
typedef
struct
SSmaKey
SSmaKey
;
typedef
struct
SRSmaInfo
SRSmaInfo
;
#define SMA_IVLD_FID INT_MIN
struct
SSmaEnv
{
TdThreadRwlock
lock
;
int8_t
type
;
...
...
@@ -49,45 +47,38 @@ struct SSmaEnv {
#define SMA_ENV_LOCK(env) ((env)->lock)
#define SMA_ENV_TYPE(env) ((env)->type)
#define SMA_ENV_STAT(env) ((env)->pStat)
#define SMA_ENV_STAT_ITEM
S(env) ((env)->pStat->smaStatItems
)
#define SMA_ENV_STAT_ITEM
(env) ((env)->pStat->tsmaStatItem
)
struct
SSmaStatItem
{
int8_t
state
;
// ETsdbSmaStat
STSma
*
pTSma
;
// cache schema
int8_t
state
;
// ETsdbSmaStat
STSma
*
pTSma
;
// cache schema
STSchema
*
pTSchema
;
};
struct
SSmaStat
{
union
{
S
HashObj
*
smaStatItems
;
// key: indexUid, value: SSmaStatItem for tsma
SHashObj
*
rsmaInfoHash
;
// key: stbUid, value: SRSmaInfo;
S
SmaStatItem
tsmaStatItem
;
SHashObj
*
rsmaInfoHash
;
// key: stbUid, value: SRSmaInfo;
};
T_REF_DECLARE
()
};
#define SMA_STAT_ITEM
S(s) ((s)->smaStatItems
)
#define SMA_STAT_ITEM
(s) ((s)->tsmaStatItem
)
#define SMA_STAT_INFO_HASH(s) ((s)->rsmaInfoHash)
void
tdDestroySmaEnv
(
SSmaEnv
*
pSmaEnv
);
void
*
tdFreeSmaEnv
(
SSmaEnv
*
pSmaEnv
);
#if 0
int32_t tbGetTSmaStatus(SSma *pSma, STSma *param, void *result);
int32_t tbRemoveTSmaData(SSma *pSma, STSma *param, STimeWindow *pWin);
#endif
int32_t
tdInitSma
(
SSma
*
pSma
);
int32_t
tdDropTSma
(
SSma
*
pSma
,
char
*
pMsg
);
int32_t
tdDropTSmaData
(
SSma
*
pSma
,
int64_t
indexUid
);
int32_t
tdInsertRSmaData
(
SSma
*
pSma
,
char
*
msg
);
int32_t
tdRefSmaStat
(
SSma
*
pSma
,
SSmaStat
*
pStat
);
int32_t
tdUnRefSmaStat
(
SSma
*
pSma
,
SSmaStat
*
pStat
);
int32_t
tdCheckAndInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
bool
onlyCheck
);
int32_t
tdCheckAndInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
);
int32_t
tdLockSma
(
SSma
*
pSma
);
int32_t
tdUnLockSma
(
SSma
*
pSma
);
static
FORCE_INLINE
int16_t
tdTSmaAdd
(
SSma
*
pSma
,
int16_t
n
)
{
return
atomic_add_fetch_16
(
&
SMA_TSMA_NUM
(
pSma
),
n
);
}
static
FORCE_INLINE
int16_t
tdTSmaSub
(
SSma
*
pSma
,
int16_t
n
)
{
return
atomic_sub_fetch_16
(
&
SMA_TSMA_NUM
(
pSma
),
n
);
}
static
FORCE_INLINE
int32_t
tdRLockSmaEnv
(
SSmaEnv
*
pEnv
)
{
int
code
=
taosThreadRwlockRdlock
(
&
(
pEnv
->
lock
));
if
(
code
!=
0
)
{
...
...
@@ -160,11 +151,10 @@ static FORCE_INLINE void tdSmaStatSetDropped(SSmaStatItem *pStatItem) {
}
}
static
int32_t
tdInitSmaStat
(
SSmaStat
**
pSmaStat
,
int8_t
smaType
);
void
*
tdFreeSmaStatItem
(
SSmaStatItem
*
pSmaStatItem
);
static
int32_t
tdDestroySmaState
(
SSmaStat
*
pSmaStat
,
int8_t
smaType
);
static
SSmaEnv
*
tdNewSmaEnv
(
const
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
SDiskID
did
);
static
int32_t
tdInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
SDiskID
did
,
SSmaEnv
**
pEnv
);
static
int32_t
tdInitSmaStat
(
SSmaStat
**
pSmaStat
,
int8_t
smaType
);
void
*
tdFreeSmaStatItem
(
SSmaStatItem
*
pSmaStatItem
);
static
int32_t
tdDestroySmaState
(
SSmaStat
*
pSmaStat
,
int8_t
smaType
);
void
*
tdFreeSmaState
(
SSmaStat
*
pSmaStat
,
int8_t
smaType
);
void
*
tdFreeRSmaInfo
(
SRSmaInfo
*
pInfo
);
...
...
source/dnode/vnode/src/inc/vnodeInt.h
浏览文件 @
85847473
...
...
@@ -147,6 +147,9 @@ int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
int32_t
tqProcessTaskDispatchRsp
(
STQ
*
pTq
,
SRpcMsg
*
pMsg
);
int32_t
tqProcessTaskRecoverRsp
(
STQ
*
pTq
,
SRpcMsg
*
pMsg
);
SSubmitReq
*
tdBlockToSubmit
(
const
SArray
*
pBlocks
,
const
STSchema
*
pSchema
,
bool
createTb
,
int64_t
suid
,
const
char
*
stbFullName
,
int32_t
vgId
);
// sma
int32_t
smaOpen
(
SVnode
*
pVnode
);
int32_t
smaClose
(
SSma
*
pSma
);
...
...
@@ -245,7 +248,6 @@ struct STbUidStore {
};
struct
SSma
{
int16_t
nTSma
;
bool
locked
;
TdThreadMutex
mutex
;
SVnode
*
pVnode
;
...
...
@@ -261,7 +263,6 @@ struct SSma {
#define SMA_META(s) ((s)->pVnode->pMeta)
#define SMA_VID(s) TD_VID((s)->pVnode)
#define SMA_TFS(s) ((s)->pVnode->pTfs)
#define SMA_TSMA_NUM(s) ((s)->nTSma)
#define SMA_TSMA_ENV(s) ((s)->pTSmaEnv)
#define SMA_RSMA_ENV(s) ((s)->pRSmaEnv)
#define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb)
...
...
source/dnode/vnode/src/meta/metaEntry.c
浏览文件 @
85847473
...
...
@@ -75,7 +75,7 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
terrno
=
TSDB_CODE_OUT_OF_MEMORY
;
return
-
1
;
}
if
(
tDecodeTSma
(
pCoder
,
pME
->
smaEntry
.
tsma
)
<
0
)
return
-
1
;
if
(
tDecodeTSma
(
pCoder
,
pME
->
smaEntry
.
tsma
,
true
)
<
0
)
return
-
1
;
}
else
{
ASSERT
(
0
);
}
...
...
source/dnode/vnode/src/sma/sma.c
浏览文件 @
85847473
...
...
@@ -44,3 +44,209 @@ int32_t smaGetTSmaDays(SVnodeCfg* pCfg, void* pCont, uint32_t contLen, int32_t*
smaDebug
(
"vgId:%d, get tsma days %d"
,
pCfg
->
vgId
,
*
days
);
return
code
;
}
#if 0
/**
* @brief TODO: Assume that the final generated result it less than 3M
*
* @param pReq
* @param pDataBlocks
* @param vgId
* @param suid // TODO: check with Liao whether suid response is reasonable
*
* TODO: colId should be set
*/
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid, const char* stbName, bool isCreateCtb) {
int32_t sz = taosArrayGetSize(pDataBlocks);
int32_t bufSize = sizeof(SSubmitReq);
for (int32_t i = 0; i < sz; ++i) {
SDataBlockInfo* pBlkInfo = &((SSDataBlock*)taosArrayGet(pDataBlocks, i))->info;
bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(pBlkInfo->numOfCols));
bufSize += sizeof(SSubmitBlk);
}
*pReq = taosMemoryCalloc(1, bufSize);
if (!(*pReq)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
}
void* pDataBuf = *pReq;
SArray* pTagArray = NULL;
int32_t msgLen = sizeof(SSubmitReq);
int32_t numOfBlks = 0;
int32_t schemaLen = 0;
SRowBuilder rb = {0};
tdSRowInit(&rb, pTSchema->version);
for (int32_t i = 0; i < sz; ++i) {
SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i);
SDataBlockInfo* pDataBlkInfo = &pDataBlock->info;
int32_t colNum = pDataBlkInfo->numOfCols;
int32_t rows = pDataBlkInfo->rows;
int32_t rowSize = pDataBlkInfo->rowSize;
int64_t groupId = pDataBlkInfo->groupId;
if (rb.nCols != colNum) {
tdSRowSetTpInfo(&rb, colNum, pTSchema->flen);
}
if(isCreateCtb) {
SMetaReader mr = {0};
const char* ctbName = buildCtbNameByGroupId(stbName, pDataBlock->info.groupId);
if (metaGetTableEntryByName(&mr, ctbName) != 0) {
smaDebug("vgId:%d, no tsma ctb %s exists", vgId, ctbName);
}
SVCreateTbReq ctbReq = {0};
ctbReq.name = ctbName;
ctbReq.type = TSDB_CHILD_TABLE;
ctbReq.ctb.suid = suid;
STagVal tagVal = {.cid = colNum + PRIMARYKEY_TIMESTAMP_COL_ID,
.type = TSDB_DATA_TYPE_BIGINT,
.i64 = groupId};
STag* pTag = NULL;
if(!pTagArray) {
pTagArray = taosArrayInit(1, sizeof(STagVal));
if (!pTagArray) goto _err;
}
taosArrayClear(pTagArray);
taosArrayPush(pTagArray, &tagVal);
tTagNew(pTagArray, 1, false, &pTag);
if (pTag == NULL) {
tdDestroySVCreateTbReq(&ctbReq);
goto _err;
}
ctbReq.ctb.pTag = (uint8_t*)pTag;
int32_t code;
tEncodeSize(tEncodeSVCreateTbReq, &ctbReq, schemaLen, code);
tdDestroySVCreateTbReq(&ctbReq);
if (code < 0) {
goto _err;
}
}
SSubmitBlk* pSubmitBlk = POINTER_SHIFT(pDataBuf, msgLen);
pSubmitBlk->suid = suid;
pSubmitBlk->uid = groupId;
pSubmitBlk->numOfRows = rows;
msgLen += sizeof(SSubmitBlk);
int32_t dataLen = 0;
for (int32_t j = 0; j < rows; ++j) { // iterate by row
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf
bool isStartKey = false;
int32_t offset = 0;
for (int32_t k = 0; k < colNum; ++k) { // iterate by column
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
STColumn* pCol = &pTSchema->columns[k];
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
switch (pColInfoData->info.type) {
case TSDB_DATA_TYPE_TIMESTAMP:
if (!isStartKey) {
isStartKey = true;
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true,
offset, k);
} else {
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var,
true, offset, k);
}
break;
case TSDB_DATA_TYPE_NCHAR: {
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true,
offset, k);
break;
}
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true,
offset, k);
break;
}
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_MEDIUMBLOB:
uError("the column type %" PRIi16 " is defined but not implemented yet", pColInfoData->info.type);
TASSERT(0);
break;
default:
if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) {
if (pCol->type == pColInfoData->info.type) {
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pCol->type, TD_VTYPE_NORM, var, true, offset,
k);
} else {
char tv[8] = {0};
if (pColInfoData->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
GET_TYPED_DATA(v, float, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else if (pColInfoData->info.type == TSDB_DATA_TYPE_DOUBLE) {
double v = 0;
GET_TYPED_DATA(v, double, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else if (IS_SIGNED_NUMERIC_TYPE(pColInfoData->info.type)) {
int64_t v = 0;
GET_TYPED_DATA(v, int64_t, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
} else {
uint64_t v = 0;
GET_TYPED_DATA(v, uint64_t, pColInfoData->info.type, var);
SET_TYPED_DATA(&tv, pCol->type, v);
}
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pCol->type, TD_VTYPE_NORM, tv, true, offset,
k);
}
} else {
uError("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
TASSERT(0);
}
break;
}
offset += TYPE_BYTES[pCol->type]; // sum/avg would convert to int64_t/uint64_t/double during aggregation
}
dataLen += TD_ROW_LEN(rb.pBuf);
#ifdef TD_DEBUG_PRINT_ROW
tdSRowPrint(rb.pBuf, pTSchema, __func__);
#endif
}
++numOfBlks;
pSubmitBlk->dataLen = dataLen;
msgLen += pSubmitBlk->dataLen;
}
(*pReq)->length = msgLen;
(*pReq)->header.vgId = htonl(vgId);
(*pReq)->header.contLen = htonl(msgLen);
(*pReq)->length = (*pReq)->header.contLen;
(*pReq)->numOfBlocks = htonl(numOfBlks);
SSubmitBlk* blk = (SSubmitBlk*)((*pReq) + 1);
while (numOfBlks--) {
int32_t dataLen = blk->dataLen;
blk->uid = htobe64(blk->uid);
blk->suid = htobe64(blk->suid);
blk->padding = htonl(blk->padding);
blk->sversion = htonl(blk->sversion);
blk->dataLen = htonl(blk->dataLen);
blk->schemaLen = htonl(blk->schemaLen);
blk->numOfRows = htons(blk->numOfRows);
blk = (SSubmitBlk*)(blk->data + dataLen);
}
return TSDB_CODE_SUCCESS;
_err:
taosMemoryFreeClear(*pReq);
taosArrayDestroy(pTagArray);
return TSDB_CODE_FAILED;
}
#endif
source/dnode/vnode/src/sma/smaEnv.c
浏览文件 @
85847473
...
...
@@ -17,123 +17,17 @@
typedef
struct
SSmaStat
SSmaStat
;
static
const
char
*
TSDB_SMA_DNAME
[]
=
{
""
,
// TSDB_SMA_TYPE_BLOCK
"tsma"
,
// TSDB_SMA_TYPE_TIME_RANGE
"rsma"
,
// TSDB_SMA_TYPE_ROLLUP
};
#define SMA_TEST_INDEX_NAME "smaTestIndexName" // TODO: just for test
#define SMA_TEST_INDEX_UID 2000000001 // TODO: just for test
#define SMA_STATE_HASH_SLOT 4
#define RSMA_TASK_INFO_HASH_SLOT 8
typedef
struct
SPoolMem
{
int64_t
size
;
struct
SPoolMem
*
prev
;
struct
SPoolMem
*
next
;
}
SPoolMem
;
// declaration of static functions
// insert data
static
void
tdGetSmaDir
(
int32_t
vgId
,
ETsdbSmaType
smaType
,
char
dirName
[]);
// Pool Memory
static
SPoolMem
*
openPool
();
static
void
clearPool
(
SPoolMem
*
pPool
);
static
void
closePool
(
SPoolMem
*
pPool
);
static
void
*
poolMalloc
(
void
*
arg
,
size_t
size
);
static
void
poolFree
(
void
*
arg
,
void
*
ptr
);
static
int32_t
tdInitSmaStat
(
SSmaStat
**
pSmaStat
,
int8_t
smaType
);
static
SSmaEnv
*
tdNewSmaEnv
(
const
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
);
static
int32_t
tdInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
SSmaEnv
**
pEnv
);
// implementation
static
SPoolMem
*
openPool
()
{
SPoolMem
*
pPool
=
(
SPoolMem
*
)
taosMemoryMalloc
(
sizeof
(
*
pPool
));
pPool
->
prev
=
pPool
->
next
=
pPool
;
pPool
->
size
=
0
;
return
pPool
;
}
static
void
clearPool
(
SPoolMem
*
pPool
)
{
if
(
!
pPool
)
return
;
SPoolMem
*
pMem
;
do
{
pMem
=
pPool
->
next
;
if
(
pMem
==
pPool
)
break
;
pMem
->
next
->
prev
=
pMem
->
prev
;
pMem
->
prev
->
next
=
pMem
->
next
;
pPool
->
size
-=
pMem
->
size
;
taosMemoryFree
(
pMem
);
}
while
(
1
);
assert
(
pPool
->
size
==
0
);
}
static
void
closePool
(
SPoolMem
*
pPool
)
{
if
(
pPool
)
{
clearPool
(
pPool
);
taosMemoryFree
(
pPool
);
}
}
static
void
*
poolMalloc
(
void
*
arg
,
size_t
size
)
{
void
*
ptr
=
NULL
;
SPoolMem
*
pPool
=
(
SPoolMem
*
)
arg
;
SPoolMem
*
pMem
;
pMem
=
(
SPoolMem
*
)
taosMemoryMalloc
(
sizeof
(
*
pMem
)
+
size
);
if
(
!
pMem
)
{
assert
(
0
);
}
pMem
->
size
=
sizeof
(
*
pMem
)
+
size
;
pMem
->
next
=
pPool
->
next
;
pMem
->
prev
=
pPool
;
pPool
->
next
->
prev
=
pMem
;
pPool
->
next
=
pMem
;
pPool
->
size
+=
pMem
->
size
;
ptr
=
(
void
*
)(
&
pMem
[
1
]);
return
ptr
;
}
static
void
poolFree
(
void
*
arg
,
void
*
ptr
)
{
SPoolMem
*
pPool
=
(
SPoolMem
*
)
arg
;
SPoolMem
*
pMem
;
pMem
=
&
(((
SPoolMem
*
)
ptr
)[
-
1
]);
pMem
->
next
->
prev
=
pMem
->
prev
;
pMem
->
prev
->
next
=
pMem
->
next
;
pPool
->
size
-=
pMem
->
size
;
taosMemoryFree
(
pMem
);
}
int32_t
tdInitSma
(
SSma
*
pSma
)
{
int32_t
numOfTSma
=
taosArrayGetSize
(
metaGetSmaTbUids
(
SMA_META
(
pSma
)));
if
(
numOfTSma
>
0
)
{
atomic_store_16
(
&
SMA_TSMA_NUM
(
pSma
),
(
int16_t
)
numOfTSma
);
}
return
TSDB_CODE_SUCCESS
;
}
static
void
tdGetSmaDir
(
int32_t
vgId
,
ETsdbSmaType
smaType
,
char
dirName
[])
{
snprintf
(
dirName
,
TSDB_FILENAME_LEN
,
"vnode%svnode%d%s%s"
,
TD_DIRSEP
,
vgId
,
TD_DIRSEP
,
TSDB_SMA_DNAME
[
smaType
]);
}
static
SSmaEnv
*
tdNewSmaEnv
(
const
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
SDiskID
did
)
{
static
SSmaEnv
*
tdNewSmaEnv
(
const
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
)
{
SSmaEnv
*
pEnv
=
NULL
;
pEnv
=
(
SSmaEnv
*
)
taosMemoryCalloc
(
1
,
sizeof
(
SSmaEnv
));
...
...
@@ -156,18 +50,17 @@ static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path,
return
NULL
;
}
return
pEnv
;
}
static
int32_t
tdInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
S
DiskID
did
,
S
SmaEnv
**
pEnv
)
{
static
int32_t
tdInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
const
char
*
path
,
SSmaEnv
**
pEnv
)
{
if
(
!
pEnv
)
{
terrno
=
TSDB_CODE_INVALID_PTR
;
return
TSDB_CODE_FAILED
;
}
if
(
!
(
*
pEnv
))
{
if
(
!
(
*
pEnv
=
tdNewSmaEnv
(
pSma
,
smaType
,
path
,
did
)))
{
if
(
!
(
*
pEnv
=
tdNewSmaEnv
(
pSma
,
smaType
,
path
)))
{
return
TSDB_CODE_FAILED
;
}
}
...
...
@@ -183,15 +76,16 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SDiskI
*/
void
tdDestroySmaEnv
(
SSmaEnv
*
pSmaEnv
)
{
if
(
pSmaEnv
)
{
tdDestroySmaState
(
pSmaEnv
->
pStat
,
SMA_ENV_TYPE
(
pSmaEnv
));
taosMemoryFreeClear
(
pSmaEnv
->
pStat
);
pSmaEnv
->
pStat
=
tdFreeSmaState
(
pSmaEnv
->
pStat
,
SMA_ENV_TYPE
(
pSmaEnv
));
taosThreadRwlockDestroy
(
&
(
pSmaEnv
->
lock
));
}
}
void
*
tdFreeSmaEnv
(
SSmaEnv
*
pSmaEnv
)
{
tdDestroySmaEnv
(
pSmaEnv
);
taosMemoryFreeClear
(
pSmaEnv
);
if
(
pSmaEnv
)
{
tdDestroySmaEnv
(
pSmaEnv
);
taosMemoryFreeClear
(
pSmaEnv
);
}
return
NULL
;
}
...
...
@@ -239,13 +133,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) {
return
TSDB_CODE_FAILED
;
}
}
else
if
(
smaType
==
TSDB_SMA_TYPE_TIME_RANGE
)
{
SMA_STAT_ITEMS
(
*
pSmaStat
)
=
taosHashInit
(
SMA_STATE_HASH_SLOT
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_BINARY
),
true
,
HASH_ENTRY_LOCK
);
if
(
!
SMA_STAT_ITEMS
(
*
pSmaStat
))
{
taosMemoryFreeClear
(
*
pSmaStat
);
return
TSDB_CODE_FAILED
;
}
// TODO
}
else
{
ASSERT
(
0
);
}
...
...
@@ -262,6 +150,12 @@ void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem) {
return
NULL
;
}
void
*
tdFreeSmaState
(
SSmaStat
*
pSmaStat
,
int8_t
smaType
)
{
tdDestroySmaState
(
pSmaStat
,
smaType
);
taosMemoryFreeClear
(
pSmaStat
);
return
NULL
;
}
/**
* @brief Release resources allocated for its member fields, not including itself.
*
...
...
@@ -270,16 +164,10 @@ void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem) {
*/
int32_t
tdDestroySmaState
(
SSmaStat
*
pSmaStat
,
int8_t
smaType
)
{
if
(
pSmaStat
)
{
// TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
if
(
smaType
==
TSDB_SMA_TYPE_TIME_RANGE
)
{
void
*
item
=
taosHashIterate
(
SMA_STAT_ITEMS
(
pSmaStat
),
NULL
);
while
(
item
)
{
SSmaStatItem
*
pItem
=
*
(
SSmaStatItem
**
)
item
;
tdFreeSmaStatItem
(
pItem
);
item
=
taosHashIterate
(
SMA_STAT_ITEMS
(
pSmaStat
),
item
);
}
taosHashCleanup
(
SMA_STAT_ITEMS
(
pSmaStat
));
tdFreeSmaStatItem
(
&
pSmaStat
->
tsmaStatItem
);
}
else
if
(
smaType
==
TSDB_SMA_TYPE_ROLLUP
)
{
// TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
void
*
infoHash
=
taosHashIterate
(
SMA_STAT_INFO_HASH
(
pSmaStat
),
NULL
);
while
(
infoHash
)
{
SRSmaInfo
*
pInfoHash
=
*
(
SRSmaInfo
**
)
infoHash
;
...
...
@@ -317,7 +205,7 @@ int32_t tdUnLockSma(SSma *pSma) {
return
0
;
}
int32_t
tdCheckAndInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
,
bool
onlyCheck
)
{
int32_t
tdCheckAndInitSmaEnv
(
SSma
*
pSma
,
int8_t
smaType
)
{
SSmaEnv
*
pEnv
=
NULL
;
// return if already init
...
...
@@ -344,26 +232,7 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType, bool onlyCheck) {
if
(
!
pEnv
)
{
char
rname
[
TSDB_FILENAME_LEN
]
=
{
0
};
SDiskID
did
=
{
0
};
if
(
tfsAllocDisk
(
SMA_TFS
(
pSma
),
TFS_PRIMARY_LEVEL
,
&
did
)
<
0
)
{
tdUnLockSma
(
pSma
);
return
TSDB_CODE_FAILED
;
}
if
(
did
.
level
<
0
||
did
.
id
<
0
)
{
tdUnLockSma
(
pSma
);
smaError
(
"vgId:%d, init sma env failed since invalid did(%d,%d)"
,
SMA_VID
(
pSma
),
did
.
level
,
did
.
id
);
return
TSDB_CODE_FAILED
;
}
tdGetSmaDir
(
SMA_VID
(
pSma
),
smaType
,
rname
);
if
(
tfsMkdirRecurAt
(
SMA_TFS
(
pSma
),
rname
,
did
)
<
0
)
{
tdUnLockSma
(
pSma
);
return
TSDB_CODE_FAILED
;
}
if
(
tdInitSmaEnv
(
pSma
,
smaType
,
rname
,
did
,
&
pEnv
)
<
0
)
{
if
(
tdInitSmaEnv
(
pSma
,
smaType
,
rname
,
&
pEnv
)
<
0
)
{
tdUnLockSma
(
pSma
);
return
TSDB_CODE_FAILED
;
}
...
...
source/dnode/vnode/src/sma/smaOpen.c
浏览文件 @
85847473
...
...
@@ -132,7 +132,9 @@ int32_t smaClose(SSma *pSma) {
if
SMA_RSMA_TSDB0
(
pSma
)
tsdbClose
(
&
SMA_RSMA_TSDB0
(
pSma
));
if
SMA_RSMA_TSDB1
(
pSma
)
tsdbClose
(
&
SMA_RSMA_TSDB1
(
pSma
));
if
SMA_RSMA_TSDB2
(
pSma
)
tsdbClose
(
&
SMA_RSMA_TSDB2
(
pSma
));
taosMemoryFree
(
pSma
);
// SMA_TSMA_ENV(pSma) = tdFreeSmaEnv(SMA_TSMA_ENV(pSma));
// SMA_RSMA_ENV(pSma) = tdFreeSmaEnv(SMA_RSMA_ENV(pSma));
taosMemoryFreeClear
(
pSma
);
}
return
0
;
}
\ No newline at end of file
source/dnode/vnode/src/sma/smaRollup.c
浏览文件 @
85847473
...
...
@@ -181,7 +181,7 @@ int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq *pReq) {
return
TSDB_CODE_SUCCESS
;
}
if
(
tdCheckAndInitSmaEnv
(
pSma
,
TSDB_SMA_TYPE_ROLLUP
,
false
)
!=
TSDB_CODE_SUCCESS
)
{
if
(
tdCheckAndInitSmaEnv
(
pSma
,
TSDB_SMA_TYPE_ROLLUP
)
!=
TSDB_CODE_SUCCESS
)
{
terrno
=
TSDB_CODE_TDB_INIT_FAILED
;
return
TSDB_CODE_FAILED
;
}
...
...
source/dnode/vnode/src/sma/smaTimeRange.c
浏览文件 @
85847473
...
...
@@ -142,7 +142,6 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
ASSERT
(
pItem
);
if
(
!
pItem
->
pTSma
)
{
// cache smaMeta
STSma
*
pTSma
=
metaGetSmaInfoByIndex
(
SMA_META
(
pSma
),
indexUid
);
if
(
!
pTSma
)
{
terrno
=
TSDB_CODE_TSMA_NO_INDEX_IN_META
;
...
...
@@ -150,27 +149,28 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
return
TSDB_CODE_FAILED
;
}
pItem
->
pTSma
=
pTSma
;
pItem
->
pTSchema
=
metaGetTbTSchema
(
SMA_META
(
pSma
),
pTSma
->
dstTbUid
,
-
1
);
ASSERT
(
pItem
->
pTSchema
);
// TODO
}
STSma
*
pTSma
=
pItem
->
pTSma
;
ASSERT
(
pItem
->
pTSma
->
indexUid
==
indexUid
)
;
ASSERT
(
pTSma
->
indexUid
==
indexUid
)
;
SSubmitReq
*
pSubmitReq
=
NULL
;
SMetaReader
mr
=
{
0
};
pSubmitReq
=
tdBlockToSubmit
((
const
SArray
*
)
msg
,
pItem
->
pTSchema
,
true
,
pItem
->
pTSma
->
dstTbUid
,
pItem
->
pTSma
->
dstTbName
,
pItem
->
pTSma
->
dstVgId
);
const
char
*
dbName
=
"testDb"
;
if
(
metaGetTableEntryByName
(
&
mr
,
dbName
)
!=
0
)
{
smaDebug
(
"vgId:%d, tsma no table testTb exists for smaIndex %"
PRIi64
" since %s"
,
SMA_VID
(
pSma
),
indexUid
,
tstrerror
(
terrno
));
SVCreateStbReq
pReq
=
{
0
};
pReq
.
name
=
dbName
;
pReq
.
suid
=
pTSma
->
dstTbUid
;
pReq
.
schemaRow
=
pCfg
->
schemaRow
;
pReq
.
schemaTag
=
pCfg
->
schemaTag
;
}
ASSERT
(
pSubmitReq
);
// TODO
SSubmitReq
*
pSubmitReq
=
NULL
;
buildSubmitReqFromDataBlock
(
&
pSubmitReq
,
(
const
SArray
*
)
msg
,
NULL
,
pItem
->
pTSma
->
dstVgId
,
pItem
->
pTSma
->
dstTbUid
);
ASSERT
(
!
strncasecmp
(
"td.tsma.rst.tb"
,
pItem
->
pTSma
->
dstTbName
,
14
));
SRpcMsg
submitReqMsg
=
{
.
msgType
=
TDMT_VND_SUBMIT
,
.
pCont
=
pSubmitReq
,
.
contLen
=
ntohl
(
pSubmitReq
->
length
),
};
ASSERT
(
tmsgPutToQueue
(
&
pSma
->
pVnode
->
msgCb
,
WRITE_QUEUE
,
&
submitReqMsg
)
==
0
);
tdUnRefSmaStat
(
pSma
,
pStat
);
...
...
source/dnode/vnode/src/sma/smaTimeRange2.c
已删除
100644 → 0
浏览文件 @
555469c7
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "sma.h"
#include "tsdb.h"
typedef
STsdbCfg
STSmaKeepCfg
;
#undef _TEST_SMA_PRINT_DEBUG_LOG_
#define SMA_STORAGE_MINUTES_MAX 86400
#define SMA_STORAGE_MINUTES_DAY 1440
#define SMA_STORAGE_MINUTES_MIN 1440
#define SMA_STORAGE_TSDB_MINUTES 86400
#define SMA_STORAGE_TSDB_TIMES 10
#define SMA_STORAGE_SPLIT_FACTOR 14400 // least records in tsma file TODO: the feasible value?
#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8
#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds
#define SMA_STATE_ITEM_HASH_SLOT 32
// static func
/**
* @brief Judge the tsma file split days
*
* @param pCfg
* @param pCont
* @param contLen
* @param days unit is minute
* @return int32_t
*/
int32_t
tdProcessTSmaGetDaysImpl
(
SVnodeCfg
*
pCfg
,
void
*
pCont
,
uint32_t
contLen
,
int32_t
*
days
)
{
SDecoder
coder
=
{
0
};
tDecoderInit
(
&
coder
,
pCont
,
contLen
);
STSma
tsma
=
{
0
};
if
(
tDecodeSVCreateTSmaReq
(
&
coder
,
&
tsma
)
<
0
)
{
terrno
=
TSDB_CODE_MSG_DECODE_ERROR
;
goto
_err
;
}
STsdbCfg
*
pTsdbCfg
=
&
pCfg
->
tsdbCfg
;
int64_t
sInterval
=
convertTimeFromPrecisionToUnit
(
tsma
.
interval
,
pTsdbCfg
->
precision
,
TIME_UNIT_SECOND
);
if
(
sInterval
<=
0
)
{
*
days
=
pTsdbCfg
->
days
;
return
0
;
}
int64_t
records
=
pTsdbCfg
->
days
*
60
/
sInterval
;
if
(
records
>=
SMA_STORAGE_SPLIT_FACTOR
)
{
*
days
=
pTsdbCfg
->
days
;
}
else
{
int64_t
mInterval
=
convertTimeFromPrecisionToUnit
(
tsma
.
interval
,
pTsdbCfg
->
precision
,
TIME_UNIT_MINUTE
);
int64_t
daysPerFile
=
mInterval
*
SMA_STORAGE_MINUTES_DAY
*
2
;
if
(
daysPerFile
>
SMA_STORAGE_MINUTES_MAX
)
{
*
days
=
SMA_STORAGE_MINUTES_MAX
;
}
else
{
*
days
=
(
int32_t
)
daysPerFile
;
}
if
(
*
days
<
pTsdbCfg
->
days
)
{
*
days
=
pTsdbCfg
->
days
;
}
}
tDecoderClear
(
&
coder
);
return
0
;
_err:
tDecoderClear
(
&
coder
);
return
-
1
;
}
// read data
// implementation
/**
* @brief Insert/Update Time-range-wise SMA data.
* - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g.
* v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files.
* - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The
* days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d).
* - The destination file of one data block for some interval is determined by its start TS key.
*
* @param pSma
* @param msg
* @return int32_t
*/
int32_t
tdProcessTSmaInsertImpl
(
SSma
*
pSma
,
int64_t
indexUid
,
const
char
*
msg
)
{
STsdbCfg
*
pCfg
=
SMA_TSDB_CFG
(
pSma
);
const
SArray
*
pDataBlocks
=
(
const
SArray
*
)
msg
;
// TODO: destroy SSDataBlocks(msg)
// For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus
// the sma data would arrive ahead of the update-expired-window msg.
if
(
tdCheckAndInitSmaEnv
(
pSma
,
TSDB_SMA_TYPE_TIME_RANGE
,
false
)
!=
TSDB_CODE_SUCCESS
)
{
terrno
=
TSDB_CODE_TDB_INIT_FAILED
;
return
TSDB_CODE_FAILED
;
}
if
(
!
pDataBlocks
)
{
terrno
=
TSDB_CODE_INVALID_PTR
;
smaWarn
(
"vgId:%d, insert tsma data failed since pDataBlocks is NULL"
,
SMA_VID
(
pSma
));
return
terrno
;
}
if
(
taosArrayGetSize
(
pDataBlocks
)
<=
0
)
{
terrno
=
TSDB_CODE_INVALID_PARA
;
smaWarn
(
"vgId:%d, insert tsma data failed since pDataBlocks is empty"
,
SMA_VID
(
pSma
));
return
TSDB_CODE_FAILED
;
}
SSmaEnv
*
pEnv
=
SMA_TSMA_ENV
(
pSma
);
SSmaStat
*
pStat
=
SMA_ENV_STAT
(
pEnv
);
SSmaStatItem
*
pItem
=
NULL
;
tdRefSmaStat
(
pSma
,
pStat
);
if
(
pStat
&&
SMA_STAT_ITEMS
(
pStat
))
{
pItem
=
taosHashGet
(
SMA_STAT_ITEMS
(
pStat
),
&
indexUid
,
sizeof
(
indexUid
));
}
if
(
!
pItem
||
!
(
pItem
=
*
(
SSmaStatItem
**
)
pItem
)
||
tdSmaStatIsDropped
(
pItem
))
{
terrno
=
TSDB_CODE_TSMA_INVALID_STAT
;
tdUnRefSmaStat
(
pSma
,
pStat
);
return
TSDB_CODE_FAILED
;
}
STSma
*
pTSma
=
pItem
->
pTSma
;
tdUnRefSmaStat
(
pSma
,
pStat
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
tdProcessTSmaCreateImpl
(
SSma
*
pSma
,
int64_t
version
,
const
char
*
pMsg
)
{
SSmaCfg
*
pCfg
=
(
SSmaCfg
*
)
pMsg
;
if
(
metaCreateTSma
(
SMA_META
(
pSma
),
version
,
pCfg
)
<
0
)
{
return
-
1
;
}
if
(
TD_VID
(
pSma
->
pVnode
)
==
pCfg
->
dstVgId
)
{
// create stable to save tsma result in dstVgId
SVCreateStbReq
pReq
=
{
0
};
pReq
.
name
=
pCfg
->
dstTbName
;
pReq
.
suid
=
pCfg
->
dstTbUid
;
pReq
.
schemaRow
=
pCfg
->
schemaRow
;
pReq
.
schemaTag
=
pCfg
->
schemaTag
;
if
(
metaCreateSTable
(
SMA_META
(
pSma
),
version
,
&
pReq
)
<
0
)
{
return
-
1
;
}
}
tdTSmaAdd
(
pSma
,
1
);
return
0
;
}
\ No newline at end of file
source/dnode/vnode/src/tq/tqSink.c
浏览文件 @
85847473
...
...
@@ -15,10 +15,7 @@
#include "tq.h"
static
SSubmitReq
*
tdBlockToSubmit
(
const
SArray
*
pBlocks
,
const
STSchema
*
pSchema
,
bool
createTb
,
int64_t
suid
,
const
char
*
stbFullName
,
int32_t
vgId
);
static
SSubmitReq
*
tdBlockToSubmit
(
const
SArray
*
pBlocks
,
const
STSchema
*
pTSchema
,
bool
createTb
,
int64_t
suid
,
SSubmitReq
*
tdBlockToSubmit
(
const
SArray
*
pBlocks
,
const
STSchema
*
pTSchema
,
bool
createTb
,
int64_t
suid
,
const
char
*
stbFullName
,
int32_t
vgId
)
{
SSubmitReq
*
ret
=
NULL
;
SArray
*
tagArray
=
taosArrayInit
(
1
,
sizeof
(
STagVal
));
...
...
source/dnode/vnode/src/vnd/vnodeSvr.c
浏览文件 @
85847473
...
...
@@ -284,7 +284,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, int64_t version, SRpcMsg *pMsg, SRp
void
smaHandleRes
(
void
*
pVnode
,
int64_t
smaId
,
const
SArray
*
data
)
{
// TODO
//
blockDebugShowData(data, __func__);
blockDebugShowData
(
data
,
__func__
);
tdProcessTSmaInsert
(((
SVnode
*
)
pVnode
)
->
pSma
,
smaId
,
(
const
char
*
)
data
);
}
...
...
source/dnode/vnode/test/tsdbSmaTest.cpp
浏览文件 @
85847473
...
...
@@ -121,7 +121,7 @@ TEST(testCase, tSma_Meta_Encode_Decode_Test) {
// decode
STSmaWrapper
dstTSmaWrapper
=
{
0
};
void
*
result
=
tDecodeTSmaWrapper
(
pSW
,
&
dstTSmaWrapper
);
void
*
result
=
tDecodeTSmaWrapper
(
pSW
,
&
dstTSmaWrapper
,
false
);
EXPECT_NE
(
result
,
nullptr
);
EXPECT_EQ
(
tSmaWrapper
.
number
,
dstTSmaWrapper
.
number
);
...
...
source/libs/parser/src/parAstCreater.c
浏览文件 @
85847473
...
...
@@ -804,10 +804,10 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
case
DB_OPTION_RETENTIONS
:
((
SDatabaseOptions
*
)
pOptions
)
->
pRetentions
=
pVal
;
break
;
case
DB_OPTION_SCHEMALESS
:
// ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
((
SDatabaseOptions
*
)
pOptions
)
->
schemaless
=
1
;
break
;
//
case DB_OPTION_SCHEMALESS:
// ((SDatabaseOptions*)pOptions)->schemaless = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
// ((SDatabaseOptions*)pOptions)->schemaless = 0
;
//
break;
default:
break
;
}
...
...
source/libs/parser/test/parInitialCTest.cpp
浏览文件 @
85847473
...
...
@@ -155,7 +155,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
ASSERT_EQ
(
req
.
replications
,
expect
.
replications
);
ASSERT_EQ
(
req
.
strict
,
expect
.
strict
);
ASSERT_EQ
(
req
.
cacheLastRow
,
expect
.
cacheLastRow
);
ASSERT_EQ
(
req
.
schemaless
,
expect
.
schemaless
);
//
ASSERT_EQ(req.schemaless, expect.schemaless);
ASSERT_EQ
(
req
.
ignoreExist
,
expect
.
ignoreExist
);
ASSERT_EQ
(
req
.
numOfRetensions
,
expect
.
numOfRetensions
);
if
(
expect
.
numOfRetensions
>
0
)
{
...
...
source/libs/planner/src/planSpliter.c
浏览文件 @
85847473
...
...
@@ -15,6 +15,7 @@
#include "functionMgt.h"
#include "planInt.h"
#include "tglobal.h"
#define SPLIT_FLAG_MASK(n) (1 << n)
...
...
@@ -37,7 +38,8 @@ typedef struct SSplitRule {
FSplit
splitFunc
;
}
SSplitRule
;
typedef
bool
(
*
FSplFindSplitNode
)(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
void
*
pInfo
);
// typedef bool (*FSplFindSplitNode)(SSplitContext* pCxt, SLogicSubplan* pSubplan, void* pInfo);
typedef
bool
(
*
FSplFindSplitNode
)(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
void
*
pInfo
);
static
void
splSetSubplanVgroups
(
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
)
{
if
(
QUERY_NODE_LOGIC_PLAN_SCAN
==
nodeType
(
pNode
))
{
...
...
@@ -95,9 +97,23 @@ static int32_t splCreateExchangeNodeForSubplan(SSplitContext* pCxt, SLogicSubpla
return
code
;
}
static
bool
splMatchByNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
FSplFindSplitNode
func
,
void
*
pInfo
)
{
if
(
func
(
pCxt
,
pSubplan
,
pNode
,
pInfo
))
{
return
true
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
if
(
splMatchByNode
(
pCxt
,
pSubplan
,
(
SLogicNode
*
)
pChild
,
func
,
pInfo
))
{
return
true
;
}
}
return
NULL
;
}
static
bool
splMatch
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
int32_t
flag
,
FSplFindSplitNode
func
,
void
*
pInfo
)
{
if
(
!
SPLIT_FLAG_TEST_MASK
(
pSubplan
->
splitFlag
,
flag
))
{
if
(
func
(
pCxt
,
pSubplan
,
pInfo
))
{
if
(
splMatchByNode
(
pCxt
,
pSubplan
,
pSubplan
->
pNode
,
func
,
pInfo
))
{
return
true
;
}
}
...
...
@@ -110,6 +126,11 @@ static bool splMatch(SSplitContext* pCxt, SLogicSubplan* pSubplan, int32_t flag,
return
false
;
}
static
void
splSetParent
(
SLogicNode
*
pNode
)
{
SNode
*
pChild
=
NULL
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
((
SLogicNode
*
)
pChild
)
->
pParent
=
pNode
;
}
}
typedef
struct
SStableSplitInfo
{
SLogicNode
*
pSplitNode
;
SLogicSubplan
*
pSubplan
;
...
...
@@ -136,11 +157,21 @@ static bool stbSplHasMultiTbScan(bool streamQuery, SLogicNode* pNode) {
return
false
;
}
SNode
*
pChild
=
nodesListGetNode
(
pNode
->
pChildren
,
0
);
if
(
QUERY_NODE_LOGIC_PLAN_PARTITION
==
nodeType
(
pChild
))
{
if
(
1
!=
LIST_LENGTH
(((
SLogicNode
*
)
pChild
)
->
pChildren
))
{
return
false
;
}
pChild
=
nodesListGetNode
(((
SLogicNode
*
)
pChild
)
->
pChildren
,
0
);
}
return
(
QUERY_NODE_LOGIC_PLAN_SCAN
==
nodeType
(
pChild
)
&&
stbSplIsMultiTbScan
(
streamQuery
,
(
SScanLogicNode
*
)
pChild
));
}
static
bool
stbSplNeedSplit
(
bool
streamQuery
,
SLogicNode
*
pNode
)
{
switch
(
nodeType
(
pNode
))
{
case
QUERY_NODE_LOGIC_PLAN_SCAN
:
return
stbSplIsMultiTbScan
(
streamQuery
,
(
SScanLogicNode
*
)
pNode
);
// case QUERY_NODE_LOGIC_PLAN_JOIN:
// return !(((SJoinLogicNode*)pNode)->isSingleTableJoin);
case
QUERY_NODE_LOGIC_PLAN_AGG
:
return
!
stbSplHasGatherExecFunc
(((
SAggLogicNode
*
)
pNode
)
->
pAggFuncs
)
&&
stbSplHasMultiTbScan
(
streamQuery
,
pNode
);
case
QUERY_NODE_LOGIC_PLAN_WINDOW
:
{
...
...
@@ -152,35 +183,20 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
}
case
QUERY_NODE_LOGIC_PLAN_SORT
:
return
stbSplHasMultiTbScan
(
streamQuery
,
pNode
);
case
QUERY_NODE_LOGIC_PLAN_SCAN
:
return
stbSplIsMultiTbScan
(
streamQuery
,
(
SScanLogicNode
*
)
pNode
);
default:
break
;
}
return
false
;
}
static
SLogicNode
*
stbSplMatchByNode
(
bool
streamQuery
,
SLogicNode
*
pNode
)
{
if
(
stbSplNeedSplit
(
streamQuery
,
pNode
))
{
return
pNode
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
SLogicNode
*
pSplitNode
=
stbSplMatchByNode
(
streamQuery
,
(
SLogicNode
*
)
pChild
);
if
(
NULL
!=
pSplitNode
)
{
return
pSplitNode
;
}
}
return
NULL
;
}
static
bool
stbSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SStableSplitInfo
*
pInfo
)
{
SLogicNode
*
pSplitNode
=
stbSplMatchByNode
(
pCxt
->
pPlanCxt
->
streamQuery
,
pSubplan
->
pNode
);
if
(
NULL
!=
pSplitNode
)
{
pInfo
->
pSplitNode
=
pSplitNode
;
static
bool
stbSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
SStableSplitInfo
*
pInfo
)
{
if
(
stbSplNeedSplit
(
pCxt
->
pPlanCxt
->
streamQuery
,
pNode
))
{
pInfo
->
pSplitNode
=
pNode
;
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
NULL
!=
pSplitNod
e
;
return
fals
e
;
}
static
int32_t
stbSplRewriteFuns
(
const
SNodeList
*
pFuncs
,
SNodeList
**
pPartialFuncs
,
SNodeList
**
pMergeFuncs
)
{
...
...
@@ -258,6 +274,7 @@ static int32_t stbSplCreatePartWindowNode(SWindowLogicNode* pMergeWindow, SLogic
if
(
TSDB_CODE_SUCCESS
==
code
)
{
pMergeWindow
->
node
.
pTargets
=
pTargets
;
pPartWin
->
node
.
pChildren
=
pChildren
;
splSetParent
((
SLogicNode
*
)
pPartWin
);
code
=
stbSplRewriteFuns
(
pFunc
,
&
pPartWin
->
pFuncs
,
&
pMergeWindow
->
pFuncs
);
}
int32_t
index
=
0
;
...
...
@@ -285,13 +302,24 @@ static int32_t stbSplCreatePartWindowNode(SWindowLogicNode* pMergeWindow, SLogic
return
code
;
}
static
int32_t
stbSplGetNumOfVgroups
(
SLogicNode
*
pNode
)
{
if
(
QUERY_NODE_LOGIC_PLAN_SCAN
==
nodeType
(
pNode
))
{
return
((
SScanLogicNode
*
)
pNode
)
->
pVgroupList
->
numOfVgroups
;
}
else
{
if
(
1
==
LIST_LENGTH
(
pNode
->
pChildren
))
{
return
stbSplGetNumOfVgroups
((
SLogicNode
*
)
nodesListGetNode
(
pNode
->
pChildren
,
0
));
}
}
return
0
;
}
static
int32_t
stbSplCreateMergeNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pSplitNode
,
SNodeList
*
pMergeKeys
,
SLogicNode
*
pPartChild
)
{
SMergeLogicNode
*
pMerge
=
(
SMergeLogicNode
*
)
nodesMakeNode
(
QUERY_NODE_LOGIC_PLAN_MERGE
);
if
(
NULL
==
pMerge
)
{
return
TSDB_CODE_OUT_OF_MEMORY
;
}
pMerge
->
numOfChannels
=
((
SScanLogicNode
*
)
nodesListGetNode
(
pPartChild
->
pChildren
,
0
))
->
pVgroupList
->
numOfVgroups
;
pMerge
->
numOfChannels
=
stbSplGetNumOfVgroups
(
pPartChild
)
;
pMerge
->
srcGroupId
=
pCxt
->
groupId
;
pMerge
->
node
.
precision
=
pPartChild
->
precision
;
pMerge
->
pMergeKeys
=
pMergeKeys
;
...
...
@@ -329,12 +357,12 @@ static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent
return
code
;
}
static
int32_t
stbSplCreateMergeKeys
ForInterval
(
SNode
*
pWStartTs
,
SNodeList
**
pMergeKeys
)
{
static
int32_t
stbSplCreateMergeKeys
ByPrimaryKey
(
SNode
*
pPrimaryKey
,
SNodeList
**
pMergeKeys
)
{
SOrderByExprNode
*
pMergeKey
=
(
SOrderByExprNode
*
)
nodesMakeNode
(
QUERY_NODE_ORDER_BY_EXPR
);
if
(
NULL
==
pMergeKey
)
{
return
TSDB_CODE_OUT_OF_MEMORY
;
}
pMergeKey
->
pExpr
=
nodesCloneNode
(
p
WStartTs
);
pMergeKey
->
pExpr
=
nodesCloneNode
(
p
PrimaryKey
);
if
(
NULL
==
pMergeKey
->
pExpr
)
{
nodesDestroyNode
((
SNode
*
)
pMergeKey
);
return
TSDB_CODE_OUT_OF_MEMORY
;
...
...
@@ -351,7 +379,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
((
SWindowLogicNode
*
)
pPartWindow
)
->
intervalAlgo
=
INTERVAL_ALGO_HASH
;
((
SWindowLogicNode
*
)
pInfo
->
pSplitNode
)
->
intervalAlgo
=
INTERVAL_ALGO_MERGE
;
SNodeList
*
pMergeKeys
=
NULL
;
code
=
stbSplCreateMergeKeys
ForInterval
(((
SWindowLogicNode
*
)
pInfo
->
pSplitNode
)
->
pTspk
,
&
pMergeKeys
);
code
=
stbSplCreateMergeKeys
ByPrimaryKey
(((
SWindowLogicNode
*
)
pInfo
->
pSplitNode
)
->
pTspk
,
&
pMergeKeys
);
if
(
TSDB_CODE_SUCCESS
==
code
)
{
code
=
stbSplCreateMergeNode
(
pCxt
,
NULL
,
pInfo
->
pSplitNode
,
pMergeKeys
,
pPartWindow
);
}
...
...
@@ -439,6 +467,7 @@ static int32_t stbSplCreatePartAggNode(SAggLogicNode* pMergeAgg, SLogicNode** pO
pMergeAgg
->
node
.
pConditions
=
pConditions
;
pMergeAgg
->
node
.
pTargets
=
pTargets
;
pPartAgg
->
node
.
pChildren
=
pChildren
;
splSetParent
((
SLogicNode
*
)
pPartAgg
);
code
=
stbSplRewriteFuns
(
pFunc
,
&
pPartAgg
->
pAggFuncs
,
&
pMergeAgg
->
pAggFuncs
);
}
...
...
@@ -553,6 +582,7 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut
SNodeList
*
pMergeKeys
=
NULL
;
if
(
TSDB_CODE_SUCCESS
==
code
)
{
pPartSort
->
node
.
pChildren
=
pChildren
;
splSetParent
((
SLogicNode
*
)
pPartSort
);
pPartSort
->
pSortKeys
=
pSortKeys
;
code
=
stbSplCreateMergeKeys
(
pPartSort
->
pSortKeys
,
pPartSort
->
node
.
pTargets
,
&
pMergeKeys
);
}
...
...
@@ -592,6 +622,56 @@ static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
return
code
;
}
static
SNode
*
stbSplFindPrimaryKeyFromScan
(
SScanLogicNode
*
pScan
)
{
SNode
*
pCol
=
NULL
;
FOREACH
(
pCol
,
pScan
->
pScanCols
)
{
if
(
PRIMARYKEY_TIMESTAMP_COL_ID
==
((
SColumnNode
*
)
pCol
)
->
colId
)
{
return
pCol
;
}
}
return
NULL
;
}
static
int32_t
stbSplSplitScanNodeForJoin
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SScanLogicNode
*
pScan
)
{
SNodeList
*
pMergeKeys
=
NULL
;
int32_t
code
=
stbSplCreateMergeKeysByPrimaryKey
(
stbSplFindPrimaryKeyFromScan
(
pScan
),
&
pMergeKeys
);
if
(
TSDB_CODE_SUCCESS
==
code
)
{
code
=
stbSplCreateMergeNode
(
pCxt
,
pSubplan
,
(
SLogicNode
*
)
pScan
,
pMergeKeys
,
(
SLogicNode
*
)
pScan
);
}
if
(
TSDB_CODE_SUCCESS
==
code
)
{
code
=
nodesListMakeStrictAppend
(
&
pSubplan
->
pChildren
,
(
SNode
*
)
splCreateScanSubplan
(
pCxt
,
(
SLogicNode
*
)
pScan
,
SPLIT_FLAG_STABLE_SPLIT
));
}
return
code
;
}
static
int32_t
stbSplSplitJoinNodeImpl
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SJoinLogicNode
*
pJoin
)
{
int32_t
code
=
TSDB_CODE_SUCCESS
;
SNode
*
pChild
=
NULL
;
FOREACH
(
pChild
,
pJoin
->
node
.
pChildren
)
{
if
(
QUERY_NODE_LOGIC_PLAN_SCAN
==
nodeType
(
pChild
))
{
code
=
stbSplSplitScanNodeForJoin
(
pCxt
,
pSubplan
,
(
SScanLogicNode
*
)
pChild
);
}
else
if
(
QUERY_NODE_LOGIC_PLAN_JOIN
==
nodeType
(
pChild
))
{
code
=
stbSplSplitJoinNodeImpl
(
pCxt
,
pSubplan
,
(
SJoinLogicNode
*
)
pChild
);
}
else
{
code
=
TSDB_CODE_PLAN_INTERNAL_ERROR
;
}
if
(
TSDB_CODE_SUCCESS
!=
code
)
{
break
;
}
}
return
code
;
}
static
int32_t
stbSplSplitJoinNode
(
SSplitContext
*
pCxt
,
SStableSplitInfo
*
pInfo
)
{
int32_t
code
=
stbSplSplitJoinNodeImpl
(
pCxt
,
pInfo
->
pSubplan
,
(
SJoinLogicNode
*
)
pInfo
->
pSplitNode
);
if
(
TSDB_CODE_SUCCESS
==
code
)
{
pInfo
->
pSubplan
->
subplanType
=
SUBPLAN_TYPE_MERGE
;
SPLIT_FLAG_SET_MASK
(
pInfo
->
pSubplan
->
splitFlag
,
SPLIT_FLAG_STABLE_SPLIT
);
}
return
code
;
}
static
int32_t
stableSplit
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
)
{
if
(
pCxt
->
pPlanCxt
->
rSmaQuery
)
{
return
TSDB_CODE_SUCCESS
;
...
...
@@ -604,6 +684,12 @@ static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
int32_t
code
=
TSDB_CODE_SUCCESS
;
switch
(
nodeType
(
info
.
pSplitNode
))
{
case
QUERY_NODE_LOGIC_PLAN_SCAN
:
code
=
stbSplSplitScanNode
(
pCxt
,
&
info
);
break
;
case
QUERY_NODE_LOGIC_PLAN_JOIN
:
code
=
stbSplSplitJoinNode
(
pCxt
,
&
info
);
break
;
case
QUERY_NODE_LOGIC_PLAN_AGG
:
code
=
stbSplSplitAggNode
(
pCxt
,
&
info
);
break
;
...
...
@@ -613,9 +699,6 @@ static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
case
QUERY_NODE_LOGIC_PLAN_SORT
:
code
=
stbSplSplitSortNode
(
pCxt
,
&
info
);
break
;
case
QUERY_NODE_LOGIC_PLAN_SCAN
:
code
=
stbSplSplitScanNode
(
pCxt
,
&
info
);
break
;
default:
break
;
}
...
...
@@ -631,7 +714,12 @@ typedef struct SSigTbJoinSplitInfo {
SLogicSubplan
*
pSubplan
;
}
SSigTbJoinSplitInfo
;
static
bool
sigTbJoinSplNeedSplit
(
SJoinLogicNode
*
pJoin
)
{
static
bool
sigTbJoinSplNeedSplit
(
SLogicNode
*
pNode
)
{
if
(
QUERY_NODE_LOGIC_PLAN_JOIN
!=
nodeType
(
pNode
))
{
return
false
;
}
SJoinLogicNode
*
pJoin
=
(
SJoinLogicNode
*
)
pNode
;
if
(
!
pJoin
->
isSingleTableJoin
)
{
return
false
;
}
...
...
@@ -639,28 +727,15 @@ static bool sigTbJoinSplNeedSplit(SJoinLogicNode* pJoin) {
QUERY_NODE_LOGIC_PLAN_EXCHANGE
!=
nodeType
(
nodesListGetNode
(
pJoin
->
node
.
pChildren
,
1
));
}
static
SJoinLogicNode
*
sigTbJoinSplMatchByNode
(
SLogicNode
*
pNode
)
{
if
(
QUERY_NODE_LOGIC_PLAN_JOIN
==
nodeType
(
pNode
)
&&
sigTbJoinSplNeedSplit
((
SJoinLogicNode
*
)
pNode
))
{
return
(
SJoinLogicNode
*
)
pNode
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
SJoinLogicNode
*
pSplitNode
=
sigTbJoinSplMatchByNode
((
SLogicNode
*
)
pChild
);
if
(
NULL
!=
pSplitNode
)
{
return
pSplitNode
;
}
}
return
NULL
;
}
static
bool
sigTbJoinSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SSigTbJoinSplitInfo
*
pInfo
)
{
SJoinLogicNode
*
pJoin
=
sigTbJoinSplMatchByNode
(
pSubplan
->
pNode
);
if
(
NULL
!=
pJoin
)
{
pInfo
->
pJoin
=
pJoin
;
pInfo
->
pSplitNode
=
(
SLogicNode
*
)
nodesListGetNode
(
pJoin
->
node
.
pChildren
,
1
);
static
bool
sigTbJoinSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
SSigTbJoinSplitInfo
*
pInfo
)
{
if
(
sigTbJoinSplNeedSplit
(
pNode
))
{
pInfo
->
pJoin
=
(
SJoinLogicNode
*
)
pNode
;
pInfo
->
pSplitNode
=
(
SLogicNode
*
)
nodesListGetNode
(
pNode
->
pChildren
,
1
);
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
NULL
!=
pJoin
;
return
false
;
}
static
int32_t
singleTableJoinSplit
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
)
{
...
...
@@ -753,27 +828,14 @@ typedef struct SUnionAllSplitInfo {
SLogicSubplan
*
pSubplan
;
}
SUnionAllSplitInfo
;
static
SLogicNode
*
unAllSplMatchByNode
(
SLogicNode
*
pNode
)
{
static
bool
unAllSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
SUnionAllSplitInfo
*
pInfo
)
{
if
(
QUERY_NODE_LOGIC_PLAN_PROJECT
==
nodeType
(
pNode
)
&&
LIST_LENGTH
(
pNode
->
pChildren
)
>
1
)
{
return
pNode
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
SLogicNode
*
pSplitNode
=
unAllSplMatchByNode
((
SLogicNode
*
)
pChild
);
if
(
NULL
!=
pSplitNode
)
{
return
pSplitNode
;
}
}
return
NULL
;
}
static
bool
unAllSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SUnionAllSplitInfo
*
pInfo
)
{
SLogicNode
*
pSplitNode
=
unAllSplMatchByNode
(
pSubplan
->
pNode
);
if
(
NULL
!=
pSplitNode
)
{
pInfo
->
pProject
=
(
SProjectLogicNode
*
)
pSplitNode
;
pInfo
->
pProject
=
(
SProjectLogicNode
*
)
pNode
;
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
NULL
!=
pSplitNod
e
;
return
fals
e
;
}
static
int32_t
unAllSplCreateExchangeNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SProjectLogicNode
*
pProject
)
{
...
...
@@ -828,20 +890,6 @@ typedef struct SUnionDistinctSplitInfo {
SLogicSubplan
*
pSubplan
;
}
SUnionDistinctSplitInfo
;
static
SLogicNode
*
unDistSplMatchByNode
(
SLogicNode
*
pNode
)
{
if
(
QUERY_NODE_LOGIC_PLAN_AGG
==
nodeType
(
pNode
)
&&
LIST_LENGTH
(
pNode
->
pChildren
)
>
1
)
{
return
pNode
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
SLogicNode
*
pSplitNode
=
unDistSplMatchByNode
((
SLogicNode
*
)
pChild
);
if
(
NULL
!=
pSplitNode
)
{
return
pSplitNode
;
}
}
return
NULL
;
}
static
int32_t
unDistSplCreateExchangeNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SAggLogicNode
*
pAgg
)
{
SExchangeLogicNode
*
pExchange
=
(
SExchangeLogicNode
*
)
nodesMakeNode
(
QUERY_NODE_LOGIC_PLAN_EXCHANGE
);
if
(
NULL
==
pExchange
)
{
...
...
@@ -859,13 +907,14 @@ static int32_t unDistSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* p
return
nodesListMakeAppend
(
&
pAgg
->
node
.
pChildren
,
(
SNode
*
)
pExchange
);
}
static
bool
unDistSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
S
UnionDistinctSplitInfo
*
pInfo
)
{
SLogicNode
*
pSplitNode
=
unDistSplMatchByNode
(
pSubplan
->
pNode
);
if
(
NULL
!=
pSplitNode
)
{
pInfo
->
pAgg
=
(
SAggLogicNode
*
)
p
Split
Node
;
static
bool
unDistSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
S
LogicNode
*
pNode
,
SUnionDistinctSplitInfo
*
pInfo
)
{
if
(
QUERY_NODE_LOGIC_PLAN_AGG
==
nodeType
(
pNode
)
&&
LIST_LENGTH
(
pNode
->
pChildren
)
>
1
)
{
pInfo
->
pAgg
=
(
SAggLogicNode
*
)
pNode
;
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
NULL
!=
pSplitNod
e
;
return
fals
e
;
}
static
int32_t
unionDistinctSplit
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
)
{
...
...
@@ -888,27 +937,14 @@ typedef struct SSmaIndexSplitInfo {
SLogicSubplan
*
pSubplan
;
}
SSmaIndexSplitInfo
;
static
SLogicNode
*
smaIdxSplMatchByNode
(
SLogicNode
*
pNode
)
{
static
bool
smaIdxSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
SSmaIndexSplitInfo
*
pInfo
)
{
if
(
QUERY_NODE_LOGIC_PLAN_MERGE
==
nodeType
(
pNode
)
&&
LIST_LENGTH
(
pNode
->
pChildren
)
>
1
)
{
return
pNode
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pNode
->
pChildren
)
{
SLogicNode
*
pSplitNode
=
smaIdxSplMatchByNode
((
SLogicNode
*
)
pChild
);
if
(
NULL
!=
pSplitNode
)
{
return
pSplitNode
;
}
}
return
NULL
;
}
static
bool
smaIdxSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SSmaIndexSplitInfo
*
pInfo
)
{
SLogicNode
*
pSplitNode
=
smaIdxSplMatchByNode
(
pSubplan
->
pNode
);
if
(
NULL
!=
pSplitNode
)
{
pInfo
->
pMerge
=
(
SMergeLogicNode
*
)
pSplitNode
;
pInfo
->
pMerge
=
(
SMergeLogicNode
*
)
pNode
;
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
NULL
!=
pSplitNod
e
;
return
fals
e
;
}
static
int32_t
smaIndexSplit
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
)
{
...
...
@@ -926,13 +962,47 @@ static int32_t smaIndexSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
return
code
;
}
typedef
struct
SQnodeSplitInfo
{
SLogicNode
*
pSplitNode
;
SLogicSubplan
*
pSubplan
;
}
SQnodeSplitInfo
;
static
bool
qndSplFindSplitNode
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
,
SLogicNode
*
pNode
,
SQnodeSplitInfo
*
pInfo
)
{
if
(
QUERY_NODE_LOGIC_PLAN_SCAN
==
nodeType
(
pNode
)
&&
NULL
!=
pNode
->
pParent
)
{
pInfo
->
pSplitNode
=
pNode
;
pInfo
->
pSubplan
=
pSubplan
;
return
true
;
}
return
false
;
}
static
int32_t
qnodeSplit
(
SSplitContext
*
pCxt
,
SLogicSubplan
*
pSubplan
)
{
if
(
QUERY_POLICY_QNODE
!=
tsQueryPolicy
)
{
return
TSDB_CODE_SUCCESS
;
}
SQnodeSplitInfo
info
=
{
0
};
if
(
!
splMatch
(
pCxt
,
pSubplan
,
0
,
(
FSplFindSplitNode
)
qndSplFindSplitNode
,
&
info
))
{
return
TSDB_CODE_SUCCESS
;
}
int32_t
code
=
splCreateExchangeNodeForSubplan
(
pCxt
,
info
.
pSubplan
,
info
.
pSplitNode
,
info
.
pSubplan
->
subplanType
);
if
(
TSDB_CODE_SUCCESS
==
code
)
{
code
=
nodesListMakeStrictAppend
(
&
info
.
pSubplan
->
pChildren
,
(
SNode
*
)
splCreateScanSubplan
(
pCxt
,
info
.
pSplitNode
,
0
));
}
++
(
pCxt
->
groupId
);
pCxt
->
split
=
true
;
return
code
;
}
// clang-format off
static
const
SSplitRule
splitRuleSet
[]
=
{
{.
pName
=
"SuperTableSplit"
,
.
splitFunc
=
stableSplit
},
{.
pName
=
"SingleTableJoinSplit"
,
.
splitFunc
=
singleTableJoinSplit
},
{.
pName
=
"UnionAllSplit"
,
.
splitFunc
=
unionAllSplit
},
{.
pName
=
"UnionDistinctSplit"
,
.
splitFunc
=
unionDistinctSplit
},
{.
pName
=
"SmaIndexSplit"
,
.
splitFunc
=
smaIndexSplit
}
{.
pName
=
"SmaIndexSplit"
,
.
splitFunc
=
smaIndexSplit
},
{.
pName
=
"QnodeSplit"
,
.
splitFunc
=
qnodeSplit
}
};
// clang-format on
...
...
source/libs/planner/test/planGroupByTest.cpp
浏览文件 @
85847473
...
...
@@ -83,5 +83,7 @@ TEST_F(PlanGroupByTest, stable) {
run
(
"SELECT COUNT(*) FROM st1 GROUP BY c1"
);
run
(
"SELECT COUNT(*) FROM st1 PARTITION BY c2 GROUP BY c1"
);
run
(
"SELECT SUM(c1) FROM st1 GROUP BY c2 HAVING SUM(c1) IS NOT NULL"
);
}
source/libs/planner/test/planIntervalTest.cpp
浏览文件 @
85847473
...
...
@@ -60,4 +60,6 @@ TEST_F(PlanIntervalTest, stable) {
run
(
"SELECT COUNT(*) FROM st1 INTERVAL(10s)"
);
run
(
"SELECT _WSTARTTS, COUNT(*) FROM st1 INTERVAL(10s)"
);
run
(
"SELECT _WSTARTTS, COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)"
);
}
source/libs/planner/test/planJoinTest.cpp
浏览文件 @
85847473
...
...
@@ -50,3 +50,9 @@ TEST_F(PlanJoinTest, multiJoin) {
run
(
"SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts JOIN st1s3 t3 ON t1.ts = t3.ts"
);
}
// Plans a two-super-table join; only checks that planning succeeds via run().
TEST_F(PlanJoinTest, stable) {
  useDb("root", "test");

  // Note: the SQL string intentionally ends with a trailing space.
  run("SELECT t1.c1, t2.c1 FROM st1 t1 JOIN st2 t2 ON t1.ts = t2.ts ");
}
source/libs/planner/test/planOrderByTest.cpp
浏览文件 @
85847473
...
...
@@ -49,4 +49,6 @@ TEST_F(PlanOrderByTest, stable) {
// ORDER BY key is not in the projection list
run
(
"SELECT c2 FROM st1 ORDER BY c1"
);
run
(
"SELECT c2 FROM st1 PARTITION BY c2 ORDER BY c1"
);
}
source/libs/planner/test/planOtherTest.cpp
浏览文件 @
85847473
...
...
@@ -83,3 +83,10 @@ TEST_F(PlanOtherTest, delete) {
run
(
"DELETE FROM st1 WHERE ts > now - 2d and ts < now - 1d AND tag1 = 10"
);
}
// Plans a simple aggregate with the qnode query policy forced on, exercising
// the QnodeSplit rule path.
TEST_F(PlanOtherTest, queryPolicy) {
  useDb("root", "test");

  // Force the global policy so qnodeSplit() fires during planning.
  tsQueryPolicy = QUERY_POLICY_QNODE;
  run("SELECT COUNT(*) FROM st1");
}
source/libs/planner/test/planTestUtil.h
浏览文件 @
85847473
...
...
@@ -18,6 +18,10 @@
#include <gtest/gtest.h>
#define ALLOW_FORBID_FUNC
#include "planInt.h"
class
PlannerTestBaseImpl
;
struct
TAOS_MULTI_BIND
;
...
...
source/libs/sync/src/syncMain.c
浏览文件 @
85847473
...
...
@@ -561,8 +561,6 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, const SRpcMsg* pMsg, bool isWeak)
stub
.
createTime
=
taosGetTimestampMs
();
stub
.
rpcMsg
=
*
pMsg
;
uint64_t
seqNum
=
syncRespMgrAdd
(
pSyncNode
->
pSyncRespMgr
,
&
stub
);
sDebug
(
"vgId:%d sync event propose, type:%s seq:%"
PRIu64
" handle:%p"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pMsg
->
msgType
),
seqNum
,
pMsg
->
info
.
handle
);
SyncClientRequest
*
pSyncMsg
=
syncClientRequestBuild2
(
pMsg
,
seqNum
,
isWeak
,
pSyncNode
->
vgId
);
SRpcMsg
rpcMsg
;
...
...
@@ -771,7 +769,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
}
// tools
pSyncNode
->
pSyncRespMgr
=
syncRespMgrCreate
(
NULL
,
0
);
pSyncNode
->
pSyncRespMgr
=
syncRespMgrCreate
(
pSyncNode
,
0
);
assert
(
pSyncNode
->
pSyncRespMgr
!=
NULL
);
// restore state
...
...
source/libs/sync/src/syncRespMgr.c
浏览文件 @
85847473
...
...
@@ -44,6 +44,10 @@ int64_t syncRespMgrAdd(SSyncRespMgr *pObj, SRespStub *pStub) {
uint64_t
keyCode
=
++
(
pObj
->
seqNum
);
taosHashPut
(
pObj
->
pRespHash
,
&
keyCode
,
sizeof
(
keyCode
),
pStub
,
sizeof
(
SRespStub
));
SSyncNode
*
pSyncNode
=
pObj
->
data
;
sDebug
(
"vgId:%d sync event resp mgr add, type:%s seq:%lu handle:%p"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pStub
->
rpcMsg
.
msgType
),
keyCode
,
pStub
->
rpcMsg
.
info
.
handle
);
taosThreadMutexUnlock
(
&
(
pObj
->
mutex
));
return
keyCode
;
}
...
...
@@ -63,6 +67,11 @@ int32_t syncRespMgrGet(SSyncRespMgr *pObj, uint64_t index, SRespStub *pStub) {
void
*
pTmp
=
taosHashGet
(
pObj
->
pRespHash
,
&
index
,
sizeof
(
index
));
if
(
pTmp
!=
NULL
)
{
memcpy
(
pStub
,
pTmp
,
sizeof
(
SRespStub
));
SSyncNode
*
pSyncNode
=
pObj
->
data
;
sDebug
(
"vgId:%d sync event resp mgr get, type:%s seq:%lu handle:%p"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pStub
->
rpcMsg
.
msgType
),
index
,
pStub
->
rpcMsg
.
info
.
handle
);
taosThreadMutexUnlock
(
&
(
pObj
->
mutex
));
return
1
;
// get one object
}
...
...
@@ -76,6 +85,11 @@ int32_t syncRespMgrGetAndDel(SSyncRespMgr *pObj, uint64_t index, SRespStub *pStu
void
*
pTmp
=
taosHashGet
(
pObj
->
pRespHash
,
&
index
,
sizeof
(
index
));
if
(
pTmp
!=
NULL
)
{
memcpy
(
pStub
,
pTmp
,
sizeof
(
SRespStub
));
SSyncNode
*
pSyncNode
=
pObj
->
data
;
sDebug
(
"vgId:%d sync event resp mgr get and del, type:%s seq:%lu handle:%p"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pStub
->
rpcMsg
.
msgType
),
index
,
pStub
->
rpcMsg
.
info
.
handle
);
taosHashRemove
(
pObj
->
pRespHash
,
&
index
,
sizeof
(
index
));
taosThreadMutexUnlock
(
&
(
pObj
->
mutex
));
return
1
;
// get one object
...
...
source/libs/sync/src/syncUtil.c
浏览文件 @
85847473
...
...
@@ -14,6 +14,8 @@
*/
#include "syncUtil.h"
#include <stdio.h>
#include "syncEnv.h"
void
addEpIntoEpSet
(
SEpSet
*
pEpSet
,
const
char
*
fqdn
,
uint16_t
port
);
...
...
@@ -21,8 +23,31 @@ void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);
// ---- encode / decode
uint64_t
syncUtilAddr2U64
(
const
char
*
host
,
uint16_t
port
)
{
uint64_t
u64
;
uint32_t
hostU32
=
taosGetIpv4FromFqdn
(
host
);
if
(
hostU32
==
(
uint32_t
)
-
1
)
{
sError
(
"Get IP address error"
);
return
-
1
;
}
/*
uint32_t hostU32 = (uint32_t)taosInetAddr(host);
// assert(hostU32 != (uint32_t)-1);
if (hostU32 == (uint32_t)-1) {
struct hostent* hostEnt = gethostbyname(host);
if (hostEnt == NULL) {
sError("Get IP address error");
return -1;
}
const char* newHost = taosInetNtoa(*(struct in_addr*)(hostEnt->h_addr_list[0]));
hostU32 = (uint32_t)taosInetAddr(newHost);
if (hostU32 == (uint32_t)-1) {
sError("change %s to id, error", newHost);
}
// ASSERT(hostU32 != (uint32_t)-1);
}
*/
u64
=
(((
uint64_t
)
hostU32
)
<<
32
)
|
(((
uint32_t
)
port
)
<<
16
);
return
u64
;
}
...
...
source/libs/transport/src/transCli.c
浏览文件 @
85847473
...
...
@@ -41,7 +41,7 @@ typedef struct SCliConn {
// debug and log info
struct
sockaddr_in
addr
;
struct
sockaddr_in
locaddr
;
struct
sockaddr_in
loca
lA
ddr
;
}
SCliConn
;
typedef
struct
SCliMsg
{
...
...
@@ -54,7 +54,8 @@ typedef struct SCliMsg {
}
SCliMsg
;
typedef
struct
SCliThrdObj
{
TdThread
thread
;
TdThread
thread
;
// tid
int64_t
pid
;
// pid
uv_loop_t
*
loop
;
SAsyncPool
*
asyncPool
;
uv_timer_t
timer
;
...
...
@@ -325,7 +326,7 @@ void cliHandleResp(SCliConn* conn) {
tDebug
(
"%s cli conn %p %s received from %s:%d, local info: %s:%d, msg size: %d"
,
pTransInst
->
label
,
conn
,
TMSG_INFO
(
pHead
->
msgType
),
taosInetNtoa
(
conn
->
addr
.
sin_addr
),
ntohs
(
conn
->
addr
.
sin_port
),
taosInetNtoa
(
conn
->
loca
ddr
.
sin_addr
),
ntohs
(
conn
->
loca
ddr
.
sin_port
),
transMsg
.
contLen
);
taosInetNtoa
(
conn
->
loca
lAddr
.
sin_addr
),
ntohs
(
conn
->
localA
ddr
.
sin_port
),
transMsg
.
contLen
);
if
(
pCtx
==
NULL
&&
CONN_NO_PERSIST_BY_APP
(
conn
))
{
tTrace
(
"except, server continue send while cli ignore it"
);
...
...
@@ -643,7 +644,7 @@ void cliSend(SCliConn* pConn) {
uv_buf_t
wb
=
uv_buf_init
((
char
*
)
pHead
,
msgLen
);
tDebug
(
"%s cli conn %p %s is send to %s:%d, local info %s:%d"
,
CONN_GET_INST_LABEL
(
pConn
),
pConn
,
TMSG_INFO
(
pHead
->
msgType
),
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
loca
ddr
.
sin_addr
),
ntohs
(
pConn
->
loca
ddr
.
sin_port
));
taosInetNtoa
(
pConn
->
loca
lAddr
.
sin_addr
),
ntohs
(
pConn
->
localA
ddr
.
sin_port
));
if
(
pHead
->
persist
==
1
)
{
CONN_SET_PERSIST_BY_APP
(
pConn
);
...
...
@@ -668,8 +669,8 @@ void cliConnCb(uv_connect_t* req, int status) {
int
addrlen
=
sizeof
(
pConn
->
addr
);
uv_tcp_getpeername
((
uv_tcp_t
*
)
pConn
->
stream
,
(
struct
sockaddr
*
)
&
pConn
->
addr
,
&
addrlen
);
addrlen
=
sizeof
(
pConn
->
locaddr
);
uv_tcp_getsockname
((
uv_tcp_t
*
)
pConn
->
stream
,
(
struct
sockaddr
*
)
&
pConn
->
locaddr
,
&
addrlen
);
addrlen
=
sizeof
(
pConn
->
loca
lA
ddr
);
uv_tcp_getsockname
((
uv_tcp_t
*
)
pConn
->
stream
,
(
struct
sockaddr
*
)
&
pConn
->
loca
lA
ddr
,
&
addrlen
);
tTrace
(
"%s cli conn %p connect to server successfully"
,
CONN_GET_INST_LABEL
(
pConn
),
pConn
);
assert
(
pConn
->
stream
==
req
->
handle
);
...
...
@@ -742,8 +743,7 @@ void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
void
cliHandleReq
(
SCliMsg
*
pMsg
,
SCliThrdObj
*
pThrd
)
{
uint64_t
et
=
taosGetTimestampUs
();
uint64_t
el
=
et
-
pMsg
->
st
;
tTrace
(
"%s cli msg tran time cost: %"
PRIu64
"us, threadID: %"
PRId64
""
,
((
STrans
*
)
pThrd
->
pTransInst
)
->
label
,
el
,
pThrd
->
thread
);
// tTrace("%s cli msg tran time cost: %" PRIu64 "us", ((STrans*)pThrd->pTransInst)->label, el);
STransConnCtx
*
pCtx
=
pMsg
->
ctx
;
STrans
*
pTransInst
=
pThrd
->
pTransInst
;
...
...
@@ -822,6 +822,7 @@ static void cliAsyncCb(uv_async_t* handle) {
static
void
*
cliWorkThread
(
void
*
arg
)
{
SCliThrdObj
*
pThrd
=
(
SCliThrdObj
*
)
arg
;
pThrd
->
pid
=
taosGetSelfPthreadId
();
setThreadName
(
"trans-cli-work"
);
uv_run
(
pThrd
->
loop
,
UV_RUN_DEFAULT
);
return
NULL
;
...
...
@@ -966,30 +967,31 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
pMsg
->
st
=
taosGetTimestampUs
();
pCtx
->
retryCount
+=
1
;
if
(
pResp
->
code
==
TSDB_CODE_RPC_NETWORK_UNAVAIL
)
{
if
(
pCtx
->
retryCount
<
pEpSet
->
numOfEps
)
{
if
(
pCtx
->
retryCount
<
pEpSet
->
numOfEps
*
3
)
{
pEpSet
->
inUse
=
(
++
pEpSet
->
inUse
)
%
pEpSet
->
numOfEps
;
STaskArg
*
arg
=
taosMemoryMalloc
(
sizeof
(
STaskArg
));
arg
->
param1
=
pMsg
;
arg
->
param2
=
pThrd
;
transDQSched
(
pThrd
->
delayQueue
,
doDelayTask
,
arg
,
TRANS_RETRY_INTERVAL
);
tTrace
(
"use local epset, current in use: %d, retry count:%d, limit: %d"
,
pEpSet
->
inUse
,
pCtx
->
retryCount
+
1
,
pEpSet
->
numOfEps
*
3
);
transUnrefCliHandle
(
pConn
);
return
-
1
;
}
}
else
if
(
pCtx
->
retryCount
<
TRANS_RETRY_COUNT_LIMIT
)
{
if
(
pResp
->
contLen
==
0
)
{
pEpSet
->
inUse
=
(
++
pEpSet
->
inUse
)
%
pEpSet
->
numOfEps
;
tTrace
(
"use local epset, current in use: %d, retry count:%d, limit: %d"
,
pEpSet
->
inUse
,
pCtx
->
retryCount
+
1
,
TRANS_RETRY_COUNT_LIMIT
);
}
else
{
SEpSet
epSet
=
{
0
};
tDeserializeSEpSet
(
pResp
->
pCont
,
pResp
->
contLen
,
&
epSet
);
pCtx
->
epSet
=
epSet
;
if
(
!
transEpSetIsEqual
(
&
epSet
,
&
pCtx
->
epSet
))
{
pCtx
->
retryCount
=
0
;
}
tTrace
(
"use remote epset, current in use: %d, retry count:%d, limit: %d"
,
pEpSet
->
inUse
,
pCtx
->
retryCount
+
1
,
TRANS_RETRY_COUNT_LIMIT
);
}
addConnToPool
(
pThrd
->
pool
,
pConn
);
tTrace
(
"use remote epset, current in use: %d, retry count:%d, try limit: %d"
,
pEpSet
->
inUse
,
pCtx
->
retryCount
+
1
,
TRANS_RETRY_COUNT_LIMIT
);
STaskArg
*
arg
=
taosMemoryMalloc
(
sizeof
(
STaskArg
));
arg
->
param1
=
pMsg
;
...
...
@@ -1089,7 +1091,7 @@ void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra
SCliThrdObj
*
thrd
=
((
SCliObj
*
)
pTransInst
->
tcphandle
)
->
pThreadObj
[
index
];
tDebug
(
"send request at thread:%d, threadID: %
"
PRId64
", msg: %p, dst: %s:%d, app:%p"
,
index
,
thrd
->
threa
d
,
pReq
,
tDebug
(
"send request at thread:%d, threadID: %
08"
PRId64
", msg: %p, dst: %s:%d, app:%p"
,
index
,
thrd
->
pi
d
,
pReq
,
EPSET_GET_INUSE_IP
(
&
pCtx
->
epSet
),
EPSET_GET_INUSE_PORT
(
&
pCtx
->
epSet
),
pReq
->
info
.
ahandle
);
ASSERT
(
transSendAsync
(
thrd
->
asyncPool
,
&
(
cliMsg
->
q
))
==
0
);
}
...
...
@@ -1118,7 +1120,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM
cliMsg
->
type
=
Normal
;
SCliThrdObj
*
thrd
=
((
SCliObj
*
)
pTransInst
->
tcphandle
)
->
pThreadObj
[
index
];
tDebug
(
"send request at thread:%d, threadID:%
"
PRId64
", msg: %p, dst: %s:%d, app:%p"
,
index
,
thrd
->
threa
d
,
pReq
,
tDebug
(
"send request at thread:%d, threadID:%
08"
PRId64
", msg: %p, dst: %s:%d, app:%p"
,
index
,
thrd
->
pi
d
,
pReq
,
EPSET_GET_INUSE_IP
(
&
pCtx
->
epSet
),
EPSET_GET_INUSE_PORT
(
&
pCtx
->
epSet
),
pReq
->
info
.
ahandle
);
transSendAsync
(
thrd
->
asyncPool
,
&
(
cliMsg
->
q
));
...
...
@@ -1149,7 +1151,7 @@ void transSetDefaultAddr(void* ahandle, const char* ip, const char* fqdn) {
cliMsg
->
type
=
Update
;
SCliThrdObj
*
thrd
=
((
SCliObj
*
)
pTransInst
->
tcphandle
)
->
pThreadObj
[
i
];
tDebug
(
"update epset at thread:%d, threadID:%
"
PRId64
""
,
i
,
thrd
->
threa
d
);
tDebug
(
"update epset at thread:%d, threadID:%
08"
PRId64
""
,
i
,
thrd
->
pi
d
);
transSendAsync
(
thrd
->
asyncPool
,
&
(
cliMsg
->
q
));
}
...
...
source/libs/transport/src/transSvr.c
浏览文件 @
85847473
...
...
@@ -48,7 +48,7 @@ typedef struct SSvrConn {
ConnStatus
status
;
struct
sockaddr_in
addr
;
struct
sockaddr_in
locaddr
;
struct
sockaddr_in
loca
lA
ddr
;
int64_t
refId
;
int
spi
;
...
...
@@ -286,12 +286,12 @@ static void uvHandleReq(SSvrConn* pConn) {
if
(
pConn
->
status
==
ConnNormal
&&
pHead
->
noResp
==
0
)
{
transRefSrvHandle
(
pConn
);
tDebug
(
"server conn %p %s received from %s:%d, local info: %s:%d, msg size: %d"
,
pConn
,
TMSG_INFO
(
transMsg
.
msgType
),
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
locaddr
.
sin_addr
),
ntohs
(
pConn
->
locaddr
.
sin_port
),
transMsg
.
contLen
);
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
loca
lA
ddr
.
sin_addr
),
ntohs
(
pConn
->
loca
lA
ddr
.
sin_port
),
transMsg
.
contLen
);
}
else
{
tDebug
(
"server conn %p %s received from %s:%d, local info: %s:%d, msg size: %d, resp:%d "
,
pConn
,
TMSG_INFO
(
transMsg
.
msgType
),
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
loca
ddr
.
sin_addr
),
ntohs
(
pConn
->
loca
ddr
.
sin_port
),
transMsg
.
contLen
,
pHead
->
noResp
);
taosInetNtoa
(
pConn
->
loca
lAddr
.
sin_addr
),
ntohs
(
pConn
->
localA
ddr
.
sin_port
),
transMsg
.
contLen
,
pHead
->
noResp
);
// no ref here
}
...
...
@@ -454,8 +454,8 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
char
*
msg
=
(
char
*
)
pHead
;
int32_t
len
=
transMsgLenFromCont
(
pMsg
->
contLen
);
tDebug
(
"server conn %p %s is sent to %s:%d, local info: %s:%d, msglen:%d"
,
pConn
,
TMSG_INFO
(
pHead
->
msgType
),
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
locaddr
.
sin_addr
),
ntohs
(
pConn
->
locaddr
.
sin_port
),
len
);
taosInetNtoa
(
pConn
->
addr
.
sin_addr
),
ntohs
(
pConn
->
addr
.
sin_port
),
taosInetNtoa
(
pConn
->
loca
lA
ddr
.
sin_addr
),
ntohs
(
pConn
->
loca
lA
ddr
.
sin_port
),
len
);
pHead
->
msgLen
=
htonl
(
len
);
wb
->
base
=
msg
;
...
...
@@ -686,8 +686,8 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
return
;
}
addrlen
=
sizeof
(
pConn
->
locaddr
);
if
(
0
!=
uv_tcp_getsockname
(
pConn
->
pTcp
,
(
struct
sockaddr
*
)
&
pConn
->
locaddr
,
&
addrlen
))
{
addrlen
=
sizeof
(
pConn
->
loca
lA
ddr
);
if
(
0
!=
uv_tcp_getsockname
(
pConn
->
pTcp
,
(
struct
sockaddr
*
)
&
pConn
->
loca
lA
ddr
,
&
addrlen
))
{
tError
(
"server conn %p failed to get local info"
,
pConn
);
transUnrefSrvHandle
(
pConn
);
return
;
...
...
source/libs/transport/test/transUT.cpp
浏览文件 @
85847473
...
...
@@ -381,28 +381,29 @@ TEST_F(TransEnv, srvReleaseHandle) {
}
//////////////////
}
TEST_F
(
TransEnv
,
cliReleaseHandleExcept
)
{
SRpcMsg
resp
=
{
0
};
SRpcMsg
req
=
{
0
};
for
(
int
i
=
0
;
i
<
3
;
i
++
)
{
memset
(
&
req
,
0
,
sizeof
(
req
));
req
.
info
=
resp
.
info
;
req
.
info
.
persistHandle
=
1
;
req
.
info
.
ahandle
=
(
void
*
)
1234
;
req
.
msgType
=
1
;
req
.
pCont
=
rpcMallocCont
(
10
);
req
.
contLen
=
10
;
tr
->
cliSendAndRecv
(
&
req
,
&
resp
);
if
(
i
==
1
)
{
std
::
cout
<<
"stop server"
<<
std
::
endl
;
tr
->
StopSrv
();
}
if
(
i
>
1
)
{
EXPECT_TRUE
(
resp
.
code
!=
0
);
}
}
//////////////////
}
// reopen later
// TEST_F(TransEnv, cliReleaseHandleExcept) {
// SRpcMsg resp = {0};
// SRpcMsg req = {0};
// for (int i = 0; i < 3; i++) {
// memset(&req, 0, sizeof(req));
// req.info = resp.info;
// req.info.persistHandle = 1;
// req.info.ahandle = (void *)1234;
// req.msgType = 1;
// req.pCont = rpcMallocCont(10);
// req.contLen = 10;
// tr->cliSendAndRecv(&req, &resp);
// if (i == 1) {
// std::cout << "stop server" << std::endl;
// tr->StopSrv();
// }
// if (i > 1) {
// EXPECT_TRUE(resp.code != 0);
// }
// }
// //////////////////
//}
TEST_F
(
TransEnv
,
srvContinueSend
)
{
tr
->
SetSrvContinueSend
(
processContinueSend
);
SRpcMsg
req
=
{
0
},
resp
=
{
0
};
...
...
source/util/src/terror.c
浏览文件 @
85847473
...
...
@@ -560,13 +560,16 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_ALREADY_EXIST, "Tsma already exists
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_NO_INDEX_IN_META
,
"No tsma index in meta"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_INVALID_ENV
,
"Invalid tsma env"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_INVALID_STAT
,
"Invalid tsma state"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_INVALID_PTR
,
"Invalid tsma pointer"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_INVALID_PARA
,
"Invalid tsma parameters"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_NO_INDEX_IN_CACHE
,
"No tsma index in cache"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TSMA_RM_SKEY_IN_HASH
,
"Rm tsma skey in cache"
)
//rsma
TAOS_DEFINE_ERROR
(
TSDB_CODE_RSMA_INVALID_ENV
,
"Invalid rsma env"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_RSMA_INVALID_STAT
,
"Invalid rsma state"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_INDEX_REBUILDING
,
"Index is rebuilding"
)
#ifdef TAOS_ERROR_C
...
...
tests/script/jenkins/basic.txt
浏览文件 @
85847473
...
...
@@ -58,7 +58,7 @@
# ---- mnode
./test.sh -f tsim/mnode/basic1.sim
./test.sh -f tsim/mnode/basic2.sim
./test.sh -f tsim/mnode/basic3.sim
#
./test.sh -f tsim/mnode/basic3.sim
./test.sh -f tsim/mnode/basic4.sim
./test.sh -f tsim/mnode/basic5.sim
...
...
tests/script/tsim/testsuit.sim
浏览文件 @
85847473
...
...
@@ -37,7 +37,7 @@ run tsim/db/error1.sim
run tsim/db/taosdlog.sim
run tsim/db/alter_option.sim
run tsim/mnode/basic1.sim
run tsim/mnode/basic3.sim
#
run tsim/mnode/basic3.sim
run tsim/mnode/basic2.sim
run tsim/parser/fourArithmetic-basic.sim
run tsim/parser/groupby-basic.sim
...
...
tests/system-test/1-insert/create_retentions.py
0 → 100644
浏览文件 @
85847473
import
datetime
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
*
PRIMARY_COL
=
"ts"
INT_COL
=
"c_int"
BINT_COL
=
"c_bint"
SINT_COL
=
"c_sint"
TINT_COL
=
"c_tint"
FLOAT_COL
=
"c_float"
DOUBLE_COL
=
"c_double"
BOOL_COL
=
"c_bool"
TINT_UN_COL
=
"c_tint_un"
SINT_UN_COL
=
"c_sint_un"
BINT_UN_COL
=
"c_bint_un"
INT_UN_COL
=
"c_int_un"
BINARY_COL
=
"c8"
NCHAR_COL
=
"c9"
TS_COL
=
"c10"
NUM_COL
=
[
INT_COL
,
BINT_COL
,
SINT_COL
,
TINT_COL
,
FLOAT_COL
,
DOUBLE_COL
,
]
CHAR_COL
=
[
BINARY_COL
,
NCHAR_COL
,
]
BOOLEAN_COL
=
[
BOOL_COL
,
]
TS_TYPE_COL
=
[
TS_COL
,
]
class
TDTestCase
:
def init(self, conn, logSql):
    # Test-framework entry hook: log which case file starts, then bind the
    # shared tdSql helper to a fresh cursor (True presumably enables SQL
    # logging — confirm against tdSql.init's signature).
    tdLog.debug(f"start to excute {__file__}")
    tdSql.init(conn.cursor(), True)
@property
def create_databases_sql_err(self):
    # CREATE DATABASE statements with invalid RETENTIONS clauses; each is
    # fed to tdSql.error() in test_create_databases, i.e. expected to fail.
    return [
        "create database if not exists db1 retentions 0s:1d",
        "create database if not exists db1 retentions 1s:1y",
        "create database if not exists db1 retentions 1s:1n",
        "create database if not exists db1 retentions 1s:1n,2s:2d,3s:3d,4s:4d",
    ]
@property
def create_databases_sql_current(self):
    # Valid CREATE DATABASE statements with RETENTIONS; executed (expected
    # to succeed) in test_create_databases.
    return [
        "create database db1 retentions 1s:1d",
        "create database db2 retentions 1s:1d,2m:2d,3h:3d",
    ]
@property
def alter_database_sql(self):
    # ALTER DATABASE ... RETENTIONS statements; test_create_databases passes
    # these to tdSql.error(), so altering retentions is expected to be
    # rejected. (The trailing comma inside the second SQL is intentional
    # malformed input.)
    return [
        "alter database db1 retentions 99h:99d",
        "alter database db2 retentions 97h:97d,98h:98d,99h:99d,",
    ]
@property
def create_stable_sql_err(self):
    # CREATE STABLE ... ROLLUP statements fed to tdSql.error(), i.e. expected
    # to fail — presumably rollup(ceil)/rollup(count) are unsupported rollup
    # functions and binary/nchar columns are not allowed in rollup stables;
    # TODO confirm against server-side validation rules.
    return [
        f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(ceil) delay 1",
        f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) delay 1",
        f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) delay 1",
        f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) rollup(avg) delay 1",
    ]
@property
def create_stable_sql_current(self):
    # A valid CREATE STABLE with rollup(avg); executed (expected to succeed)
    # in test_create_stb.
    return [
        f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(avg) delay 5",
    ]
def test_create_stb(self):
    # Verify rollup stable creation in db2: every statement from
    # create_stable_sql_err must be rejected, every statement from
    # create_stable_sql_current must succeed, and "show stables" must list
    # exactly the successfully created ones.
    tdSql.execute("use db2")
    for err_sql in self.create_stable_sql_err:
        tdSql.error(err_sql)
    for cur_sql in self.create_stable_sql_current:
        tdSql.execute(cur_sql)
    tdSql.query("show stables")
    tdSql.checkRows(len(self.create_stable_sql_current))
def test_create_databases(self):
    # Verify database creation with RETENTIONS: invalid clauses rejected,
    # valid ones accepted, and ALTER of retentions rejected. The "show
    # databases" result is queried but its rows are not asserted here.
    for err_sql in self.create_databases_sql_err:
        tdSql.error(err_sql)
    for cur_sql in self.create_databases_sql_current:
        tdSql.execute(cur_sql)
    tdSql.query("show databases")
    for alter_sql in self.alter_database_sql:
        tdSql.error(alter_sql)
def all_test(self):
    # Run every check of this case; database checks first because
    # test_create_stb switches into db2 created by them.
    self.test_create_databases()
    self.test_create_stb()
def
__create_tb
(
self
):
tdSql
.
prepare
()
tdLog
.
printNoPrefix
(
"==========step1:create table"
)
create_stb_sql
=
f
'''create table stb1(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp,
{
TINT_UN_COL
}
tinyint unsigned,
{
SINT_UN_COL
}
smallint unsigned,
{
INT_UN_COL
}
int unsigned,
{
BINT_UN_COL
}
bigint unsigned
) tags (t1 int)
'''
create_ntb_sql
=
f
'''create table t1(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp,
{
TINT_UN_COL
}
tinyint unsigned,
{
SINT_UN_COL
}
smallint unsigned,
{
INT_UN_COL
}
int unsigned,
{
BINT_UN_COL
}
bigint unsigned
)
'''
tdSql
.
execute
(
create_stb_sql
)
tdSql
.
execute
(
create_ntb_sql
)
for
i
in
range
(
4
):
tdSql
.
execute
(
f
'create table ct
{
i
+
1
}
using stb1 tags (
{
i
+
1
}
)'
)
def
__create_data_set
(
self
,
rows
):
now_time
=
int
(
datetime
.
datetime
.
timestamp
(
datetime
.
datetime
.
now
())
*
1000
)
pos_data
=
[]
neg_data
=
[]
spec_data
=
[]
for
i
in
range
(
rows
):
pos_data
.
append
(
(
now_time
-
i
*
1000
,
i
,
11111
*
i
,
111
*
i
%
32767
,
11
*
i
%
127
,
1.11
*
i
,
1100.0011
*
i
,
i
%
2
,
f
'binary
{
i
}
'
,
f
'nchar_测试_
{
i
}
'
,
now_time
+
1
*
i
,
11
*
i
%
127
,
111
*
i
%
32767
,
i
,
11111
*
i
)
)
neg_data
.
append
(
(
now_time
-
i
*
7776000000
,
-
i
,
-
11111
*
i
,
-
111
*
i
%
32767
,
-
11
*
i
%
127
,
-
1.11
*
i
,
-
1100.0011
*
i
,
i
%
2
,
f
'binary
{
i
}
'
,
f
'nchar_测试_
{
i
}
'
,
now_time
+
1
*
i
,
11
*
i
%
127
,
111
*
i
%
32767
,
i
,
11111
*
i
)
)
def
__insert_data
(
self
,
rows
):
now_time
=
int
(
datetime
.
datetime
.
timestamp
(
datetime
.
datetime
.
now
())
*
1000
)
for
i
in
range
(
rows
):
tdSql
.
execute
(
f
'''insert into ct1 values (
{
now_time
-
i
*
1000
}
,
{
i
}
,
{
11111
*
i
}
,
{
111
*
i
%
32767
}
,
{
11
*
i
%
127
}
,
{
1.11
*
i
}
,
{
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
,
{
11
*
i
%
127
}
,
{
111
*
i
%
32767
}
,
{
i
}
,
{
11111
*
i
}
)'''
)
tdSql
.
execute
(
f
'''insert into ct4 values (
{
now_time
-
i
*
7776000000
}
,
{
i
}
,
{
11111
*
i
}
,
{
111
*
i
%
32767
}
,
{
11
*
i
%
127
}
,
{
1.11
*
i
}
,
{
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
,
{
11
*
i
%
127
}
,
{
111
*
i
%
32767
}
,
{
i
}
,
{
11111
*
i
}
)'''
)
tdSql
.
execute
(
f
'''insert into ct2 values (
{
now_time
-
i
*
7776000000
}
,
{
-
i
}
,
{
-
11111
*
i
}
,
{
-
111
*
i
%
32767
}
,
{
-
11
*
i
%
127
}
,
{
-
1.11
*
i
}
,
{
-
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
,
{
11
*
i
%
127
}
,
{
111
*
i
%
32767
}
,
{
i
}
,
{
11111
*
i
}
)'''
)
tdSql
.
execute
(
f
'''insert into ct1 values
(
{
now_time
-
rows
*
5
}
, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0',
{
now_time
+
8
}
, 0, 0, 0, 0)
(
{
now_time
+
10000
}
,
{
rows
}
, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9',
{
now_time
+
9
}
, 0, 0, 0, 0 )
'''
)
tdSql
.
execute
(
f
'''insert into ct4 values
(
{
now_time
-
rows
*
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
rows
*
3888000000
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
5184000000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
30
)
}
, 32767, 127,
{
3.3
*
pow
(
10
,
38
)
}
,
{
1.3
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
, "binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
,
254, 65534,
{
pow
(
2
,
32
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
64
)
-
pow
(
2
,
31
)
}
)
(
{
now_time
+
2592000000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
31
)
}
, 32766, 126,
{
3.2
*
pow
(
10
,
38
)
}
,
{
1.2
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
, "binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
,
255, 65535,
{
pow
(
2
,
32
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
64
)
-
pow
(
2
,
30
)
}
)
'''
)
tdSql
.
execute
(
f
'''insert into ct2 values
(
{
now_time
-
rows
*
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
rows
*
3888000000
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
5184000000
}
,
{
-
1
*
pow
(
2
,
31
)
+
pow
(
2
,
15
)
}
,
{
-
1
*
pow
(
2
,
63
)
+
pow
(
2
,
30
)
}
, -32766, -126,
{
-
1
*
3.2
*
pow
(
10
,
38
)
}
,
{
-
1.2
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
, "binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
, 1, 1, 1, 1
)
(
{
now_time
+
2592000000
}
,
{
-
1
*
pow
(
2
,
31
)
+
pow
(
2
,
16
)
}
,
{
-
1
*
pow
(
2
,
63
)
+
pow
(
2
,
31
)
}
, -32767, -127,
{
-
3.3
*
pow
(
10
,
38
)
}
,
{
-
1.3
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
, "binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
, 1, 1, 1, 1
)
'''
)
for
i
in
range
(
rows
):
insert_data
=
f
'''insert into t1 values
(
{
now_time
-
i
*
3600000
}
,
{
i
}
,
{
i
*
11111
}
,
{
i
%
32767
}
,
{
i
%
127
}
,
{
i
*
1.11111
}
,
{
i
*
1000.1111
}
,
{
i
%
2
}
,
"binary_
{
i
}
", "nchar_测试_
{
i
}
",
{
now_time
-
1000
*
i
}
,
{
i
%
127
}
,
{
i
%
32767
}
,
{
i
}
,
{
i
*
11111
}
)
'''
tdSql
.
execute
(
insert_data
)
tdSql
.
execute
(
f
'''insert into t1 values
(
{
now_time
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
((
rows
//
2
)
*
60
+
30
)
*
60000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
(
{
now_time
-
rows
*
3600000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7200000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
30
)
}
, 32767, 127,
{
3.3
*
pow
(
10
,
38
)
}
,
{
1.3
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
, "binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
,
254, 65534,
{
pow
(
2
,
32
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
64
)
-
pow
(
2
,
31
)
}
)
(
{
now_time
+
3600000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
31
)
}
, 32766, 126,
{
3.2
*
pow
(
10
,
38
)
}
,
{
1.2
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
, "binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
,
255, 65535,
{
pow
(
2
,
32
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
64
)
-
pow
(
2
,
30
)
}
)
'''
)
def run(self):
    # Case driver: build tables, insert data, run all checks, then restart
    # dnode 1 and run the checks again to verify behavior after WAL replay.
    tdSql.prepare()

    tdLog.printNoPrefix("==========step1:create table")
    self.__create_tb()

    tdLog.printNoPrefix("==========step2:insert data")
    self.rows = 10
    self.__insert_data(self.rows)

    tdLog.printNoPrefix("==========step3:all check")
    self.all_test()

    # Drop the databases created by the checks so the post-restart pass
    # starts from the same state.
    tdSql.execute("drop database if exists db1 ")
    tdSql.execute("drop database if exists db2 ")

    tdDnodes.stop(1)
    tdDnodes.start(1)

    tdSql.execute("use db")

    tdLog.printNoPrefix("==========step4:after wal, all check again ")
    self.all_test()
def stop(self):
    # Test-framework teardown hook: release the SQL connection and report
    # success for this case file.
    tdSql.close()
    tdLog.success(f"{__file__} successfully executed")
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
tests/system-test/2-query/explain.py
0 → 100644
浏览文件 @
85847473
import
datetime
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
*
PRIMARY_COL
=
"ts"
INT_COL
=
"c1"
BINT_COL
=
"c2"
SINT_COL
=
"c3"
TINT_COL
=
"c4"
FLOAT_COL
=
"c5"
DOUBLE_COL
=
"c6"
BOOL_COL
=
"c7"
BINARY_COL
=
"c8"
NCHAR_COL
=
"c9"
TS_COL
=
"c10"
NUM_COL
=
[
INT_COL
,
BINT_COL
,
SINT_COL
,
TINT_COL
,
FLOAT_COL
,
DOUBLE_COL
,
]
CHAR_COL
=
[
BINARY_COL
,
NCHAR_COL
,
]
BOOLEAN_COL
=
[
BOOL_COL
,
]
TS_TYPE_COL
=
[
TS_COL
,
]
ALL_COL
=
[
INT_COL
,
BINT_COL
,
SINT_COL
,
TINT_COL
,
FLOAT_COL
,
DOUBLE_COL
,
BOOL_COL
,
BINARY_COL
,
NCHAR_COL
,
TS_COL
]
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
f
"start to excute
{
__file__
}
"
)
tdSql
.
init
(
conn
.
cursor
())
def
__query_condition
(
self
,
tbname
):
query_condition
=
[
f
"
{
tbname
}
.
{
col
}
"
for
col
in
ALL_COL
]
for
num_col
in
NUM_COL
:
query_condition
.
extend
(
(
f
"abs(
{
tbname
}
.
{
num_col
}
)"
,
f
"acos(
{
tbname
}
.
{
num_col
}
)"
,
f
"asin(
{
tbname
}
.
{
num_col
}
)"
,
f
"atan(
{
tbname
}
.
{
num_col
}
)"
,
f
"avg(
{
tbname
}
.
{
num_col
}
)"
,
f
"ceil(
{
tbname
}
.
{
num_col
}
)"
,
f
"cos(
{
tbname
}
.
{
num_col
}
)"
,
f
"count(
{
tbname
}
.
{
num_col
}
)"
,
f
"floor(
{
tbname
}
.
{
num_col
}
)"
,
f
"log(
{
tbname
}
.
{
num_col
}
,
{
tbname
}
.
{
num_col
}
)"
,
f
"max(
{
tbname
}
.
{
num_col
}
)"
,
f
"min(
{
tbname
}
.
{
num_col
}
)"
,
f
"pow(
{
tbname
}
.
{
num_col
}
, 2)"
,
f
"round(
{
tbname
}
.
{
num_col
}
)"
,
f
"sum(
{
tbname
}
.
{
num_col
}
)"
,
f
"sin(
{
tbname
}
.
{
num_col
}
)"
,
f
"sqrt(
{
tbname
}
.
{
num_col
}
)"
,
f
"tan(
{
tbname
}
.
{
num_col
}
)"
,
f
"cast(
{
tbname
}
.
{
num_col
}
as timestamp)"
,
)
)
query_condition
.
extend
((
f
"
{
num_col
}
+
{
any_col
}
"
for
any_col
in
ALL_COL
))
for
char_col
in
CHAR_COL
:
query_condition
.
extend
(
(
f
"sum(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"max(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"min(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"avg(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
)
)
query_condition
.
extend
(
(
1010
,
''' "test1234!@#$%^&*():'><?/.,][}{" '''
,
"null"
)
)
return
query_condition
def
__join_condition
(
self
,
tb_list
,
filter
=
PRIMARY_COL
,
INNER
=
False
):
table_reference
=
tb_list
[
0
]
join_condition
=
table_reference
join
=
"inner join"
if
INNER
else
"join"
for
i
in
range
(
len
(
tb_list
[
1
:])):
join_condition
+=
f
"
{
join
}
{
tb_list
[
i
+
1
]
}
on
{
table_reference
}
.
{
filter
}
=
{
tb_list
[
i
+
1
]
}
.
{
filter
}
"
return
join_condition
def
__where_condition
(
self
,
col
=
None
,
tbname
=
None
,
query_conditon
=
None
):
if
query_conditon
and
isinstance
(
query_conditon
,
str
):
if
query_conditon
.
startswith
(
"count"
):
query_conditon
=
query_conditon
[
6
:
-
1
]
elif
query_conditon
.
startswith
(
"max"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"sum"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"min"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"avg"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
if
query_conditon
:
return
f
" where
{
query_conditon
}
is not null"
if
col
in
NUM_COL
:
return
f
" where abs(
{
tbname
}
.
{
col
}
) >= 0"
if
col
in
CHAR_COL
:
return
f
" where lower(
{
tbname
}
.
{
col
}
) like 'bina%' or lower(
{
tbname
}
.
{
col
}
) like '_cha%' "
if
col
in
BOOLEAN_COL
:
return
f
" where
{
tbname
}
.
{
col
}
in (false, true) "
if
col
in
TS_TYPE_COL
or
col
in
PRIMARY_COL
:
return
f
" where cast(
{
tbname
}
.
{
col
}
as binary(16) ) is not null "
return
""
def
__group_condition
(
self
,
col
,
having
=
None
):
if
isinstance
(
col
,
str
):
if
col
.
startswith
(
"count"
):
col
=
col
[
6
:
-
1
]
elif
col
.
startswith
(
"max"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"sum"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"min"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"avg"
):
col
=
col
[
4
:
-
1
]
return
f
" group by
{
col
}
having
{
having
}
"
if
having
else
f
" group by
{
col
}
"
def
__single_sql
(
self
,
select_clause
,
from_clause
,
where_condition
=
""
,
group_condition
=
""
):
if
isinstance
(
select_clause
,
str
)
and
"on"
not
in
from_clause
and
select_clause
.
split
(
"."
)[
0
].
split
(
"("
)[
-
1
]
!=
from_clause
.
split
(
"."
)[
0
]:
return
return
f
"explain select
{
select_clause
}
from
{
from_clause
}
{
where_condition
}
{
group_condition
}
"
@
property
def
__tb_list
(
self
):
return
[
"ct1"
,
"ct4"
,
"t1"
,
"ct2"
,
"stb1"
,
]
def sql_list(self):
    """Generate every EXPLAIN statement for each table/clause combination.

    For each table and each candidate select expression, four variants are
    produced: where+having, bare, where-only, and group-only.  __single_sql
    returns None for rejected combinations, which are filtered out.
    """
    sqls = []
    for tb in self.__tb_list:
        for select_claus in self.__query_condition(tb):
            group_claus = self.__group_condition(col=select_claus)
            where_claus = self.__where_condition(query_conditon=select_claus)
            having_claus = self.__group_condition(
                col=select_claus, having=f"{select_claus} is not null"
            )
            sqls.extend(
                (
                    self.__single_sql(select_claus, tb, where_claus, having_claus),
                    self.__single_sql(select_claus, tb,),
                    self.__single_sql(select_claus, tb, where_condition=where_claus),
                    self.__single_sql(select_claus, tb, group_condition=group_claus),
                )
            )
    # drop the None placeholders produced by rejected combinations
    return list(filter(None, sqls))
def __get_type(self, col):
    """Return the TDengine type name of column ``col``.

    Probes the cursor's type predicate in the same order as the original
    if-chain; returns None when no known type matches.
    """
    type_names = (
        "BOOL", "INT", "BIGINT", "TINYINT", "SMALLINT", "FLOAT", "DOUBLE",
        "BINARY", "NCHAR", "TIMESTAMP", "JSON",
        "TINYINT UNSIGNED", "SMALLINT UNSIGNED", "INT UNSIGNED", "BIGINT UNSIGNED",
    )
    for name in type_names:
        if tdSql.cursor.istype(col, name):
            return name
    return None
def explain_check(self):
    """Run every generated EXPLAIN statement; each one must succeed."""
    sqls = self.sql_list()
    tdLog.printNoPrefix("===step 1: curent case, must return query OK")
    for i in range(len(sqls)):
        current = sqls[i]
        tdLog.info(f"sql: {current}")
        tdSql.query(current)
def __test_current(self):
    """Statements that EXPLAIN must accept, then the generated sweep."""
    accepted = (
        "explain select c1 from ct1",
        "explain select 1 from ct2",
        "explain select cast(ceil(c6) as bigint) from ct4 group by c6",
        "explain select count(c3) from ct4 group by c7 having count(c3) > 0",
        "explain select ct2.c3 from ct4 join ct2 on ct4.ts=ct2.ts",
        "explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ",
    )
    for sql in accepted:
        tdSql.query(sql)
    self.explain_check()
def __test_error(self):
    """Statements that EXPLAIN must reject."""
    tdLog.printNoPrefix("===step 0: err case, must return err")
    rejected = (
        "explain select hyperloglog(c1) from ct8",
        "explain show databases ",
        "explain show stables ",
        "explain show tables ",
        "explain show vgroups ",
        "explain show dnodes ",
    )
    for sql in rejected:
        tdSql.error(sql)
    tdSql.error(
        '''explain select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
        from ct1
        where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
        group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
        having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null '''
    )
def all_test(self):
    # Negative cases first so a crash in them is caught before the sweep.
    self.__test_error()
    self.__test_current()
def __create_tb(self):
    """Create the super table, a normal table, and four child tables."""
    tdLog.printNoPrefix("==========step1:create table")
    create_stb_sql = f'''create table stb1(
            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
             {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
             {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
    create_ntb_sql = f'''create table t1(
            ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
             {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
             {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
    tdSql.execute(create_stb_sql)
    tdSql.execute(create_ntb_sql)

    # child tables ct1..ct4, tag value = table index
    for i in range(4):
        tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
{
i
%
32767
},
{
i
%
127
},
{
i
*
1.11111
},
{
i
*
1000.1111
},
{
i
%
2
}
def __insert_data(self, rows):
    """Populate ct1/ct2/ct4 and t1 with ``rows`` regular rows plus boundary
    rows (NULL rows and type-limit values).  Timestamps are derived from
    "now" so repeated runs do not collide."""
    now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
    for i in range(rows):
        tdSql.execute(
            f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767}, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
        )
        tdSql.execute(
            f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767}, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
        )
        tdSql.execute(
            f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767}, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
        )
    # boundary rows for ct1
    tdSql.execute(
        f'''insert into ct1 values
        ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
        ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
        '''
    )
    # NULL rows and positive type-limit rows for ct4
    tdSql.execute(
        f'''insert into ct4 values
        ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        (
            { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
            { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
            "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
        (
            { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
            { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows - 1) % 2 },
            "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
        '''
    )
    # NULL rows and negative type-limit rows for ct2
    tdSql.execute(
        f'''insert into ct2 values
        ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        (
            { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
            { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 },
            "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
        (
            { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
            { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows - 1) % 2 },
            "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
        '''
    )
    # regular rows for the normal table t1
    for i in range(rows):
        insert_data = f'''insert into t1 values
            ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
            "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
            '''
        tdSql.execute(insert_data)
    # boundary rows for t1
    tdSql.execute(
        f'''insert into t1 values
        ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
        ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
            { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
            "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
        ( { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
            { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows - 1) % 2 },
            "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
        '''
    )
def run(self):
    """Test driver: build schema, insert data, check, restart dnode, re-check."""
    tdSql.prepare()

    tdLog.printNoPrefix("==========step1:create table")
    self.__create_tb()

    tdLog.printNoPrefix("==========step2:insert data")
    self.rows = 10
    self.__insert_data(self.rows)

    tdLog.printNoPrefix("==========step3:all check")
    self.all_test()

    # bounce the dnode so the second pass exercises WAL replay
    tdDnodes.stop(1)
    tdDnodes.start(1)
    tdSql.execute("use db")

    tdLog.printNoPrefix("==========step4:after wal, all check again ")
    self.all_test()
def stop(self):
    # framework teardown hook
    tdSql.close()
    tdLog.success(f"{__file__} successfully executed")
# register the case with both platform runners
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tests/system-test/2-query/histogram.py
浏览文件 @
85847473
import datetime

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

# primary key column name
PRIMARY_COL = "ts"

# data column names by position
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

# column groups by type family
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]

ALL_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL]
class TDTestCase:
    """Query-generator system test case (spread() coverage)."""

    def init(self, conn, logSql):
        # framework entry point: bind the shared tdSql helper to this connection
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
def __query_condition(self, tbname):
    """Build the list of candidate select expressions against ``tbname``:
    casts of every column, scalar/aggregate functions over numeric columns,
    cross-column sums, aggregates over cast character columns, and one
    bare integer literal."""
    query_condition = [f"cast( {col} as bigint)" for col in ALL_COL]
    for num_col in NUM_COL:
        query_condition.extend(
            (
                f"{tbname}.{num_col}",
                f"abs( {tbname}.{num_col} )",
                f"acos( {tbname}.{num_col} )",
                f"asin( {tbname}.{num_col} )",
                f"atan( {tbname}.{num_col} )",
                f"avg( {tbname}.{num_col} )",
                f"ceil( {tbname}.{num_col} )",
                f"cos( {tbname}.{num_col} )",
                f"count( {tbname}.{num_col} )",
                f"floor( {tbname}.{num_col} )",
                f"log( {tbname}.{num_col},  {tbname}.{num_col})",
                f"max( {tbname}.{num_col} )",
                f"min( {tbname}.{num_col} )",
                f"pow( {tbname}.{num_col}, 2)",
                f"round( {tbname}.{num_col} )",
                f"sum( {tbname}.{num_col} )",
                f"sin( {tbname}.{num_col} )",
                f"sqrt( {tbname}.{num_col} )",
                f"tan( {tbname}.{num_col} )",
                f"cast( {tbname}.{num_col} as timestamp)",
            )
        )
        # cross-column additions (plain loop instead of a throwaway
        # list comprehension; appended strings are identical)
        for any_col in ALL_COL:
            query_condition.append(f"{num_col} + {any_col}")
    for char_col in CHAR_COL:
        query_condition.extend(
            (
                f"count( {tbname}.{char_col} )",
                f"sum(cast( {tbname}.{char_col} ) as bigint)",
                f"max(cast( {tbname}.{char_col} ) as bigint)",
                f"min(cast( {tbname}.{char_col} ) as bigint)",
                f"avg(cast( {tbname}.{char_col} ) as bigint)",
            )
        )
    query_condition.extend(
        (
            1010,
        )
    )
    return query_condition
def
__join_condition
(
self
,
tb_list
,
filter
=
PRIMARY_COL
,
INNER
=
False
):
table_reference
=
tb_list
[
0
]
join_condition
=
table_reference
join
=
"inner join"
if
INNER
else
"join"
for
i
in
range
(
len
(
tb_list
[
1
:])):
join_condition
+=
f
"
{
join
}
{
tb_list
[
i
+
1
]
}
on
{
table_reference
}
.
{
filter
}
=
{
tb_list
[
i
+
1
]
}
.
{
filter
}
"
return
join_condition
def
__where_condition
(
self
,
col
=
None
,
tbname
=
None
,
query_conditon
=
None
):
if
query_conditon
and
isinstance
(
query_conditon
,
str
):
if
query_conditon
.
startswith
(
"count"
):
query_conditon
=
query_conditon
[
6
:
-
1
]
elif
query_conditon
.
startswith
(
"max"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"sum"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"min"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
if
query_conditon
:
return
f
" where
{
query_conditon
}
is not null"
if
col
in
NUM_COL
:
return
f
" where abs(
{
tbname
}
.
{
col
}
) >= 0"
if
col
in
CHAR_COL
:
return
f
" where lower(
{
tbname
}
.
{
col
}
) like 'bina%' or lower(
{
tbname
}
.
{
col
}
) like '_cha%' "
if
col
in
BOOLEAN_COL
:
return
f
" where
{
tbname
}
.
{
col
}
in (false, true) "
if
col
in
TS_TYPE_COL
or
col
in
PRIMARY_COL
:
return
f
" where cast(
{
tbname
}
.
{
col
}
as binary(16) ) is not null "
return
""
def
__group_condition
(
self
,
col
,
having
=
None
):
if
isinstance
(
col
,
str
):
if
col
.
startswith
(
"count"
):
col
=
col
[
6
:
-
1
]
elif
col
.
startswith
(
"max"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"sum"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"min"
):
col
=
col
[
4
:
-
1
]
return
f
" group by
{
col
}
having
{
having
}
"
if
having
else
f
" group by
{
col
}
"
def
__single_sql
(
self
,
select_clause
,
from_clause
,
where_condition
=
""
,
group_condition
=
""
):
if
isinstance
(
select_clause
,
str
)
and
"on"
not
in
from_clause
and
select_clause
.
split
(
"."
)[
0
]
!=
from_clause
.
split
(
"."
)[
0
]:
return
return
f
"select spread(
{
select_clause
}
) from
{
from_clause
}
{
where_condition
}
{
group_condition
}
"
@
property
def
__tb_list
(
self
):
return
[
"ct1"
,
"ct4"
,
"t1"
,
"ct2"
,
"stb1"
,
]
def
sql_list
(
self
):
sqls
=
[]
__no_join_tblist
=
self
.
__tb_list
for
tb
in
__no_join_tblist
:
select_claus_list
=
self
.
__query_condition
(
tb
)
for
select_claus
in
select_claus_list
:
group_claus
=
self
.
__group_condition
(
col
=
select_claus
)
where_claus
=
self
.
__where_condition
(
query_conditon
=
select_claus
)
having_claus
=
self
.
__group_condition
(
col
=
select_claus
,
having
=
f
"
{
select_claus
}
is not null"
)
sqls
.
extend
(
(
self
.
__single_sql
(
select_claus
,
tb
,
where_claus
,
having_claus
),
self
.
__single_sql
(
select_claus
,
tb
,),
self
.
__single_sql
(
select_claus
,
tb
,
where_condition
=
where_claus
),
self
.
__single_sql
(
select_claus
,
tb
,
group_condition
=
group_claus
),
)
)
# return filter(None, sqls)
return
list
(
filter
(
None
,
sqls
))
def
__get_type
(
self
,
col
):
if
tdSql
.
cursor
.
istype
(
col
,
"BOOL"
):
return
"BOOL"
if
tdSql
.
cursor
.
istype
(
col
,
"INT"
):
return
"INT"
if
tdSql
.
cursor
.
istype
(
col
,
"BIGINT"
):
return
"BIGINT"
if
tdSql
.
cursor
.
istype
(
col
,
"TINYINT"
):
return
"TINYINT"
if
tdSql
.
cursor
.
istype
(
col
,
"SMALLINT"
):
return
"SMALLINT"
if
tdSql
.
cursor
.
istype
(
col
,
"FLOAT"
):
return
"FLOAT"
if
tdSql
.
cursor
.
istype
(
col
,
"DOUBLE"
):
return
"DOUBLE"
if
tdSql
.
cursor
.
istype
(
col
,
"BINARY"
):
return
"BINARY"
if
tdSql
.
cursor
.
istype
(
col
,
"NCHAR"
):
return
"NCHAR"
if
tdSql
.
cursor
.
istype
(
col
,
"TIMESTAMP"
):
return
"TIMESTAMP"
if
tdSql
.
cursor
.
istype
(
col
,
"JSON"
):
return
"JSON"
if
tdSql
.
cursor
.
istype
(
col
,
"TINYINT UNSIGNED"
):
return
"TINYINT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"SMALLINT UNSIGNED"
):
return
"SMALLINT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"INT UNSIGNED"
):
return
"INT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"BIGINT UNSIGNED"
):
return
"BIGINT UNSIGNED"
def spread_check(self):
    """Run every generated spread() statement; each one must succeed."""
    sqls = self.sql_list()
    tdLog.printNoPrefix("===step 1: curent case, must return query OK")
    for i in range(len(sqls)):
        current = sqls[i]
        tdLog.info(f"sql: {current}")
        tdSql.query(current)
def __test_current(self):
    """Fixed spread() queries with known row counts, then the generated sweep."""
    checks = (
        ("select spread(ts) from ct1", 1),
        ("select spread(c1) from ct2", 1),
        ("select spread(c1) from ct4 group by c1", self.rows + 3),
        ("select spread(c1) from ct4 group by c7", 3),
        ("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts", 1),
    )
    for sql, expected_rows in checks:
        tdSql.query(sql)
        tdSql.checkRows(expected_rows)
    self.spread_check()
def __test_error(self):
    """spread() invocations that must be rejected (arity and type errors)."""
    tdLog.printNoPrefix("===step 0: err case, must return err")
    rejected = (
        "select spread() from ct1",
        "select spread(1, 2) from ct2",
        f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4",
        f"select spread({BOOLEAN_COL[0]}) from t1",
        f"select spread({CHAR_COL[0]}) from stb1",
    )
    for sql in rejected:
        tdSql.error(sql)
    # NOTE(review): further negative cases (bracketed expression lists,
    # union of selects) are intentionally disabled pending engine support.
def
all_test
(
self
):
self
.
__test_error
()
self
.
__test_current
()
def
__create_tb
(
self
):
tdLog
.
printNoPrefix
(
"==========step1:create table"
)
create_stb_sql
=
f
'''create table stb1(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp
) tags (t1 int)
'''
create_ntb_sql
=
f
'''create table t1(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp
)
'''
tdSql
.
execute
(
create_stb_sql
)
tdSql
.
execute
(
create_ntb_sql
)
for
i
in
range
(
4
):
tdSql
.
execute
(
f
'create table ct
{
i
+
1
}
using stb1 tags (
{
i
+
1
}
)'
)
{
i
%
32767
},
{
i
%
127
},
{
i
*
1.11111
},
{
i
*
1000.1111
},
{
i
%
2
}
def
__insert_data
(
self
,
rows
):
now_time
=
int
(
datetime
.
datetime
.
timestamp
(
datetime
.
datetime
.
now
())
*
1000
)
for
i
in
range
(
rows
):
tdSql
.
execute
(
f
"insert into ct1 values (
{
now_time
-
i
*
1000
}
,
{
i
}
,
{
11111
*
i
}
,
{
111
*
i
%
32767
}
,
{
11
*
i
%
127
}
,
{
1.11
*
i
}
,
{
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
)"
)
tdSql
.
execute
(
f
"insert into ct4 values (
{
now_time
-
i
*
7776000000
}
,
{
i
}
,
{
11111
*
i
}
,
{
111
*
i
%
32767
}
,
{
11
*
i
%
127
}
,
{
1.11
*
i
}
,
{
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
)"
)
tdSql
.
execute
(
f
"insert into ct2 values (
{
now_time
-
i
*
7776000000
}
,
{
-
i
}
,
{
-
11111
*
i
}
,
{
-
111
*
i
%
32767
}
,
{
-
11
*
i
%
127
}
,
{
-
1.11
*
i
}
,
{
-
1100.0011
*
i
}
,
{
i
%
2
}
, 'binary
{
i
}
', 'nchar_测试_
{
i
}
',
{
now_time
+
1
*
i
}
)"
)
tdSql
.
execute
(
f
'''insert into ct1 values
(
{
now_time
-
rows
*
5
}
, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0',
{
now_time
+
8
}
)
(
{
now_time
+
10000
}
,
{
rows
}
, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9',
{
now_time
+
9
}
)
'''
)
tdSql
.
execute
(
f
'''insert into ct4 values
(
{
now_time
-
rows
*
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
rows
*
3888000000
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
5184000000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
30
)
}
, 32767, 127,
{
3.3
*
pow
(
10
,
38
)
}
,
{
1.3
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
, "binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
)
(
{
now_time
+
2592000000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
31
)
}
, 32766, 126,
{
3.2
*
pow
(
10
,
38
)
}
,
{
1.2
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
, "binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
)
'''
)
tdSql
.
execute
(
f
'''insert into ct2 values
(
{
now_time
-
rows
*
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
rows
*
3888000000
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7776000000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
5184000000
}
,
{
-
1
*
pow
(
2
,
31
)
+
pow
(
2
,
15
)
}
,
{
-
1
*
pow
(
2
,
63
)
+
pow
(
2
,
30
)
}
, -32766, -126,
{
-
1
*
3.2
*
pow
(
10
,
38
)
}
,
{
-
1.2
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
, "binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
)
(
{
now_time
+
2592000000
}
,
{
-
1
*
pow
(
2
,
31
)
+
pow
(
2
,
16
)
}
,
{
-
1
*
pow
(
2
,
63
)
+
pow
(
2
,
31
)
}
, -32767, -127,
{
-
3.3
*
pow
(
10
,
38
)
}
,
{
-
1.3
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
, "binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
)
'''
)
for
i
in
range
(
rows
):
insert_data
=
f
'''insert into t1 values
(
{
now_time
-
i
*
3600000
}
,
{
i
}
,
{
i
*
11111
}
,
{
i
%
32767
}
,
{
i
%
127
}
,
{
i
*
1.11111
}
,
{
i
*
1000.1111
}
,
{
i
%
2
}
,
"binary_
{
i
}
", "nchar_测试_
{
i
}
",
{
now_time
-
1000
*
i
}
)
'''
tdSql
.
execute
(
insert_data
)
tdSql
.
execute
(
f
'''insert into t1 values
(
{
now_time
+
10800000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
((
rows
//
2
)
*
60
+
30
)
*
60000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
-
rows
*
3600000
}
, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{
now_time
+
7200000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
15
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
30
)
}
, 32767, 127,
{
3.3
*
pow
(
10
,
38
)
}
,
{
1.3
*
pow
(
10
,
308
)
}
,
{
rows
%
2
}
,
"binary_limit-1", "nchar_测试_limit-1",
{
now_time
-
86400000
}
)
(
{
now_time
+
3600000
}
,
{
pow
(
2
,
31
)
-
pow
(
2
,
16
)
}
,
{
pow
(
2
,
63
)
-
pow
(
2
,
31
)
}
, 32766, 126,
{
3.2
*
pow
(
10
,
38
)
}
,
{
1.2
*
pow
(
10
,
308
)
}
,
{
(
rows
-
1
)
%
2
}
,
"binary_limit-2", "nchar_测试_limit-2",
{
now_time
-
172800000
}
)
'''
)
def
run
(
self
):
tdSql
.
prepare
()
tdLog
.
printNoPrefix
(
"==========step1:create table"
)
self
.
__create_tb
()
tdLog
.
printNoPrefix
(
"==========step2:insert data"
)
self
.
rows
=
10
self
.
__insert_data
(
self
.
rows
)
tdLog
.
printNoPrefix
(
"==========step3:all check"
)
self
.
all_test
()
tdDnodes
.
stop
(
1
)
tdDnodes
.
start
(
1
)
tdSql
.
execute
(
"use db"
)
tdLog
.
printNoPrefix
(
"==========step4:after wal, all check again "
)
self
.
all_test
()
def
stop
(
self
):
tdSql
.
close
()
tdLog
.
success
(
f
"
{
__file__
}
successfully executed"
)
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
###################################################################
# Copyright (c) 2021 by TAOS Technologies, Inc.
...
...
@@ -532,21 +170,21 @@ class TDTestCase:
tdSql
.
error
(
'select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
error
(
'select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
error
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
error
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
error
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
query
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
query
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
query
(
'select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
error
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
error
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
error
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
query
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
query
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
query
(
'select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
error
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
error
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
error
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
query
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
query
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
query
(
'select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
error
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
error
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
error
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
query
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
query
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from ctb;'
)
tdSql
.
query
(
'select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from tb;'
)
tdSql
.
error
(
'select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from stb;'
)
tdSql
.
error
(
'select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from ctb;'
)
...
...
tests/system-test/2-query/hyperloglog.py
浏览文件 @
85847473
...
...
@@ -33,50 +33,7 @@ class TDTestCase:
tdSql
.
init
(
conn
.
cursor
())
def
__query_condition
(
self
,
tbname
):
query_condition
=
[
f
"cast(
{
col
}
as bigint)"
for
col
in
ALL_COL
]
for
num_col
in
NUM_COL
:
query_condition
.
extend
(
(
f
"
{
tbname
}
.
{
num_col
}
"
,
f
"abs(
{
tbname
}
.
{
num_col
}
)"
,
f
"acos(
{
tbname
}
.
{
num_col
}
)"
,
f
"asin(
{
tbname
}
.
{
num_col
}
)"
,
f
"atan(
{
tbname
}
.
{
num_col
}
)"
,
f
"avg(
{
tbname
}
.
{
num_col
}
)"
,
f
"ceil(
{
tbname
}
.
{
num_col
}
)"
,
f
"cos(
{
tbname
}
.
{
num_col
}
)"
,
f
"count(
{
tbname
}
.
{
num_col
}
)"
,
f
"floor(
{
tbname
}
.
{
num_col
}
)"
,
f
"log(
{
tbname
}
.
{
num_col
}
,
{
tbname
}
.
{
num_col
}
)"
,
f
"max(
{
tbname
}
.
{
num_col
}
)"
,
f
"min(
{
tbname
}
.
{
num_col
}
)"
,
f
"pow(
{
tbname
}
.
{
num_col
}
, 2)"
,
f
"round(
{
tbname
}
.
{
num_col
}
)"
,
f
"sum(
{
tbname
}
.
{
num_col
}
)"
,
f
"sin(
{
tbname
}
.
{
num_col
}
)"
,
f
"sqrt(
{
tbname
}
.
{
num_col
}
)"
,
f
"tan(
{
tbname
}
.
{
num_col
}
)"
,
f
"cast(
{
tbname
}
.
{
num_col
}
as timestamp)"
,
)
)
query_condition
.
extend
((
f
"
{
num_col
}
+
{
any_col
}
"
for
any_col
in
ALL_COL
))
for
char_col
in
CHAR_COL
:
query_condition
.
extend
(
(
f
"count(
{
tbname
}
.
{
char_col
}
)"
,
f
"sum(cast(
{
tbname
}
.
{
char_col
}
) as bigint)"
,
f
"max(cast(
{
tbname
}
.
{
char_col
}
) as bigint)"
,
f
"min(cast(
{
tbname
}
.
{
char_col
}
) as bigint)"
,
f
"avg(cast(
{
tbname
}
.
{
char_col
}
) as bigint)"
,
)
)
# query_condition.extend(
# (
# 1010,
# )
# )
return
query_condition
return
[
f
"
{
any_col
}
"
for
any_col
in
ALL_COL
]
def
__join_condition
(
self
,
tb_list
,
filter
=
PRIMARY_COL
,
INNER
=
False
):
table_reference
=
tb_list
[
0
]
...
...
@@ -124,7 +81,7 @@ class TDTestCase:
return
f
" group by
{
col
}
having
{
having
}
"
if
having
else
f
" group by
{
col
}
"
def
__single_sql
(
self
,
select_clause
,
from_clause
,
where_condition
=
""
,
group_condition
=
""
):
if
isinstance
(
select_clause
,
str
)
and
"on"
not
in
from_clause
and
select_clause
.
split
(
"."
)[
0
]
!=
from_clause
.
split
(
"."
)[
0
]:
if
isinstance
(
select_clause
,
str
)
and
"on"
not
in
from_clause
and
select_clause
.
split
(
"."
)[
0
]
.
split
(
"("
)[
-
1
]
!=
from_clause
.
split
(
"."
)[
0
]:
return
return
f
"select hyperloglog(
{
select_clause
}
) from
{
from_clause
}
{
where_condition
}
{
group_condition
}
"
...
...
@@ -191,7 +148,7 @@ class TDTestCase:
if
tdSql
.
cursor
.
istype
(
col
,
"BIGINT UNSIGNED"
):
return
"BIGINT UNSIGNED"
def
spread
_check
(
self
):
def
hyperloglog
_check
(
self
):
sqls
=
self
.
sql_list
()
tdLog
.
printNoPrefix
(
"===step 1: curent case, must return query OK"
)
for
i
in
range
(
len
(
sqls
)):
...
...
@@ -214,15 +171,16 @@ class TDTestCase:
for
i
in
range
(
tdSql
.
queryRows
):
tdSql
.
checkData
(
i
,
0
,
1
)
if
tdSql
.
queryResult
[
i
][
1
]
is
not
None
else
tdSql
.
checkData
(
i
,
0
,
0
)
self
.
spread_check
()
self
.
hyperloglog_check
()
def
__test_error
(
self
):
tdLog
.
printNoPrefix
(
"===step 0: err case, must return err"
)
tdSql
.
error
(
"select hyperloglog() from ct1"
)
tdSql
.
error
(
"select hyperloglog(c1, c2) from ct2"
)
# tdSql.error( "select hyperloglog(1) from stb1" )
# tdSql.error( "select hyperloglog(abs(c1)) from ct4" )
tdSql
.
error
(
"select hyperloglog(count(c1)) from t1"
)
# tdSql.error( "select hyperloglog(1) from ct2" )
tdSql
.
error
(
f
"select hyperloglog(
{
NUM_COL
[
0
]
}
,
{
NUM_COL
[
1
]
}
) from ct4"
)
tdSql
.
error
(
''' select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
...
...
tests/system-test/2-query/leastsquares.py
0 → 100644
浏览文件 @
85847473
import datetime

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

# primary key column name
PRIMARY_COL = "ts"

# data column names by position
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

# column groups by type family
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]

ALL_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL]
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
f
"start to excute
{
__file__
}
"
)
tdSql
.
init
(
conn
.
cursor
())
def
__query_condition
(
self
,
tbname
):
query_condition
=
[
f
"
{
tbname
}
.
{
col
}
"
for
col
in
ALL_COL
]
for
num_col
in
NUM_COL
:
query_condition
.
extend
(
(
f
"abs(
{
tbname
}
.
{
num_col
}
)"
,
f
"acos(
{
tbname
}
.
{
num_col
}
)"
,
f
"asin(
{
tbname
}
.
{
num_col
}
)"
,
f
"atan(
{
tbname
}
.
{
num_col
}
)"
,
f
"avg(
{
tbname
}
.
{
num_col
}
)"
,
f
"ceil(
{
tbname
}
.
{
num_col
}
)"
,
f
"cos(
{
tbname
}
.
{
num_col
}
)"
,
f
"count(
{
tbname
}
.
{
num_col
}
)"
,
f
"floor(
{
tbname
}
.
{
num_col
}
)"
,
f
"log(
{
tbname
}
.
{
num_col
}
,
{
tbname
}
.
{
num_col
}
)"
,
f
"max(
{
tbname
}
.
{
num_col
}
)"
,
f
"min(
{
tbname
}
.
{
num_col
}
)"
,
f
"pow(
{
tbname
}
.
{
num_col
}
, 2)"
,
f
"round(
{
tbname
}
.
{
num_col
}
)"
,
f
"sum(
{
tbname
}
.
{
num_col
}
)"
,
f
"sin(
{
tbname
}
.
{
num_col
}
)"
,
f
"sqrt(
{
tbname
}
.
{
num_col
}
)"
,
f
"tan(
{
tbname
}
.
{
num_col
}
)"
,
f
"cast(
{
tbname
}
.
{
num_col
}
as timestamp)"
,
)
)
query_condition
.
extend
((
f
"
{
num_col
}
+
{
any_col
}
"
for
any_col
in
ALL_COL
))
for
char_col
in
CHAR_COL
:
query_condition
.
extend
(
(
f
"sum(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"max(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"min(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
f
"avg(cast(
{
tbname
}
.
{
char_col
}
as bigint ))"
,
)
)
query_condition
.
extend
(
(
1010
,
''' "test1234!@#$%^&*():'><?/.,][}{" '''
,
"null"
)
)
return
query_condition
def
__join_condition
(
self
,
tb_list
,
filter
=
PRIMARY_COL
,
INNER
=
False
):
table_reference
=
tb_list
[
0
]
join_condition
=
table_reference
join
=
"inner join"
if
INNER
else
"join"
for
i
in
range
(
len
(
tb_list
[
1
:])):
join_condition
+=
f
"
{
join
}
{
tb_list
[
i
+
1
]
}
on
{
table_reference
}
.
{
filter
}
=
{
tb_list
[
i
+
1
]
}
.
{
filter
}
"
return
join_condition
def
__where_condition
(
self
,
col
=
None
,
tbname
=
None
,
query_conditon
=
None
):
if
query_conditon
and
isinstance
(
query_conditon
,
str
):
if
query_conditon
.
startswith
(
"count"
):
query_conditon
=
query_conditon
[
6
:
-
1
]
elif
query_conditon
.
startswith
(
"max"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"sum"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"min"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
elif
query_conditon
.
startswith
(
"avg"
):
query_conditon
=
query_conditon
[
4
:
-
1
]
if
query_conditon
:
return
f
" where
{
query_conditon
}
is not null"
if
col
in
NUM_COL
:
return
f
" where abs(
{
tbname
}
.
{
col
}
) >= 0"
if
col
in
CHAR_COL
:
return
f
" where lower(
{
tbname
}
.
{
col
}
) like 'bina%' or lower(
{
tbname
}
.
{
col
}
) like '_cha%' "
if
col
in
BOOLEAN_COL
:
return
f
" where
{
tbname
}
.
{
col
}
in (false, true) "
if
col
in
TS_TYPE_COL
or
col
in
PRIMARY_COL
:
return
f
" where cast(
{
tbname
}
.
{
col
}
as binary(16) ) is not null "
return
""
def
__group_condition
(
self
,
col
,
having
=
None
):
if
isinstance
(
col
,
str
):
if
col
.
startswith
(
"count"
):
col
=
col
[
6
:
-
1
]
elif
col
.
startswith
(
"max"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"sum"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"min"
):
col
=
col
[
4
:
-
1
]
elif
col
.
startswith
(
"avg"
):
col
=
col
[
4
:
-
1
]
return
f
" group by
{
col
}
having
{
having
}
"
if
having
else
f
" group by
{
col
}
"
def
__single_sql
(
self
,
select_clause
,
from_clause
,
start_val
=
None
,
step_val
=
None
,
where_condition
=
""
,
group_condition
=
""
):
if
isinstance
(
select_clause
,
str
)
and
"on"
not
in
from_clause
and
select_clause
.
split
(
"."
)[
0
].
split
(
"("
)[
-
1
]
!=
from_clause
.
split
(
"."
)[
0
]:
return
return
f
"select leastsquares(
{
select_clause
}
,
{
start_val
}
,
{
step_val
}
) from
{
from_clause
}
{
where_condition
}
{
group_condition
}
"
@
property
def
__tb_list
(
self
):
return
[
"ct1"
,
"ct4"
,
"t1"
,
"ct2"
,
"stb1"
,
]
@
property
def
start_step_val
(
self
):
return
[
1
,
0
,
1.25
,
-
2.5
,
True
,
False
,
None
,
""
,
"str"
,
]
    def sql_list(self):
        """Generate (valid_sqls, invalid_sqls) for leastsquares() over every target table.

        For each table and each candidate select expression, three SQLs are produced
        per start/step argument.  Non-int (or bool) arguments, and expressions over
        bool/binary/nchar/timestamp columns, land in the error list; the rest are
        expected to succeed.
        """
        current_sqls = []
        err_sqls = []
        __no_join_tblist = self.__tb_list
        for tb in __no_join_tblist:
            # NOTE(review): __query_condition is defined earlier in this file, outside this chunk.
            select_claus_list = self.__query_condition(tb)
            for select_claus in select_claus_list:
                group_claus = self.__group_condition(col=select_claus)
                where_claus = self.__where_condition(query_conditon=select_claus)
                having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                for arg in self.start_step_val:
                    # start/step must be plain ints: bools and non-ints are error cases.
                    if not isinstance(arg, int) or isinstance(arg, bool):
                        err_sqls.extend(
                            (
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
                            )
                        )
                    # leastsquares() rejects bool/binary/nchar/timestamp column expressions.
                    elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]):
                        err_sqls.extend(
                            (
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
                            )
                        )
                    else:
                        current_sqls.extend(
                            (
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus),
                                self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus),
                            )
                        )

        # return filter(None, sqls)
        # __single_sql returns None for mismatched select/from pairs; drop those entries.
        return list(filter(None, current_sqls)), list(filter(None, err_sqls))
def
__get_type
(
self
,
col
):
if
tdSql
.
cursor
.
istype
(
col
,
"BOOL"
):
return
"BOOL"
if
tdSql
.
cursor
.
istype
(
col
,
"INT"
):
return
"INT"
if
tdSql
.
cursor
.
istype
(
col
,
"BIGINT"
):
return
"BIGINT"
if
tdSql
.
cursor
.
istype
(
col
,
"TINYINT"
):
return
"TINYINT"
if
tdSql
.
cursor
.
istype
(
col
,
"SMALLINT"
):
return
"SMALLINT"
if
tdSql
.
cursor
.
istype
(
col
,
"FLOAT"
):
return
"FLOAT"
if
tdSql
.
cursor
.
istype
(
col
,
"DOUBLE"
):
return
"DOUBLE"
if
tdSql
.
cursor
.
istype
(
col
,
"BINARY"
):
return
"BINARY"
if
tdSql
.
cursor
.
istype
(
col
,
"NCHAR"
):
return
"NCHAR"
if
tdSql
.
cursor
.
istype
(
col
,
"TIMESTAMP"
):
return
"TIMESTAMP"
if
tdSql
.
cursor
.
istype
(
col
,
"JSON"
):
return
"JSON"
if
tdSql
.
cursor
.
istype
(
col
,
"TINYINT UNSIGNED"
):
return
"TINYINT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"SMALLINT UNSIGNED"
):
return
"SMALLINT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"INT UNSIGNED"
):
return
"INT UNSIGNED"
if
tdSql
.
cursor
.
istype
(
col
,
"BIGINT UNSIGNED"
):
return
"BIGINT UNSIGNED"
def
leastsquares_check
(
self
):
current_sqls
,
err_sqls
=
self
.
sql_list
()
for
i
in
range
(
len
(
err_sqls
)):
tdSql
.
error
(
err_sqls
[
i
])
tdLog
.
printNoPrefix
(
"===step 1: curent case, must return query OK"
)
for
i
in
range
(
len
(
current_sqls
)):
tdLog
.
info
(
f
"sql:
{
current_sqls
[
i
]
}
"
)
tdSql
.
query
(
current_sqls
[
i
])
    def __test_current(self):
        """Positive-path checks; the explain queries below are parked, pending support."""
        # tdSql.query("explain select c1 from ct1")
        # tdSql.query("explain select 1 from ct2")
        # tdSql.query("explain select cast(ceil(c6) as bigint) from ct4 group by c6")
        # tdSql.query("explain select count(c3) from ct4 group by c7 having count(c3) > 0")
        # tdSql.query("explain select ct2.c3 from ct4 join ct2 on ct4.ts=ct2.ts")
        # tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ")
        self.leastsquares_check()
    def __test_error(self):
        """Negative-path checks: every statement here must be rejected by the server."""
        tdLog.printNoPrefix("===step 0: err case, must return err")
        tdSql.error( "select leastsquares(c1) from ct8" )        # missing start/step arguments
        tdSql.error( "select leastsquares(c1, 1) from ct1 " )    # missing step argument
        tdSql.error( "select leastsquares(c1, null, 1) from ct1 " )  # null start
        tdSql.error( "select leastsquares(c1, 1, null) from ct1 " )  # null step
        tdSql.error( "select leastsquares(null, 1, 1) from ct1 " )   # null expression
        # A list literal is not a valid expression anywhere in the statement.
        tdSql.error( '''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
            from ct1
            where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
            group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
            having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
    def all_test(self):
        # Error cases first, then the expected-success cases.
        self.__test_error()
        self.__test_current()
    def __create_tb(self):
        """Create the fixture tables: super table stb1, normal table t1, and child tables ct1..ct4."""
        tdLog.printNoPrefix("==========step1:create table")
        # NOTE(review): the *_COL names are module-level column-name constants defined
        # earlier in this file — not visible in this chunk.
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                 {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                 {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                 {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                 {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)
        # Four child tables of stb1, tagged 1..4.
        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
{
i
%
32767
},
{
i
%
127
},
{
i
*
1.11111
},
{
i
*
1000.1111
},
{
i
%
2
}
    def __insert_data(self, rows):
        """Populate ct1/ct2/ct4/t1 with `rows` regular rows plus NULL and limit-value rows.

        ct1 holds ascending values at 1s spacing, ct4 the same values at ~90-day
        spacing, ct2 the negated values; t1 gets hourly rows.  Each table also
        receives boundary rows near the numeric type limits.
        """
        # Current wall-clock time in epoch milliseconds, used as the timestamp anchor.
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( {now_time - i * 1000}, {i}, {11111 * i}, {111 * i % 32767}, {11 * i % 127}, {1.11 * i}, {1100.0011 * i}, {i % 2}, 'binary{i}', 'nchar_测试_{i}', {now_time + 1 * i} )"
            )
            tdSql.execute(
                f"insert into ct4 values ( {now_time - i * 7776000000}, {i}, {11111 * i}, {111 * i % 32767}, {11 * i % 127}, {1.11 * i}, {1100.0011 * i}, {i % 2}, 'binary{i}', 'nchar_测试_{i}', {now_time + 1 * i} )"
            )
            tdSql.execute(
                f"insert into ct2 values ( {now_time - i * 7776000000}, {-i}, {-11111 * i}, {-111 * i % 32767}, {-11 * i % 127}, {-1.11 * i}, {-1100.0011 * i}, {i % 2}, 'binary{i}', 'nchar_测试_{i}', {now_time + 1 * i} )"
            )
        # Two extra ct1 rows: an all-zero row and a row of small negative extremes.
        tdSql.execute(
            f'''insert into ct1 values
            ( {now_time - rows * 5}, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', {now_time + 8} )
            ( {now_time + 10000}, {rows}, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', {now_time + 9} )
            '''
        )
        # ct4: NULL rows plus positive limit-value rows (near INT/BIGINT/FLOAT/DOUBLE maxima).
        tdSql.execute(
            f'''insert into ct4 values
            ( {now_time - rows * 7776000000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time - rows * 3888000000 + 10800000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time + 7776000000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                {now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
                {3.3*pow(10,38)}, {1.3*pow(10,308)}, {rows % 2}, "binary_limit-1", "nchar_测试_limit-1", {now_time - 86400000}
            )
            (
                {now_time + 2592000000}, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
                {3.2*pow(10,38)}, {1.2*pow(10,308)}, {(rows - 1) % 2}, "binary_limit-2", "nchar_测试_limit-2", {now_time - 172800000}
            )
            '''
        )
        # ct2: NULL rows plus negative limit-value rows (mirror of ct4).
        tdSql.execute(
            f'''insert into ct2 values
            ( {now_time - rows * 7776000000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time - rows * 3888000000 + 10800000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time + 7776000000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                {now_time + 5184000000}, {-1 * pow(2,31) + pow(2,15)}, {-1 * pow(2,63) + pow(2,30)}, -32766, -126,
                {-1 * 3.2 * pow(10,38)}, {-1.2 * pow(10,308)}, {rows % 2}, "binary_limit-1", "nchar_测试_limit-1", {now_time - 86400000}
            )
            (
                {now_time + 2592000000}, {-1 * pow(2,31) + pow(2,16)}, {-1 * pow(2,63) + pow(2,31)}, -32767, -127,
                {-3.3 * pow(10,38)}, {-1.3 * pow(10,308)}, {(rows - 1) % 2}, "binary_limit-2", "nchar_测试_limit-2", {now_time - 172800000}
            )
            '''
        )
        # t1: one row per hour going backwards from now.
        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( {now_time - i * 3600000}, {i}, {i * 11111}, {i % 32767}, {i % 127}, {i * 1.11111}, {i * 1000.1111}, {i % 2},
                "binary_{i}", "nchar_测试_{i}", {now_time - 1000 * i} )
                '''
            tdSql.execute(insert_data)
        # t1: NULL rows plus positive limit-value rows.
        tdSql.execute(
            f'''insert into t1 values
            ( {now_time + 10800000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time - (( rows // 2 ) * 60 + 30) * 60000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time - rows * 3600000}, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( {now_time + 7200000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
                {3.3*pow(10,38)}, {1.3*pow(10,308)}, {rows % 2},
                "binary_limit-1", "nchar_测试_limit-1", {now_time - 86400000}
            )
            ( {now_time + 3600000}, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
                {3.2*pow(10,38)}, {1.2*pow(10,308)}, {(rows - 1) % 2},
                "binary_limit-2", "nchar_测试_limit-2", {now_time - 172800000}
            )
            '''
        )
    def run(self):
        """Test entry point: build fixtures, check, restart the dnode, check again."""
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        # Restart dnode 1 so the same checks run again on data recovered from the WAL.
        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()
    def stop(self):
        # Release the SQL connection and report this case file as passed.
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
# Register the case with the harness for both supported platforms,
# one fresh TDTestCase instance per platform (as before).
for _add_case in (tdCases.addLinux, tdCases.addWindows):
    _add_case(__file__, TDTestCase())
tests/system-test/fulltest.sh
浏览文件 @
85847473
...
...
@@ -44,6 +44,8 @@ python3 ./test.py -f 2-query/concat_ws2.py
python3 ./test.py
-f
2-query/check_tsdb.py
python3 ./test.py
-f
2-query/spread.py
python3 ./test.py
-f
2-query/hyperloglog.py
python3 ./test.py
-f
2-query/explain.py
python3 ./test.py
-f
2-query/leastsquares.py
python3 ./test.py
-f
2-query/timezone.py
...
...
@@ -80,7 +82,7 @@ python3 ./test.py -f 2-query/arccos.py
python3 ./test.py
-f
2-query/arctan.py
python3 ./test.py
-f
2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
# python3 ./test.py -f 2-query/nestedQuery_str.py
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录