Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
ba72ce2b
T
TDengine
项目概览
taosdata
/
TDengine
接近 2 年 前同步成功
通知
1191
Star
22018
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
ba72ce2b
编写于
7月 04, 2022
作者:
dengyihao
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'dev' into idx
上级
bd77e5b1
2450d598
变更
46
展开全部
隐藏空白更改
内联
并排
Showing
46 changed file
with
2081 addition
and
1131 deletion
+2081
-1131
Jenkinsfile2
Jenkinsfile2
+30
-0
include/libs/nodes/nodes.h
include/libs/nodes/nodes.h
+3
-2
include/libs/scalar/scalar.h
include/libs/scalar/scalar.h
+2
-0
include/util/taoserror.h
include/util/taoserror.h
+3
-0
source/dnode/vnode/src/sma/smaRollup.c
source/dnode/vnode/src/sma/smaRollup.c
+2
-3
source/libs/index/inc/indexFst.h
source/libs/index/inc/indexFst.h
+7
-7
source/libs/index/inc/indexFstFile.h
source/libs/index/inc/indexFstFile.h
+96
-0
source/libs/index/inc/indexFstNode.h
source/libs/index/inc/indexFstNode.h
+4
-4
source/libs/index/inc/indexTfile.h
source/libs/index/inc/indexTfile.h
+5
-5
source/libs/index/src/index.c
source/libs/index/src/index.c
+1
-1
source/libs/index/src/indexFst.c
source/libs/index/src/indexFst.c
+30
-32
source/libs/index/src/indexFstDfa.c
source/libs/index/src/indexFstDfa.c
+2
-1
source/libs/index/src/indexFstFile.c
source/libs/index/src/indexFstFile.c
+38
-46
source/libs/index/src/indexFstNode.c
source/libs/index/src/indexFstNode.c
+1
-1
source/libs/index/src/indexFstUtil.c
source/libs/index/src/indexFstUtil.c
+0
-14
source/libs/index/src/indexTfile.c
source/libs/index/src/indexTfile.c
+17
-16
source/libs/index/test/fstTest.cc
source/libs/index/test/fstTest.cc
+13
-14
source/libs/index/test/fstUT.cc
source/libs/index/test/fstUT.cc
+13
-14
source/libs/index/test/indexTests.cc
source/libs/index/test/indexTests.cc
+15
-16
source/libs/index/test/jsonUT.cc
source/libs/index/test/jsonUT.cc
+0
-1
source/libs/index/test/utilUT.cc
source/libs/index/test/utilUT.cc
+0
-1
source/libs/nodes/src/nodesUtilFuncs.c
source/libs/nodes/src/nodesUtilFuncs.c
+19
-10
source/libs/parser/inc/parAst.h
source/libs/parser/inc/parAst.h
+1
-0
source/libs/parser/inc/parUtil.h
source/libs/parser/inc/parUtil.h
+1
-0
source/libs/parser/inc/sql.y
source/libs/parser/inc/sql.y
+2
-2
source/libs/parser/src/parAstCreater.c
source/libs/parser/src/parAstCreater.c
+5
-0
source/libs/parser/src/parAstParser.c
source/libs/parser/src/parAstParser.c
+6
-0
source/libs/parser/src/parInsert.c
source/libs/parser/src/parInsert.c
+67
-25
source/libs/parser/src/parTranslater.c
source/libs/parser/src/parTranslater.c
+203
-140
source/libs/parser/src/parUtil.c
source/libs/parser/src/parUtil.c
+9
-1
source/libs/parser/src/sql.c
source/libs/parser/src/sql.c
+695
-667
source/libs/parser/test/parInitialATest.cpp
source/libs/parser/test/parInitialATest.cpp
+14
-16
source/libs/planner/src/planOptimizer.c
source/libs/planner/src/planOptimizer.c
+4
-2
source/libs/planner/src/planSpliter.c
source/libs/planner/src/planSpliter.c
+4
-0
source/libs/planner/test/planOptimizeTest.cpp
source/libs/planner/test/planOptimizeTest.cpp
+2
-0
source/libs/planner/test/planSetOpTest.cpp
source/libs/planner/test/planSetOpTest.cpp
+9
-1
source/libs/transport/src/transComm.c
source/libs/transport/src/transComm.c
+11
-6
source/util/src/terror.c
source/util/src/terror.c
+1
-0
tests/script/tsim/insert/update0.sim
tests/script/tsim/insert/update0.sim
+2
-2
tests/system-test/1-insert/block_wise.py
tests/system-test/1-insert/block_wise.py
+442
-0
tests/system-test/1-insert/create_retentions.py
tests/system-test/1-insert/create_retentions.py
+34
-58
tests/system-test/1-insert/time_range_wise.py
tests/system-test/1-insert/time_range_wise.py
+13
-14
tests/system-test/2-query/json_tag.py
tests/system-test/2-query/json_tag.py
+1
-1
tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
+241
-0
tests/system-test/7-tmq/tmqConsFromTsdb1.py
tests/system-test/7-tmq/tmqConsFromTsdb1.py
+1
-1
tests/system-test/fulltest.sh
tests/system-test/fulltest.sh
+12
-7
未找到文件。
Jenkinsfile2
浏览文件 @
ba72ce2b
...
...
@@ -127,6 +127,25 @@ def pre_test(){
'''
return 1
}
def pre_test_build_mac() {
sh '''
hostname
date
'''
sh '''
cd ${WK}
rm -rf debug
mkdir debug
'''
sh '''
cd ${WK}/debug
cmake ..
make -j8
'''
sh '''
date
'''
}
def pre_test_win(){
bat '''
hostname
...
...
@@ -334,6 +353,17 @@ pipeline {
}
}
}
stage('mac test') {
agent{label " Mac_catalina "}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 20, unit: 'MINUTES'){
pre_test()
pre_test_build_mac()
}
}
}
}
stage('linux test') {
agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
...
...
include/libs/nodes/nodes.h
浏览文件 @
ba72ce2b
...
...
@@ -22,8 +22,8 @@ extern "C" {
#include "tdef.h"
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr,
type) (((SNode*)(nodeptr))->type = (
type))
#define nodeType(nodeptr)
(((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr,
nodetype) (((SNode*)(nodeptr))->type = (node
type))
#define LIST_LENGTH(l) (NULL != (l) ? (l)->length : 0)
...
...
@@ -118,6 +118,7 @@ typedef enum ENodeType {
QUERY_NODE_DROP_TABLE_STMT
,
QUERY_NODE_DROP_SUPER_TABLE_STMT
,
QUERY_NODE_ALTER_TABLE_STMT
,
QUERY_NODE_ALTER_SUPER_TABLE_STMT
,
QUERY_NODE_CREATE_USER_STMT
,
QUERY_NODE_ALTER_USER_STMT
,
QUERY_NODE_DROP_USER_STMT
,
...
...
include/libs/scalar/scalar.h
浏览文件 @
ba72ce2b
...
...
@@ -25,6 +25,8 @@ extern "C" {
typedef
struct
SFilterInfo
SFilterInfo
;
int32_t
scalarGetOperatorResultType
(
SDataType
left
,
SDataType
right
,
EOperatorType
op
,
SDataType
*
pRes
);
/*
pNode will be freed in API;
*pRes need to freed in caller
...
...
include/util/taoserror.h
浏览文件 @
ba72ce2b
...
...
@@ -578,6 +578,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x265C)
#define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D)
#define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E)
#define TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE TAOS_DEF_ERROR_CODE(0, 0x265F)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
...
...
@@ -627,6 +628,8 @@ int32_t* taosGetErrno();
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201)
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
...
...
source/dnode/vnode/src/sma/smaRollup.c
浏览文件 @
ba72ce2b
...
...
@@ -915,9 +915,9 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *
return
TSDB_CODE_SUCCESS
;
}
if
(
pItem
->
type
==
1
)
{
if
(
pItem
->
type
==
TSDB_RETENTION_L
1
)
{
qTaskInfo
=
pRSmaInfo
->
items
[
0
].
taskInfo
;
}
else
if
(
pItem
->
type
==
2
)
{
}
else
if
(
pItem
->
type
==
TSDB_RETENTION_L
2
)
{
qTaskInfo
=
pRSmaInfo
->
items
[
1
].
taskInfo
;
}
else
{
ASSERT
(
0
);
...
...
@@ -1233,7 +1233,6 @@ static void tdRSmaPersistTask(SRSmaStat *pRSmaStat) {
}
else
{
smaWarn
(
"vgId:%d, persist task in abnormal stat %"
PRIi8
,
SMA_VID
(
pRSmaStat
->
pSma
),
atomic_load_8
(
RSMA_TRIGGER_STAT
(
pRSmaStat
)));
ASSERT
(
0
);
}
atomic_store_8
(
RSMA_RUNNING_STAT
(
pRSmaStat
),
0
);
taosReleaseRef
(
smaMgmt
.
smaRef
,
pRSmaStat
->
refId
);
...
...
source/libs/index/inc/indexFst.h
浏览文件 @
ba72ce2b
...
...
@@ -21,7 +21,7 @@ extern "C" {
#endif
#include "indexFstAutomation.h"
#include "indexFst
CountingWriter
.h"
#include "indexFst
File
.h"
#include "indexFstNode.h"
#include "indexFstRegistry.h"
#include "indexFstUtil.h"
...
...
@@ -90,8 +90,8 @@ FstBuilderNode* fstUnFinishedNodesPopEmpty(FstUnFinishedNodes* nodes);
uint64_t
fstUnFinishedNodesFindCommPrefixAndSetOutput
(
FstUnFinishedNodes
*
node
,
FstSlice
bs
,
Output
in
,
Output
*
out
);
typedef
struct
FstBuilder
{
FstCountingWriter
*
wrt
;
// The FST raw data is written directly to `wtr`.
FstUnFinishedNodes
*
unfinished
;
// The stack of unfinished nodes
IdxFstFile
*
wrt
;
// The FST raw data is written directly to `wtr`.
FstUnFinishedNodes
*
unfinished
;
// The stack of unfinished
nodes
FstRegistry
*
registry
;
// A map of finished nodes.
FstSlice
last
;
// The last word added
CompiledAddr
lastAddr
;
// The address of the last compiled node
...
...
@@ -125,9 +125,9 @@ FstState fstStateCreateFrom(FstSlice* data, CompiledAddr addr);
FstState
fstStateCreate
(
State
state
);
// compile
void
fstStateCompileForOneTransNext
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
uint8_t
inp
);
void
fstStateCompileForOneTrans
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
FstTransition
*
trn
);
void
fstStateCompileForAnyTrans
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
FstBuilderNode
*
node
);
void
fstStateCompileForOneTransNext
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
uint8_t
inp
);
void
fstStateCompileForOneTrans
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
FstTransition
*
trn
);
void
fstStateCompileForAnyTrans
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
FstBuilderNode
*
node
);
// set_comm_input
void
fstStateSetCommInput
(
FstState
*
state
,
uint8_t
inp
);
...
...
@@ -282,7 +282,7 @@ FStmSt* stmBuilderIntoStm(FStmBuilder* sb);
bool
fstVerify
(
Fst
*
fst
);
// refactor this function
bool
fstBuilderNodeCompileTo
(
FstBuilderNode
*
b
,
FstCountingWriter
*
wrt
,
CompiledAddr
lastAddr
,
CompiledAddr
startAddr
);
bool
fstBuilderNodeCompileTo
(
FstBuilderNode
*
b
,
IdxFstFile
*
wrt
,
CompiledAddr
lastAddr
,
CompiledAddr
startAddr
);
typedef
struct
StreamState
{
FstNode
*
node
;
...
...
source/libs/index/inc/indexFst
CountingWriter
.h
→
source/libs/index/inc/indexFst
File
.h
浏览文件 @
ba72ce2b
...
...
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __INDEX_FST_
COUNTING_WRITER
_H__
#define __INDEX_FST_
COUNTING_WRITER
_H__
#ifndef __INDEX_FST_
FILE
_H__
#define __INDEX_FST_
FILE
_H__
#include "indexInt.h"
...
...
@@ -29,65 +29,65 @@ extern "C" {
static
char
tmpFile
[]
=
"./index"
;
typedef
enum
WriterType
{
TMemory
,
TFile
}
WriterType
;
typedef
struct
Writer
Ctx
{
int
(
*
write
)(
struct
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
int
(
*
read
)(
struct
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
int
(
*
flush
)(
struct
Writer
Ctx
*
ctx
);
int
(
*
readFrom
)(
struct
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
);
int
(
*
size
)(
struct
Writer
Ctx
*
ctx
);
typedef
struct
IFile
Ctx
{
int
(
*
write
)(
struct
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
int
(
*
read
)(
struct
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
int
(
*
flush
)(
struct
IFile
Ctx
*
ctx
);
int
(
*
readFrom
)(
struct
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
);
int
(
*
size
)(
struct
IFile
Ctx
*
ctx
);
WriterType
type
;
union
{
struct
{
TdFilePtr
pFile
;
bool
readOnly
;
char
buf
[
256
];
int
size
;
bool
readOnly
;
char
buf
[
256
];
int
64_t
size
;
#ifdef USE_MMAP
char
*
ptr
;
#endif
}
file
;
struct
{
int32_t
cap
a
;
int32_t
cap
;
char
*
buf
;
}
mem
;
};
int32_t
offset
;
int32_t
limit
;
}
Writer
Ctx
;
}
IFile
Ctx
;
static
int
writeCtxDoWrite
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
static
int
writeCtxDoRead
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
static
int
writeCtxDoReadFrom
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
);
static
int
writeCtxDoFlush
(
Writer
Ctx
*
ctx
);
static
int
idxFileCtxDoWrite
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
static
int
idxFileCtxDoRead
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
);
static
int
idxFileCtxDoReadFrom
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
);
static
int
idxFileCtxDoFlush
(
IFile
Ctx
*
ctx
);
WriterCtx
*
writer
CtxCreate
(
WriterType
type
,
const
char
*
path
,
bool
readOnly
,
int32_t
capacity
);
void
writerCtxDestroy
(
Writer
Ctx
*
w
,
bool
remove
);
IFileCtx
*
idxFile
CtxCreate
(
WriterType
type
,
const
char
*
path
,
bool
readOnly
,
int32_t
capacity
);
void
idxFileCtxDestroy
(
IFile
Ctx
*
w
,
bool
remove
);
typedef
uint32_t
CheckSummer
;
typedef
struct
FstCountingWriter
{
typedef
struct
IdxFstFile
{
void
*
wrt
;
// wrap any writer that counts and checksum bytes written
uint64_t
count
;
CheckSummer
summer
;
}
FstCountingWriter
;
}
IdxFstFile
;
int
fstCountingWriterWrite
(
FstCountingWriter
*
write
,
uint8_t
*
buf
,
uint32_t
len
);
int
idxFileWrite
(
IdxFstFile
*
write
,
uint8_t
*
buf
,
uint32_t
len
);
int
fstCountingWriterRead
(
FstCountingWriter
*
write
,
uint8_t
*
buf
,
uint32_t
len
);
int
idxFileRead
(
IdxFstFile
*
write
,
uint8_t
*
buf
,
uint32_t
len
);
int
fstCountingWriterFlush
(
FstCountingWriter
*
write
);
int
idxFileFlush
(
IdxFstFile
*
write
);
uint32_t
fstCountingWriterMaskedCheckSum
(
FstCountingWriter
*
write
);
uint32_t
idxFileMaskedCheckSum
(
IdxFstFile
*
write
);
FstCountingWriter
*
fstCountingWriter
Create
(
void
*
wtr
);
void
fstCountingWriterDestroy
(
FstCountingWriter
*
w
);
IdxFstFile
*
idxFile
Create
(
void
*
wtr
);
void
idxFileDestroy
(
IdxFstFile
*
w
);
void
fstCountingWriterPackUintIn
(
FstCountingWriter
*
writer
,
uint64_t
n
,
uint8_t
nBytes
);
uint8_t
fstCountingWriterPackUint
(
FstCountingWriter
*
writer
,
uint64_t
n
);
void
idxFilePackUintIn
(
IdxFstFile
*
writer
,
uint64_t
n
,
uint8_t
nBytes
);
uint8_t
idxFilePackUint
(
IdxFstFile
*
writer
,
uint64_t
n
);
#define FST_WRITER_COUNT(writer) (writer->count)
#define FST_WRITER_COUNT(writer)
(writer->count)
#define FST_WRITER_INTER_WRITER(writer) (writer->wtr)
#define FST_WRITE_CHECK_SUMMER(writer) (writer->summer)
#define FST_WRITE_CHECK_SUMMER(writer)
(writer->summer)
#ifdef __cplusplus
}
...
...
source/libs/index/inc/indexFstNode.h
浏览文件 @
ba72ce2b
...
...
@@ -20,12 +20,12 @@
extern
"C"
{
#endif
#include "indexFst
CountingWriter
.h"
#include "indexFst
File
.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#define FST_BUILDER_NODE_IS_FINAL(bn) (bn->isFinal)
#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn) (taosArrayGetSize(bn->trans) == 0)
#define FST_BUILDER_NODE_IS_FINAL(bn)
(bn->isFinal)
#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn)
(taosArrayGetSize(bn->trans) == 0)
#define FST_BUILDER_NODE_FINALOUTPUT_ISZERO(bn) (bn->finalOutput == 0)
typedef
struct
FstTransition
{
...
...
@@ -46,7 +46,7 @@ FstBuilderNode* fstBuilderNodeClone(FstBuilderNode* src);
void
fstBuilderNodeCloneFrom
(
FstBuilderNode
*
dst
,
FstBuilderNode
*
src
);
// bool fstBuilderNodeCompileTo(FstBuilderNode *b,
FstCountingWriter
*wrt,
// bool fstBuilderNodeCompileTo(FstBuilderNode *b,
IdxFile'
*wrt,
// CompiledAddr lastAddr, CompiledAddr startAddr);
bool
fstBuilderNodeEqual
(
FstBuilderNode
*
n1
,
FstBuilderNode
*
n2
);
...
...
source/libs/index/inc/indexTfile.h
浏览文件 @
ba72ce2b
...
...
@@ -16,7 +16,7 @@
#define __INDEX_TFILE_H__
#include "indexFst.h"
#include "indexFst
CountingWriter
.h"
#include "indexFst
File
.h"
#include "indexInt.h"
#include "indexTfile.h"
#include "indexUtil.h"
...
...
@@ -59,7 +59,7 @@ typedef struct TFileCache {
typedef
struct
TFileWriter
{
FstBuilder
*
fb
;
WriterCtx
*
ctx
;
IFileCtx
*
ctx
;
TFileHeader
header
;
uint32_t
offset
;
}
TFileWriter
;
...
...
@@ -68,7 +68,7 @@ typedef struct TFileWriter {
typedef
struct
TFileReader
{
T_REF_DECLARE
()
Fst
*
fst
;
WriterCtx
*
ctx
;
IFileCtx
*
ctx
;
TFileHeader
header
;
bool
remove
;
}
TFileReader
;
...
...
@@ -103,7 +103,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read
TFileReader
*
tfileGetReaderByCol
(
IndexTFile
*
tf
,
uint64_t
suid
,
char
*
colName
);
TFileReader
*
tfileReaderOpen
(
char
*
path
,
uint64_t
suid
,
int64_t
version
,
const
char
*
colName
);
TFileReader
*
tfileReaderCreate
(
Writer
Ctx
*
ctx
);
TFileReader
*
tfileReaderCreate
(
IFile
Ctx
*
ctx
);
void
tfileReaderDestroy
(
TFileReader
*
reader
);
int
tfileReaderSearch
(
TFileReader
*
reader
,
SIndexTermQuery
*
query
,
SIdxTRslt
*
tr
);
void
tfileReaderRef
(
TFileReader
*
reader
);
...
...
@@ -111,7 +111,7 @@ void tfileReaderUnRef(TFileReader* reader);
TFileWriter
*
tfileWriterOpen
(
char
*
path
,
uint64_t
suid
,
int64_t
version
,
const
char
*
colName
,
uint8_t
type
);
void
tfileWriterClose
(
TFileWriter
*
tw
);
TFileWriter
*
tfileWriterCreate
(
Writer
Ctx
*
ctx
,
TFileHeader
*
header
);
TFileWriter
*
tfileWriterCreate
(
IFile
Ctx
*
ctx
,
TFileHeader
*
header
);
void
tfileWriterDestroy
(
TFileWriter
*
tw
);
int
tfileWriterPut
(
TFileWriter
*
tw
,
void
*
data
,
bool
order
);
int
tfileWriterFinish
(
TFileWriter
*
tw
);
...
...
source/libs/index/src/index.c
浏览文件 @
ba72ce2b
...
...
@@ -663,7 +663,7 @@ static int idxGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
return
ret
;
END:
if
(
tw
!=
NULL
)
{
writer
CtxDestroy
(
tw
->
ctx
,
true
);
idxFile
CtxDestroy
(
tw
->
ctx
,
true
);
taosMemoryFree
(
tw
);
}
return
-
1
;
...
...
source/libs/index/src/indexFst.c
浏览文件 @
ba72ce2b
...
...
@@ -19,11 +19,11 @@
#include "tchecksum.h"
#include "tcoding.h"
static
void
fstPackDeltaIn
(
FstCountingWriter
*
wrt
,
CompiledAddr
nodeAddr
,
CompiledAddr
transAddr
,
uint8_t
nBytes
)
{
static
void
fstPackDeltaIn
(
IdxFstFile
*
wrt
,
CompiledAddr
nodeAddr
,
CompiledAddr
transAddr
,
uint8_t
nBytes
)
{
CompiledAddr
deltaAddr
=
(
transAddr
==
EMPTY_ADDRESS
)
?
EMPTY_ADDRESS
:
nodeAddr
-
transAddr
;
fstCountingWriter
PackUintIn
(
wrt
,
deltaAddr
,
nBytes
);
idxFile
PackUintIn
(
wrt
,
deltaAddr
,
nBytes
);
}
static
uint8_t
fstPackDetla
(
FstCountingWriter
*
wrt
,
CompiledAddr
nodeAddr
,
CompiledAddr
transAddr
)
{
static
uint8_t
fstPackDetla
(
IdxFstFile
*
wrt
,
CompiledAddr
nodeAddr
,
CompiledAddr
transAddr
)
{
uint8_t
nBytes
=
packDeltaSize
(
nodeAddr
,
transAddr
);
fstPackDeltaIn
(
wrt
,
nodeAddr
,
transAddr
,
nBytes
);
return
nBytes
;
...
...
@@ -208,7 +208,7 @@ FstState fstStateCreate(State state) {
return
fstStateDict
[
idx
];
}
// compile
void
fstStateCompileForOneTransNext
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
uint8_t
inp
)
{
void
fstStateCompileForOneTransNext
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
uint8_t
inp
)
{
FstState
s
=
fstStateCreate
(
OneTransNext
);
fstStateSetCommInput
(
&
s
,
inp
);
...
...
@@ -216,21 +216,21 @@ void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uin
uint8_t
v
=
fstStateCommInput
(
&
s
,
&
null
);
if
(
null
)
{
// w->write_all(&[inp])
fstCountingWriter
Write
(
w
,
&
inp
,
1
);
idxFile
Write
(
w
,
&
inp
,
1
);
}
fstCountingWriter
Write
(
w
,
&
(
s
.
val
),
1
);
idxFile
Write
(
w
,
&
(
s
.
val
),
1
);
// w->write_all(&[s.val])
return
;
}
void
fstStateCompileForOneTrans
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
FstTransition
*
trn
)
{
void
fstStateCompileForOneTrans
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
FstTransition
*
trn
)
{
Output
out
=
trn
->
out
;
uint8_t
outPackSize
=
(
out
==
0
?
0
:
fstCountingWriter
PackUint
(
w
,
out
));
uint8_t
outPackSize
=
(
out
==
0
?
0
:
idxFile
PackUint
(
w
,
out
));
uint8_t
transPackSize
=
fstPackDetla
(
w
,
addr
,
trn
->
addr
);
PackSizes
packSizes
=
0
;
FST_SET_OUTPUT_PACK_SIZE
(
packSizes
,
outPackSize
);
FST_SET_TRANSITION_PACK_SIZE
(
packSizes
,
transPackSize
);
fstCountingWriter
Write
(
w
,
(
char
*
)
&
packSizes
,
sizeof
(
packSizes
));
idxFile
Write
(
w
,
(
char
*
)
&
packSizes
,
sizeof
(
packSizes
));
FstState
st
=
fstStateCreate
(
OneTrans
);
...
...
@@ -239,12 +239,12 @@ void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTran
bool
null
=
false
;
uint8_t
inp
=
fstStateCommInput
(
&
st
,
&
null
);
if
(
null
==
true
)
{
fstCountingWriter
Write
(
w
,
(
char
*
)
&
trn
->
inp
,
sizeof
(
trn
->
inp
));
idxFile
Write
(
w
,
(
char
*
)
&
trn
->
inp
,
sizeof
(
trn
->
inp
));
}
fstCountingWriter
Write
(
w
,
(
char
*
)(
&
(
st
.
val
)),
sizeof
(
st
.
val
));
idxFile
Write
(
w
,
(
char
*
)(
&
(
st
.
val
)),
sizeof
(
st
.
val
));
return
;
}
void
fstStateCompileForAnyTrans
(
FstCountingWriter
*
w
,
CompiledAddr
addr
,
FstBuilderNode
*
node
)
{
void
fstStateCompileForAnyTrans
(
IdxFstFile
*
w
,
CompiledAddr
addr
,
FstBuilderNode
*
node
)
{
int32_t
sz
=
taosArrayGetSize
(
node
->
trans
);
assert
(
sz
<=
256
);
...
...
@@ -275,11 +275,11 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
if
(
anyOuts
)
{
if
(
FST_BUILDER_NODE_IS_FINAL
(
node
))
{
fstCountingWriter
PackUintIn
(
w
,
node
->
finalOutput
,
oSize
);
idxFile
PackUintIn
(
w
,
node
->
finalOutput
,
oSize
);
}
for
(
int32_t
i
=
sz
-
1
;
i
>=
0
;
i
--
)
{
FstTransition
*
t
=
taosArrayGet
(
node
->
trans
,
i
);
fstCountingWriter
PackUintIn
(
w
,
t
->
out
,
oSize
);
idxFile
PackUintIn
(
w
,
t
->
out
,
oSize
);
}
}
for
(
int32_t
i
=
sz
-
1
;
i
>=
0
;
i
--
)
{
...
...
@@ -288,7 +288,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
}
for
(
int32_t
i
=
sz
-
1
;
i
>=
0
;
i
--
)
{
FstTransition
*
t
=
taosArrayGet
(
node
->
trans
,
i
);
fstCountingWriter
Write
(
w
,
(
char
*
)
&
t
->
inp
,
1
);
idxFile
Write
(
w
,
(
char
*
)
&
t
->
inp
,
1
);
// fstPackDeltaIn(w, addr, t->addr, tSize);
}
if
(
sz
>
TRANS_INDEX_THRESHOLD
)
{
...
...
@@ -306,10 +306,10 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
index
[
t
->
inp
]
=
i
;
// fstPackDeltaIn(w, addr, t->addr, tSize);
}
fstCountingWriter
Write
(
w
,
(
char
*
)
index
,
256
);
idxFile
Write
(
w
,
(
char
*
)
index
,
256
);
taosMemoryFree
(
index
);
}
fstCountingWriter
Write
(
w
,
(
char
*
)
&
packSizes
,
1
);
idxFile
Write
(
w
,
(
char
*
)
&
packSizes
,
1
);
bool
null
=
false
;
fstStateStateNtrans
(
&
st
,
&
null
);
if
(
null
==
true
)
{
...
...
@@ -318,12 +318,12 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
// encoded in the state byte.
uint8_t
v
=
1
;
if
(
sz
==
256
)
{
fstCountingWriter
Write
(
w
,
(
char
*
)
&
v
,
1
);
idxFile
Write
(
w
,
(
char
*
)
&
v
,
1
);
}
else
{
fstCountingWriter
Write
(
w
,
(
char
*
)
&
sz
,
1
);
idxFile
Write
(
w
,
(
char
*
)
&
sz
,
1
);
}
}
fstCountingWriter
Write
(
w
,
(
char
*
)(
&
(
st
.
val
)),
1
);
idxFile
Write
(
w
,
(
char
*
)(
&
(
st
.
val
)),
1
);
return
;
}
...
...
@@ -753,7 +753,7 @@ bool fstNodeCompile(FstNode* node, void* w, CompiledAddr lastAddr, CompiledAddr
return
true
;
}
bool
fstBuilderNodeCompileTo
(
FstBuilderNode
*
b
,
FstCountingWriter
*
wrt
,
CompiledAddr
lastAddr
,
CompiledAddr
startAddr
)
{
bool
fstBuilderNodeCompileTo
(
FstBuilderNode
*
b
,
IdxFstFile
*
wrt
,
CompiledAddr
lastAddr
,
CompiledAddr
startAddr
)
{
return
fstNodeCompile
(
NULL
,
wrt
,
lastAddr
,
startAddr
,
b
);
}
...
...
@@ -763,7 +763,7 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) {
return
b
;
}
b
->
wrt
=
fstCountingWriter
Create
(
w
);
b
->
wrt
=
idxFile
Create
(
w
);
b
->
unfinished
=
fstUnFinishedNodesCreate
();
b
->
registry
=
fstRegistryCreate
(
10000
,
2
);
b
->
last
=
fstSliceCreate
(
NULL
,
0
);
...
...
@@ -773,12 +773,12 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) {
char
buf64
[
8
]
=
{
0
};
void
*
pBuf64
=
buf64
;
taosEncodeFixedU64
(
&
pBuf64
,
VERSION
);
fstCountingWriter
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
idxFile
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
pBuf64
=
buf64
;
memset
(
buf64
,
0
,
sizeof
(
buf64
));
taosEncodeFixedU64
(
&
pBuf64
,
ty
);
fstCountingWriter
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
idxFile
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
return
b
;
}
...
...
@@ -787,7 +787,7 @@ void fstBuilderDestroy(FstBuilder* b) {
return
;
}
fstCountingWriter
Destroy
(
b
->
wrt
);
idxFile
Destroy
(
b
->
wrt
);
fstUnFinishedNodesDestroy
(
b
->
unfinished
);
fstRegistryDestroy
(
b
->
registry
);
fstSliceDestroy
(
&
b
->
last
);
...
...
@@ -905,21 +905,19 @@ void* fstBuilderInsertInner(FstBuilder* b) {
void
*
pBuf64
=
buf64
;
taosEncodeFixedU64
(
&
pBuf64
,
b
->
len
);
fstCountingWriter
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
idxFile
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
pBuf64
=
buf64
;
taosEncodeFixedU64
(
&
pBuf64
,
rootAddr
);
fstCountingWriter
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
idxFile
Write
(
b
->
wrt
,
buf64
,
sizeof
(
buf64
));
char
buf32
[
4
]
=
{
0
};
void
*
pBuf32
=
buf32
;
uint32_t
sum
=
fstCountingWriter
MaskedCheckSum
(
b
->
wrt
);
uint32_t
sum
=
idxFile
MaskedCheckSum
(
b
->
wrt
);
taosEncodeFixedU32
(
&
pBuf32
,
sum
);
fstCountingWriter
Write
(
b
->
wrt
,
buf32
,
sizeof
(
buf32
));
idxFile
Write
(
b
->
wrt
,
buf32
,
sizeof
(
buf32
));
fstCountingWriterFlush
(
b
->
wrt
);
// fstCountingWriterDestroy(b->wrt);
// b->wrt = NULL;
idxFileFlush
(
b
->
wrt
);
return
b
->
wrt
;
}
void
fstBuilderFinish
(
FstBuilder
*
b
)
{
fstBuilderInsertInner
(
b
);
}
...
...
source/libs/index/src/indexFstDfa.c
浏览文件 @
ba72ce2b
...
...
@@ -61,9 +61,10 @@ void dfaBuilderDestroy(FstDfaBuilder *builder) {
pIter
=
taosHashIterate
(
builder
->
cache
,
pIter
);
}
taosHashCleanup
(
builder
->
cache
);
taosMemoryFree
(
builder
);
}
FstDfa
*
dfaBuilder
Build
(
FstDfaBuilder
*
builder
)
{
FstDfa
*
dfaBuilder
(
FstDfaBuilder
*
builder
)
{
uint32_t
sz
=
taosArrayGetSize
(
builder
->
dfa
->
insts
);
FstSparseSet
*
cur
=
sparSetCreate
(
sz
);
FstSparseSet
*
nxt
=
sparSetCreate
(
sz
);
...
...
source/libs/index/src/indexFst
CountingWriter
.c
→
source/libs/index/src/indexFst
File
.c
浏览文件 @
ba72ce2b
...
...
@@ -13,13 +13,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "indexFst
CountingWriter
.h"
#include "indexFst
File
.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "os.h"
#include "tutil.h"
static
int
writeCtxDoWrite
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
)
{
static
int
idxFileCtxDoWrite
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
)
{
if
(
ctx
->
type
==
TFile
)
{
assert
(
len
==
taosWriteFile
(
ctx
->
file
.
pFile
,
buf
,
len
));
}
else
{
...
...
@@ -28,7 +28,7 @@ static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len) {
ctx
->
offset
+=
len
;
return
len
;
}
static
int
writeCtxDoRead
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
)
{
static
int
idxFileCtxDoRead
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
)
{
int
nRead
=
0
;
if
(
ctx
->
type
==
TFile
)
{
#ifdef USE_MMAP
...
...
@@ -44,7 +44,7 @@ static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len) {
return
nRead
;
}
static
int
writeCtxDoReadFrom
(
Writer
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
)
{
static
int
idxFileCtxDoReadFrom
(
IFile
Ctx
*
ctx
,
uint8_t
*
buf
,
int
len
,
int32_t
offset
)
{
int
nRead
=
0
;
if
(
ctx
->
type
==
TFile
)
{
// tfLseek(ctx->file.pFile, offset, 0);
...
...
@@ -61,7 +61,7 @@ static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t off
}
return
nRead
;
}
static
int
writeCtxGetSize
(
Writer
Ctx
*
ctx
)
{
static
int
idxFileCtxGetSize
(
IFile
Ctx
*
ctx
)
{
if
(
ctx
->
type
==
TFile
)
{
int64_t
file_size
=
0
;
taosStatFile
(
ctx
->
file
.
buf
,
&
file_size
,
NULL
);
...
...
@@ -69,7 +69,7 @@ static int writeCtxGetSize(WriterCtx* ctx) {
}
return
0
;
}
static
int
writeCtxDoFlush
(
Writer
Ctx
*
ctx
)
{
static
int
idxFileCtxDoFlush
(
IFile
Ctx
*
ctx
)
{
if
(
ctx
->
type
==
TFile
)
{
// taosFsyncFile(ctx->file.pFile);
taosFsyncFile
(
ctx
->
file
.
pFile
);
...
...
@@ -80,8 +80,8 @@ static int writeCtxDoFlush(WriterCtx* ctx) {
return
1
;
}
WriterCtx
*
writer
CtxCreate
(
WriterType
type
,
const
char
*
path
,
bool
readOnly
,
int32_t
capacity
)
{
WriterCtx
*
ctx
=
taosMemoryCalloc
(
1
,
sizeof
(
Writer
Ctx
));
IFileCtx
*
idxFile
CtxCreate
(
WriterType
type
,
const
char
*
path
,
bool
readOnly
,
int32_t
capacity
)
{
IFileCtx
*
ctx
=
taosMemoryCalloc
(
1
,
sizeof
(
IFile
Ctx
));
if
(
ctx
==
NULL
)
{
return
NULL
;
}
...
...
@@ -90,39 +90,36 @@ WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int
if
(
ctx
->
type
==
TFile
)
{
// ugly code, refactor later
ctx
->
file
.
readOnly
=
readOnly
;
memcpy
(
ctx
->
file
.
buf
,
path
,
strlen
(
path
));
if
(
readOnly
==
false
)
{
// ctx->file.pFile = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
ctx
->
file
.
pFile
=
taosOpenFile
(
path
,
TD_FILE_CREATE
|
TD_FILE_WRITE
|
TD_FILE_APPEND
);
taosFtruncateFile
(
ctx
->
file
.
pFile
,
0
);
int64_t
file_size
;
taosStatFile
(
path
,
&
file_size
,
NULL
);
ctx
->
file
.
size
=
(
int
)
file_size
;
taosStatFile
(
path
,
&
ctx
->
file
.
size
,
NULL
);
// ctx->file.size = (int)size;
}
else
{
// ctx->file.pFile = open(path, O_RDONLY, S_IRWXU | S_IRWXG | S_IRWXO);
ctx
->
file
.
pFile
=
taosOpenFile
(
path
,
TD_FILE_READ
);
int64_t
file_
size
=
0
;
taosFStatFile
(
ctx
->
file
.
pFile
,
&
file_
size
,
NULL
);
ctx
->
file
.
size
=
(
int
)
file_
size
;
int64_t
size
=
0
;
taosFStatFile
(
ctx
->
file
.
pFile
,
&
ctx
->
file
.
size
,
NULL
);
ctx
->
file
.
size
=
(
int
)
size
;
#ifdef USE_MMAP
ctx
->
file
.
ptr
=
(
char
*
)
tfMmapReadOnly
(
ctx
->
file
.
pFile
,
ctx
->
file
.
size
);
#endif
}
memcpy
(
ctx
->
file
.
buf
,
path
,
strlen
(
path
));
if
(
ctx
->
file
.
pFile
==
NULL
)
{
indexError
(
"failed to open file, error %d"
,
errno
);
goto
END
;
}
}
else
if
(
ctx
->
type
==
TMemory
)
{
ctx
->
mem
.
buf
=
taosMemoryCalloc
(
1
,
sizeof
(
char
)
*
capacity
);
ctx
->
mem
.
cap
a
=
capacity
;
ctx
->
mem
.
cap
=
capacity
;
}
ctx
->
write
=
writ
eCtxDoWrite
;
ctx
->
read
=
writ
eCtxDoRead
;
ctx
->
flush
=
writ
eCtxDoFlush
;
ctx
->
readFrom
=
writ
eCtxDoReadFrom
;
ctx
->
size
=
writ
eCtxGetSize
;
ctx
->
write
=
idxFil
eCtxDoWrite
;
ctx
->
read
=
idxFil
eCtxDoRead
;
ctx
->
flush
=
idxFil
eCtxDoFlush
;
ctx
->
readFrom
=
idxFil
eCtxDoReadFrom
;
ctx
->
size
=
idxFil
eCtxGetSize
;
ctx
->
offset
=
0
;
ctx
->
limit
=
capacity
;
...
...
@@ -135,7 +132,7 @@ END:
taosMemoryFree
(
ctx
);
return
NULL
;
}
void
writerCtxDestroy
(
Writer
Ctx
*
ctx
,
bool
remove
)
{
void
idxFileCtxDestroy
(
IFile
Ctx
*
ctx
,
bool
remove
)
{
if
(
ctx
->
type
==
TMemory
)
{
taosMemoryFree
(
ctx
->
mem
.
buf
);
}
else
{
...
...
@@ -149,9 +146,6 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) {
if
(
ctx
->
file
.
readOnly
==
false
)
{
int64_t
file_size
=
0
;
taosStatFile
(
ctx
->
file
.
buf
,
&
file_size
,
NULL
);
// struct stat fstat;
// stat(ctx->file.buf, &fstat);
// indexError("write file size: %d", (int)(fstat.st_size));
}
if
(
remove
)
{
unlink
(
ctx
->
file
.
buf
);
...
...
@@ -160,30 +154,29 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) {
taosMemoryFree
(
ctx
);
}
FstCountingWriter
*
fstCountingWriter
Create
(
void
*
wrt
)
{
FstCountingWriter
*
cw
=
taosMemoryCalloc
(
1
,
sizeof
(
FstCountingWriter
));
IdxFstFile
*
idxFile
Create
(
void
*
wrt
)
{
IdxFstFile
*
cw
=
taosMemoryCalloc
(
1
,
sizeof
(
IdxFstFile
));
if
(
cw
==
NULL
)
{
return
NULL
;
}
cw
->
wrt
=
wrt
;
//(void *)(writerCtxCreate(TFile, readOnly));
return
cw
;
}
void
fstCountingWriterDestroy
(
FstCountingWriter
*
cw
)
{
void
idxFileDestroy
(
IdxFstFile
*
cw
)
{
// free wrt object: close fd or free mem
fstCountingWriter
Flush
(
cw
);
//
writerCtxDestroy((Writer
Ctx *)(cw->wrt));
idxFile
Flush
(
cw
);
//
idxFileCtxDestroy((IFile
Ctx *)(cw->wrt));
taosMemoryFree
(
cw
);
}
int
fstCountingWriterWrite
(
FstCountingWriter
*
write
,
uint8_t
*
buf
,
uint32_t
len
)
{
int
idxFileWrite
(
IdxFstFile
*
write
,
uint8_t
*
buf
,
uint32_t
len
)
{
if
(
write
==
NULL
)
{
return
0
;
}
// update checksum
// write data to file/socket or mem
Writer
Ctx
*
ctx
=
write
->
wrt
;
IFile
Ctx
*
ctx
=
write
->
wrt
;
int
nWrite
=
ctx
->
write
(
ctx
,
buf
,
len
);
assert
(
nWrite
==
len
);
...
...
@@ -192,42 +185,41 @@ int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len)
write
->
summer
=
taosCalcChecksum
(
write
->
summer
,
buf
,
len
);
return
len
;
}
int
fstCountingWriterRead
(
FstCountingWriter
*
write
,
uint8_t
*
buf
,
uint32_t
len
)
{
int
idxFileRead
(
IdxFstFile
*
write
,
uint8_t
*
buf
,
uint32_t
len
)
{
if
(
write
==
NULL
)
{
return
0
;
}
Writer
Ctx
*
ctx
=
write
->
wrt
;
int
nRead
=
ctx
->
read
(
ctx
,
buf
,
len
);
IFile
Ctx
*
ctx
=
write
->
wrt
;
int
nRead
=
ctx
->
read
(
ctx
,
buf
,
len
);
// assert(nRead == len);
return
nRead
;
}
uint32_t
fstCountingWriterMaskedCheckSum
(
FstCountingWriter
*
write
)
{
uint32_t
idxFileMaskedCheckSum
(
IdxFstFile
*
write
)
{
// opt
return
write
->
summer
;
}
int
fstCountingWriterFlush
(
FstCountingWriter
*
write
)
{
Writer
Ctx
*
ctx
=
write
->
wrt
;
int
idxFileFlush
(
IdxFstFile
*
write
)
{
IFile
Ctx
*
ctx
=
write
->
wrt
;
ctx
->
flush
(
ctx
);
// write->wtr->flush
return
1
;
}
void
fstCountingWriterPackUintIn
(
FstCountingWriter
*
writer
,
uint64_t
n
,
uint8_t
nBytes
)
{
void
idxFilePackUintIn
(
IdxFstFile
*
writer
,
uint64_t
n
,
uint8_t
nBytes
)
{
assert
(
1
<=
nBytes
&&
nBytes
<=
8
);
uint8_t
*
buf
=
taosMemoryCalloc
(
8
,
sizeof
(
uint8_t
));
for
(
uint8_t
i
=
0
;
i
<
nBytes
;
i
++
)
{
buf
[
i
]
=
(
uint8_t
)
n
;
n
=
n
>>
8
;
}
fstCountingWriter
Write
(
writer
,
buf
,
nBytes
);
idxFile
Write
(
writer
,
buf
,
nBytes
);
taosMemoryFree
(
buf
);
return
;
}
uint8_t
fstCountingWriterPackUint
(
FstCountingWriter
*
writer
,
uint64_t
n
)
{
uint8_t
idxFilePackUint
(
IdxFstFile
*
writer
,
uint64_t
n
)
{
uint8_t
nBytes
=
packSize
(
n
);
fstCountingWriter
PackUintIn
(
writer
,
n
,
nBytes
);
idxFile
PackUintIn
(
writer
,
n
,
nBytes
);
return
nBytes
;
}
source/libs/index/src/indexFstNode.c
浏览文件 @
ba72ce2b
...
...
@@ -95,7 +95,7 @@ void fstBuilderNodeCloneFrom(FstBuilderNode* dst, FstBuilderNode* src) {
}
}
// bool fstBuilderNodeCompileTo(FstBuilderNode *b,
FstCountingWriter
*wrt, CompiledAddr lastAddr, CompiledAddr
// bool fstBuilderNodeCompileTo(FstBuilderNode *b,
IdxFile
*wrt, CompiledAddr lastAddr, CompiledAddr
// startAddr) {
// size_t sz = taosArrayGetSize(b->trans);
...
...
source/libs/index/src/indexFstUtil.c
浏览文件 @
ba72ce2b
...
...
@@ -75,7 +75,6 @@ CompiledAddr unpackDelta(char* data, uint64_t len, uint64_t nodeAddr) {
}
// fst slice func
//
FstSlice
fstSliceCreate
(
uint8_t
*
data
,
uint64_t
len
)
{
FstString
*
str
=
(
FstString
*
)
taosMemoryMalloc
(
sizeof
(
FstString
));
...
...
@@ -164,16 +163,3 @@ int fstSliceCompare(FstSlice* a, FstSlice* b) {
return
0
;
}
}
// FstStack* fstStackCreate(size_t elemSize, StackFreeElem freeFn) {
// FstStack *s = taosMemoryCalloc(1, sizeof(FstStack));
// if (s == NULL) { return NULL; }
// s->
// s->freeFn
//
//}
// void *fstStackPush(FstStack *s, void *elem);
// void *fstStackTop(FstStack *s);
// size_t fstStackLen(FstStack *s);
// void *fstStackGetAt(FstStack *s, size_t i);
// void fstStackDestory(FstStack *);
source/libs/index/src/indexTfile.c
浏览文件 @
ba72ce2b
...
...
@@ -16,7 +16,7 @@
#include "index.h"
#include "indexComm.h"
#include "indexFst.h"
#include "indexFst
CountingWriter
.h"
#include "indexFst
File
.h"
#include "indexUtil.h"
#include "taosdef.h"
#include "taoserror.h"
...
...
@@ -103,7 +103,7 @@ TFileCache* tfileCacheCreate(const char* path) {
for
(
size_t
i
=
0
;
i
<
taosArrayGetSize
(
files
);
i
++
)
{
char
*
file
=
taosArrayGetP
(
files
,
i
);
WriterCtx
*
wc
=
writer
CtxCreate
(
TFile
,
file
,
true
,
1024
*
1024
*
64
);
IFileCtx
*
wc
=
idxFile
CtxCreate
(
TFile
,
file
,
true
,
1024
*
1024
*
64
);
if
(
wc
==
NULL
)
{
indexError
(
"failed to open index:%s"
,
file
);
goto
End
;
...
...
@@ -175,7 +175,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) {
tfileReaderRef
(
reader
);
return
;
}
TFileReader
*
tfileReaderCreate
(
Writer
Ctx
*
ctx
)
{
TFileReader
*
tfileReaderCreate
(
IFile
Ctx
*
ctx
)
{
TFileReader
*
reader
=
taosMemoryCalloc
(
1
,
sizeof
(
TFileReader
));
if
(
reader
==
NULL
)
{
return
NULL
;
...
...
@@ -216,7 +216,7 @@ void tfileReaderDestroy(TFileReader* reader) {
}
else
{
indexInfo
(
"%s is not removed"
,
reader
->
ctx
->
file
.
buf
);
}
writer
CtxDestroy
(
reader
->
ctx
,
reader
->
remove
);
idxFile
CtxDestroy
(
reader
->
ctx
,
reader
->
remove
);
taosMemoryFree
(
reader
);
}
...
...
@@ -490,7 +490,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const c
char
fullname
[
256
]
=
{
0
};
tfileGenFileFullName
(
fullname
,
path
,
suid
,
colName
,
version
);
// indexInfo("open write file name %s", fullname);
WriterCtx
*
wcx
=
writer
CtxCreate
(
TFile
,
fullname
,
false
,
1024
*
1024
*
64
);
IFileCtx
*
wcx
=
idxFile
CtxCreate
(
TFile
,
fullname
,
false
,
1024
*
1024
*
64
);
if
(
wcx
==
NULL
)
{
return
NULL
;
}
...
...
@@ -507,18 +507,18 @@ TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const c
char
fullname
[
256
]
=
{
0
};
tfileGenFileFullName
(
fullname
,
path
,
suid
,
colName
,
version
);
WriterCtx
*
wc
=
writer
CtxCreate
(
TFile
,
fullname
,
true
,
1024
*
1024
*
1024
);
IFileCtx
*
wc
=
idxFile
CtxCreate
(
TFile
,
fullname
,
true
,
1024
*
1024
*
1024
);
if
(
wc
==
NULL
)
{
terrno
=
TAOS_SYSTEM_ERROR
(
errno
);
indexError
(
"failed to open readonly file: %s, reason: %s"
,
fullname
,
terrstr
());
return
NULL
;
}
indexTrace
(
"open read file name:%s, file size: %
d
"
,
wc
->
file
.
buf
,
wc
->
file
.
size
);
indexTrace
(
"open read file name:%s, file size: %
"
PRId64
"
"
,
wc
->
file
.
buf
,
wc
->
file
.
size
);
TFileReader
*
reader
=
tfileReaderCreate
(
wc
);
return
reader
;
}
TFileWriter
*
tfileWriterCreate
(
Writer
Ctx
*
ctx
,
TFileHeader
*
header
)
{
TFileWriter
*
tfileWriterCreate
(
IFile
Ctx
*
ctx
,
TFileHeader
*
header
)
{
TFileWriter
*
tw
=
taosMemoryCalloc
(
1
,
sizeof
(
TFileWriter
));
if
(
tw
==
NULL
)
{
indexError
(
"index: %"
PRIu64
" failed to alloc TFilerWriter"
,
header
->
suid
);
...
...
@@ -609,14 +609,14 @@ void tfileWriterClose(TFileWriter* tw) {
if
(
tw
==
NULL
)
{
return
;
}
writer
CtxDestroy
(
tw
->
ctx
,
false
);
idxFile
CtxDestroy
(
tw
->
ctx
,
false
);
taosMemoryFree
(
tw
);
}
void
tfileWriterDestroy
(
TFileWriter
*
tw
)
{
if
(
tw
==
NULL
)
{
return
;
}
writer
CtxDestroy
(
tw
->
ctx
,
false
);
idxFile
CtxDestroy
(
tw
->
ctx
,
false
);
taosMemoryFree
(
tw
);
}
...
...
@@ -892,8 +892,8 @@ static int tfileReaderLoadHeader(TFileReader* reader) {
return
0
;
}
static
int
tfileReaderLoadFst
(
TFileReader
*
reader
)
{
Writer
Ctx
*
ctx
=
reader
->
ctx
;
int
size
=
ctx
->
size
(
ctx
);
IFile
Ctx
*
ctx
=
reader
->
ctx
;
int
size
=
ctx
->
size
(
ctx
);
// current load fst into memory, refactor it later
int
fstSize
=
size
-
reader
->
header
.
fstOffset
-
sizeof
(
tfileMagicNumber
);
...
...
@@ -905,8 +905,9 @@ static int tfileReaderLoadFst(TFileReader* reader) {
int64_t
ts
=
taosGetTimestampUs
();
int32_t
nread
=
ctx
->
readFrom
(
ctx
,
buf
,
fstSize
,
reader
->
header
.
fstOffset
);
int64_t
cost
=
taosGetTimestampUs
()
-
ts
;
indexInfo
(
"nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %d, time cost: %"
PRId64
"us"
,
nread
,
reader
->
header
.
fstOffset
,
fstSize
,
ctx
->
file
.
buf
,
ctx
->
file
.
size
,
cost
);
indexInfo
(
"nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %"
PRId64
", time cost: %"
PRId64
"us"
,
nread
,
reader
->
header
.
fstOffset
,
fstSize
,
ctx
->
file
.
buf
,
ctx
->
file
.
size
,
cost
);
// we assuse fst size less than FST_MAX_SIZE
assert
(
nread
>
0
&&
nread
<=
fstSize
);
...
...
@@ -919,7 +920,7 @@ static int tfileReaderLoadFst(TFileReader* reader) {
}
static
int
tfileReaderLoadTableIds
(
TFileReader
*
reader
,
int32_t
offset
,
SArray
*
result
)
{
// TODO(yihao): opt later
Writer
Ctx
*
ctx
=
reader
->
ctx
;
IFile
Ctx
*
ctx
=
reader
->
ctx
;
// add block cache
char
block
[
4096
]
=
{
0
};
int32_t
nread
=
ctx
->
readFrom
(
ctx
,
block
,
sizeof
(
block
),
offset
);
...
...
@@ -952,7 +953,7 @@ static int tfileReaderLoadTableIds(TFileReader* reader, int32_t offset, SArray*
}
static
int
tfileReaderVerify
(
TFileReader
*
reader
)
{
// just validate header and Footer, file corrupted also shuild be verified later
Writer
Ctx
*
ctx
=
reader
->
ctx
;
IFile
Ctx
*
ctx
=
reader
->
ctx
;
uint64_t
tMagicNumber
=
0
;
...
...
source/libs/index/test/fstTest.cc
浏览文件 @
ba72ce2b
...
...
@@ -7,7 +7,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
...
...
@@ -20,7 +19,7 @@ class FstWriter {
public:
FstWriter
()
{
taosRemoveFile
(
fileName
.
c_str
());
_wc
=
writer
CtxCreate
(
TFile
,
fileName
.
c_str
(),
false
,
64
*
1024
*
1024
);
_wc
=
idxFile
CtxCreate
(
TFile
,
fileName
.
c_str
(),
false
,
64
*
1024
*
1024
);
_b
=
fstBuilderCreate
(
_wc
,
0
);
}
bool
Put
(
const
std
::
string
&
key
,
uint64_t
val
)
{
...
...
@@ -38,25 +37,25 @@ class FstWriter {
fstBuilderFinish
(
_b
);
fstBuilderDestroy
(
_b
);
writer
CtxDestroy
(
_wc
,
false
);
idxFile
CtxDestroy
(
_wc
,
false
);
}
private:
FstBuilder
*
_b
;
WriterCtx
*
_wc
;
IFileCtx
*
_wc
;
};
class
FstReadMemory
{
public:
FstReadMemory
(
int32_t
size
,
const
std
::
string
&
fileName
=
TD_TMP_DIR_PATH
"tindex.tindex"
)
{
_wc
=
writer
CtxCreate
(
TFile
,
fileName
.
c_str
(),
true
,
64
*
1024
);
_w
=
fstCountingWriter
Create
(
_wc
);
_wc
=
idxFile
CtxCreate
(
TFile
,
fileName
.
c_str
(),
true
,
64
*
1024
);
_w
=
idxFile
Create
(
_wc
);
_size
=
size
;
memset
((
void
*
)
&
_s
,
0
,
sizeof
(
_s
));
}
bool
init
()
{
char
*
buf
=
(
char
*
)
taosMemoryCalloc
(
1
,
sizeof
(
char
)
*
_size
);
int
nRead
=
fstCountingWriter
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
int
nRead
=
idxFile
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
if
(
nRead
<=
0
)
{
return
false
;
}
...
...
@@ -141,18 +140,18 @@ class FstReadMemory {
}
~
FstReadMemory
()
{
fstCountingWriter
Destroy
(
_w
);
idxFile
Destroy
(
_w
);
fstDestroy
(
_fst
);
fstSliceDestroy
(
&
_s
);
writer
CtxDestroy
(
_wc
,
false
);
idxFile
CtxDestroy
(
_wc
,
false
);
}
private:
FstCountingWriter
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
WriterCtx
*
_wc
;
int32_t
_size
;
IdxFstFile
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
IFileCtx
*
_wc
;
int32_t
_size
;
};
#define L 100
...
...
source/libs/index/test/fstUT.cc
浏览文件 @
ba72ce2b
...
...
@@ -8,7 +8,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
...
...
@@ -40,7 +39,7 @@ static void EnvCleanup() {}
class
FstWriter
{
public:
FstWriter
()
{
_wc
=
writer
CtxCreate
(
TFile
,
tindex
,
false
,
64
*
1024
*
1024
);
_wc
=
idxFile
CtxCreate
(
TFile
,
tindex
,
false
,
64
*
1024
*
1024
);
_b
=
fstBuilderCreate
(
_wc
,
0
);
}
bool
Put
(
const
std
::
string
&
key
,
uint64_t
val
)
{
...
...
@@ -58,25 +57,25 @@ class FstWriter {
fstBuilderFinish
(
_b
);
fstBuilderDestroy
(
_b
);
writer
CtxDestroy
(
_wc
,
false
);
idxFile
CtxDestroy
(
_wc
,
false
);
}
private:
FstBuilder
*
_b
;
WriterCtx
*
_wc
;
IFileCtx
*
_wc
;
};
class
FstReadMemory
{
public:
FstReadMemory
(
size_t
size
)
{
_wc
=
writer
CtxCreate
(
TFile
,
tindex
,
true
,
64
*
1024
);
_w
=
fstCountingWriter
Create
(
_wc
);
_wc
=
idxFile
CtxCreate
(
TFile
,
tindex
,
true
,
64
*
1024
);
_w
=
idxFile
Create
(
_wc
);
_size
=
size
;
memset
((
void
*
)
&
_s
,
0
,
sizeof
(
_s
));
}
bool
init
()
{
char
*
buf
=
(
char
*
)
taosMemoryCalloc
(
1
,
sizeof
(
char
)
*
_size
);
int
nRead
=
fstCountingWriter
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
int
nRead
=
idxFile
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
if
(
nRead
<=
0
)
{
return
false
;
}
...
...
@@ -130,18 +129,18 @@ class FstReadMemory {
}
~
FstReadMemory
()
{
fstCountingWriter
Destroy
(
_w
);
idxFile
Destroy
(
_w
);
fstDestroy
(
_fst
);
fstSliceDestroy
(
&
_s
);
writer
CtxDestroy
(
_wc
,
false
);
idxFile
CtxDestroy
(
_wc
,
false
);
}
private:
FstCountingWriter
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
WriterCtx
*
_wc
;
size_t
_size
;
IdxFstFile
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
IFileCtx
*
_wc
;
size_t
_size
;
};
class
FstWriterEnv
:
public
::
testing
::
Test
{
...
...
source/libs/index/test/indexTests.cc
浏览文件 @
ba72ce2b
...
...
@@ -20,7 +20,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
...
...
@@ -51,7 +50,7 @@ class DebugInfo {
class
FstWriter
{
public:
FstWriter
()
{
_wc
=
writer
CtxCreate
(
TFile
,
TD_TMP_DIR_PATH
"tindex"
,
false
,
64
*
1024
*
1024
);
_wc
=
idxFile
CtxCreate
(
TFile
,
TD_TMP_DIR_PATH
"tindex"
,
false
,
64
*
1024
*
1024
);
_b
=
fstBuilderCreate
(
NULL
,
0
);
}
bool
Put
(
const
std
::
string
&
key
,
uint64_t
val
)
{
...
...
@@ -64,25 +63,25 @@ class FstWriter {
fstBuilderFinish
(
_b
);
fstBuilderDestroy
(
_b
);
writer
CtxDestroy
(
_wc
,
false
);
idxFile
CtxDestroy
(
_wc
,
false
);
}
private:
FstBuilder
*
_b
;
WriterCtx
*
_wc
;
IFileCtx
*
_wc
;
};
class
FstReadMemory
{
public:
FstReadMemory
(
size_t
size
)
{
_wc
=
writer
CtxCreate
(
TFile
,
TD_TMP_DIR_PATH
"tindex"
,
true
,
64
*
1024
);
_w
=
fstCountingWriter
Create
(
_wc
);
_wc
=
idxFile
CtxCreate
(
TFile
,
TD_TMP_DIR_PATH
"tindex"
,
true
,
64
*
1024
);
_w
=
idxFile
Create
(
_wc
);
_size
=
size
;
memset
((
void
*
)
&
_s
,
0
,
sizeof
(
_s
));
}
bool
init
()
{
char
*
buf
=
(
char
*
)
taosMemoryCalloc
(
1
,
sizeof
(
char
)
*
_size
);
int
nRead
=
fstCountingWriter
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
int
nRead
=
idxFile
Read
(
_w
,
(
uint8_t
*
)
buf
,
_size
);
if
(
nRead
<=
0
)
{
return
false
;
}
...
...
@@ -124,18 +123,18 @@ class FstReadMemory {
}
~
FstReadMemory
()
{
fstCountingWriter
Destroy
(
_w
);
idxFile
Destroy
(
_w
);
fstDestroy
(
_fst
);
fstSliceDestroy
(
&
_s
);
writer
CtxDestroy
(
_wc
,
true
);
idxFile
CtxDestroy
(
_wc
,
true
);
}
private:
FstCountingWriter
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
WriterCtx
*
_wc
;
size_t
_size
;
IdxFstFile
*
_w
;
Fst
*
_fst
;
FstSlice
_s
;
IFileCtx
*
_wc
;
size_t
_size
;
};
#define L 100
...
...
@@ -392,13 +391,13 @@ class TFileObj {
fileName_
=
path
;
WriterCtx
*
ctx
=
writer
CtxCreate
(
TFile
,
path
.
c_str
(),
false
,
64
*
1024
*
1024
);
IFileCtx
*
ctx
=
idxFile
CtxCreate
(
TFile
,
path
.
c_str
(),
false
,
64
*
1024
*
1024
);
writer_
=
tfileWriterCreate
(
ctx
,
&
header
);
return
writer_
!=
NULL
?
true
:
false
;
}
bool
InitReader
()
{
WriterCtx
*
ctx
=
writer
CtxCreate
(
TFile
,
fileName_
.
c_str
(),
true
,
64
*
1024
*
1024
);
IFileCtx
*
ctx
=
idxFile
CtxCreate
(
TFile
,
fileName_
.
c_str
(),
true
,
64
*
1024
*
1024
);
reader_
=
tfileReaderCreate
(
ctx
);
return
reader_
!=
NULL
?
true
:
false
;
}
...
...
source/libs/index/test/jsonUT.cc
浏览文件 @
ba72ce2b
...
...
@@ -7,7 +7,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
...
...
source/libs/index/test/utilUT.cc
浏览文件 @
ba72ce2b
...
...
@@ -8,7 +8,6 @@
#include "indexCache.h"
#include "indexComm.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
...
...
source/libs/nodes/src/nodesUtilFuncs.c
浏览文件 @
ba72ce2b
...
...
@@ -19,8 +19,8 @@
#include "querynodes.h"
#include "taos.h"
#include "taoserror.h"
#include "thash.h"
#include "tdatablock.h"
#include "thash.h"
static
SNode
*
makeNode
(
ENodeType
type
,
size_t
size
)
{
SNode
*
p
=
taosMemoryCalloc
(
1
,
size
);
...
...
@@ -1497,13 +1497,18 @@ typedef struct SCollectFuncsCxt {
int32_t
errCode
;
FFuncClassifier
classifier
;
SNodeList
*
pFuncs
;
SHashObj
*
pAliasName
;
}
SCollectFuncsCxt
;
static
EDealRes
collectFuncs
(
SNode
*
pNode
,
void
*
pContext
)
{
SCollectFuncsCxt
*
pCxt
=
(
SCollectFuncsCxt
*
)
pContext
;
if
(
QUERY_NODE_FUNCTION
==
nodeType
(
pNode
)
&&
pCxt
->
classifier
(((
SFunctionNode
*
)
pNode
)
->
funcId
)
&&
!
(((
SExprNode
*
)
pNode
)
->
orderAlias
))
{
pCxt
->
errCode
=
nodesListStrictAppend
(
pCxt
->
pFuncs
,
nodesCloneNode
(
pNode
));
SExprNode
*
pExpr
=
(
SExprNode
*
)
pNode
;
if
(
NULL
==
taosHashGet
(
pCxt
->
pAliasName
,
pExpr
->
aliasName
,
strlen
(
pExpr
->
aliasName
)))
{
pCxt
->
errCode
=
nodesListStrictAppend
(
pCxt
->
pFuncs
,
nodesCloneNode
(
pNode
));
taosHashPut
(
pCxt
->
pAliasName
,
pExpr
->
aliasName
,
strlen
(
pExpr
->
aliasName
),
&
pExpr
,
POINTER_BYTES
);
}
return
(
TSDB_CODE_SUCCESS
==
pCxt
->
errCode
?
DEAL_RES_IGNORE_CHILD
:
DEAL_RES_ERROR
);
}
return
DEAL_RES_CONTINUE
;
...
...
@@ -1515,23 +1520,27 @@ int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifi
}
SCollectFuncsCxt
cxt
=
{
.
errCode
=
TSDB_CODE_SUCCESS
,
.
classifier
=
classifier
,
.
pFuncs
=
(
NULL
==
*
pFuncs
?
nodesMakeList
()
:
*
pFuncs
)};
.
errCode
=
TSDB_CODE_SUCCESS
,
.
classifier
=
classifier
,
.
pFuncs
=
(
NULL
==
*
pFuncs
?
nodesMakeList
()
:
*
pFuncs
),
.
pAliasName
=
taosHashInit
(
4
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_VARCHAR
),
false
,
false
)};
if
(
NULL
==
cxt
.
pFuncs
)
{
return
TSDB_CODE_OUT_OF_MEMORY
;
}
*
pFuncs
=
NULL
;
nodesWalkSelectStmt
(
pSelect
,
clause
,
collectFuncs
,
&
cxt
);
if
(
TSDB_CODE_SUCCESS
!
=
cxt
.
errCode
)
{
nodesDestroyList
(
cxt
.
pFuncs
);
return
cxt
.
errCode
;
}
if
(
LIST_LENGTH
(
cxt
.
pFuncs
)
>
0
)
{
*
pFuncs
=
cxt
.
pFuncs
;
if
(
TSDB_CODE_SUCCESS
=
=
cxt
.
errCode
)
{
if
(
LIST_LENGTH
(
cxt
.
pFuncs
)
>
0
)
{
*
pFuncs
=
cxt
.
pFuncs
;
}
else
{
nodesDestroyList
(
cxt
.
pFuncs
);
}
}
else
{
nodesDestroyList
(
cxt
.
pFuncs
);
}
taosHashCleanup
(
cxt
.
pAliasName
);
return
TSDB_CODE_SUCCESS
;
return
cxt
.
errCode
;
}
typedef
struct
SCollectSpecialNodesCxt
{
...
...
source/libs/parser/inc/parAst.h
浏览文件 @
ba72ce2b
...
...
@@ -154,6 +154,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_
SNode
*
createAlterTableRenameCol
(
SAstCreateContext
*
pCxt
,
SNode
*
pRealTable
,
int8_t
alterType
,
SToken
*
pOldColName
,
SToken
*
pNewColName
);
SNode
*
createAlterTableSetTag
(
SAstCreateContext
*
pCxt
,
SNode
*
pRealTable
,
SToken
*
pTagName
,
SNode
*
pVal
);
SNode
*
setAlterSuperTableType
(
SNode
*
pStmt
);
SNode
*
createUseDatabaseStmt
(
SAstCreateContext
*
pCxt
,
SToken
*
pDbName
);
SNode
*
createShowStmt
(
SAstCreateContext
*
pCxt
,
ENodeType
type
);
SNode
*
createShowStmtWithCond
(
SAstCreateContext
*
pCxt
,
ENodeType
type
,
SNode
*
pDbName
,
SNode
*
pTbName
,
...
...
source/libs/parser/inc/parUtil.h
浏览文件 @
ba72ce2b
...
...
@@ -53,6 +53,7 @@ typedef struct SParseMetaCache {
}
SParseMetaCache
;
int32_t
generateSyntaxErrMsg
(
SMsgBuf
*
pBuf
,
int32_t
errCode
,
...);
int32_t
generateSyntaxErrMsgExt
(
SMsgBuf
*
pBuf
,
int32_t
errCode
,
const
char
*
pFormat
,
...);
int32_t
buildInvalidOperationMsg
(
SMsgBuf
*
pMsgBuf
,
const
char
*
msg
);
int32_t
buildSyntaxErrMsg
(
SMsgBuf
*
pBuf
,
const
char
*
additionalInfo
,
const
char
*
sourceStr
);
...
...
source/libs/parser/inc/sql.y
浏览文件 @
ba72ce2b
...
...
@@ -232,7 +232,7 @@ cmd ::= DROP TABLE multi_drop_clause(A).
cmd ::= DROP STABLE exists_opt(A) full_table_name(B). { pCxt->pRootNode = createDropSuperTableStmt(pCxt, A, B); }
cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode =
A
; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode =
setAlterSuperTableType(A)
; }
alter_table_clause(A) ::= full_table_name(B) alter_table_options(C). { A = createAlterTableModifyOptions(pCxt, B, C); }
alter_table_clause(A) ::=
...
...
@@ -259,7 +259,7 @@ multi_create_clause(A) ::= multi_create_clause(B) create_subtable_clause(C).
create_subtable_clause(A) ::=
not_exists_opt(B) full_table_name(C) USING full_table_name(D)
specific_tags_opt(E) TAGS NK_LP
literal_list(F) NK_RP table_options(G).
{ A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); }
specific_tags_opt(E) TAGS NK_LP
expression_list(F) NK_RP table_options(G).
{ A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); }
%type multi_drop_clause { SNodeList* }
%destructor multi_drop_clause { nodesDestroyList($$); }
...
...
source/libs/parser/src/parAstCreater.c
浏览文件 @
ba72ce2b
...
...
@@ -1127,6 +1127,11 @@ SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken
return
createAlterTableStmtFinalize
(
pRealTable
,
pStmt
);
}
SNode
*
setAlterSuperTableType
(
SNode
*
pStmt
)
{
setNodeType
(
pStmt
,
QUERY_NODE_ALTER_SUPER_TABLE_STMT
);
return
pStmt
;
}
SNode
*
createUseDatabaseStmt
(
SAstCreateContext
*
pCxt
,
SToken
*
pDbName
)
{
CHECK_PARSER_STATUS
(
pCxt
);
if
(
!
checkDbName
(
pCxt
,
pDbName
,
false
))
{
...
...
source/libs/parser/src/parAstParser.c
浏览文件 @
ba72ce2b
...
...
@@ -247,6 +247,10 @@ static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTabl
return
code
;
}
static
int32_t
collectMetaKeyFromAlterStable
(
SCollectMetaKeyCxt
*
pCxt
,
SAlterTableStmt
*
pStmt
)
{
return
reserveTableMetaInCache
(
pCxt
->
pParseCxt
->
acctId
,
pStmt
->
dbName
,
pStmt
->
tableName
,
pCxt
->
pMetaCache
);
}
static
int32_t
collectMetaKeyFromUseDatabase
(
SCollectMetaKeyCxt
*
pCxt
,
SUseDatabaseStmt
*
pStmt
)
{
return
reserveDbVgVersionInCache
(
pCxt
->
pParseCxt
->
acctId
,
pStmt
->
dbName
,
pCxt
->
pMetaCache
);
}
...
...
@@ -483,6 +487,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return
collectMetaKeyFromDropTable
(
pCxt
,
(
SDropTableStmt
*
)
pStmt
);
case
QUERY_NODE_ALTER_TABLE_STMT
:
return
collectMetaKeyFromAlterTable
(
pCxt
,
(
SAlterTableStmt
*
)
pStmt
);
case
QUERY_NODE_ALTER_SUPER_TABLE_STMT
:
return
collectMetaKeyFromAlterStable
(
pCxt
,
(
SAlterTableStmt
*
)
pStmt
);
case
QUERY_NODE_USE_DATABASE_STMT
:
return
collectMetaKeyFromUseDatabase
(
pCxt
,
(
SUseDatabaseStmt
*
)
pStmt
);
case
QUERY_NODE_CREATE_INDEX_STMT
:
...
...
source/libs/parser/src/parInsert.c
浏览文件 @
ba72ce2b
...
...
@@ -48,6 +48,12 @@
pSql += sToken.n; \
} while (TK_NK_SPACE == sToken.type)
typedef
struct
SInsertParseBaseContext
{
SParseContext
*
pComCxt
;
char
*
pSql
;
SMsgBuf
msg
;
}
SInsertParseBaseContext
;
typedef
struct
SInsertParseContext
{
SParseContext
*
pComCxt
;
// input
char
*
pSql
;
// input
...
...
@@ -1105,6 +1111,32 @@ static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName*
return
taosHashPut
(
pHash
,
pName
,
len
,
&
pBackup
,
POINTER_BYTES
);
}
static
int32_t
skipParentheses
(
SInsertParseSyntaxCxt
*
pCxt
)
{
SToken
sToken
;
int32_t
expectRightParenthesis
=
1
;
while
(
1
)
{
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
if
(
TK_NK_LP
==
sToken
.
type
)
{
++
expectRightParenthesis
;
}
else
if
(
TK_NK_RP
==
sToken
.
type
&&
0
==
--
expectRightParenthesis
)
{
break
;
}
if
(
0
==
sToken
.
n
)
{
return
buildSyntaxErrMsg
(
&
pCxt
->
msg
,
") expected"
,
NULL
);
}
}
return
TSDB_CODE_SUCCESS
;
}
static
int32_t
skipBoundColumns
(
SInsertParseSyntaxCxt
*
pCxt
)
{
return
skipParentheses
(
pCxt
);
}
static
int32_t
ignoreBoundColumns
(
SInsertParseContext
*
pCxt
)
{
SInsertParseSyntaxCxt
cxt
=
{.
pComCxt
=
pCxt
->
pComCxt
,
.
pSql
=
pCxt
->
pSql
,
.
msg
=
pCxt
->
msg
,
.
pMetaCache
=
NULL
};
int32_t
code
=
skipBoundColumns
(
&
cxt
);
pCxt
->
pSql
=
cxt
.
pSql
;
return
code
;
}
static
int32_t
skipUsingClause
(
SInsertParseSyntaxCxt
*
pCxt
);
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
...
...
@@ -1453,12 +1485,29 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
tNameGetFullDbName
(
&
name
,
dbFName
);
CHECK_CODE
(
taosHashPut
(
pCxt
->
pDbFNameHashObj
,
dbFName
,
strlen
(
dbFName
),
dbFName
,
sizeof
(
dbFName
)));
bool
existedUsing
=
false
;
// USING clause
if
(
TK_USING
==
sToken
.
type
)
{
existedUsing
=
true
;
CHECK_CODE
(
parseUsingClause
(
pCxt
,
&
name
,
tbFName
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
autoCreateTbl
=
true
;
}
else
{
}
char
*
pBoundColsStart
=
NULL
;
if
(
TK_NK_LP
==
sToken
.
type
)
{
// pSql -> field1_name, ...)
pBoundColsStart
=
pCxt
->
pSql
;
CHECK_CODE
(
ignoreBoundColumns
(
pCxt
));
// CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
}
if
(
TK_USING
==
sToken
.
type
)
{
CHECK_CODE
(
parseUsingClause
(
pCxt
,
&
name
,
tbFName
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
autoCreateTbl
=
true
;
}
else
if
(
!
existedUsing
)
{
CHECK_CODE
(
getTableMeta
(
pCxt
,
&
name
,
dbFName
));
}
...
...
@@ -1467,10 +1516,11 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
sizeof
(
SSubmitBlk
),
getTableInfo
(
pCxt
->
pTableMeta
).
rowSize
,
pCxt
->
pTableMeta
,
&
dataBuf
,
NULL
,
&
pCxt
->
createTblReq
));
if
(
TK_NK_LP
==
sToken
.
type
)
{
// pSql -> field1_name, ...)
if
(
NULL
!=
pBoundColsStart
)
{
char
*
pCurrPos
=
pCxt
->
pSql
;
pCxt
->
pSql
=
pBoundColsStart
;
CHECK_CODE
(
parseBoundColumns
(
pCxt
,
&
dataBuf
->
boundColumnInfo
,
getTableColumnSchema
(
pCxt
->
pTableMeta
)));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
)
;
pCxt
->
pSql
=
pCurrPos
;
}
if
(
TK_VALUES
==
sToken
.
type
)
{
...
...
@@ -1610,25 +1660,6 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
return
code
;
}
static
int32_t
skipParentheses
(
SInsertParseSyntaxCxt
*
pCxt
)
{
SToken
sToken
;
int32_t
expectRightParenthesis
=
1
;
while
(
1
)
{
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
if
(
TK_NK_LP
==
sToken
.
type
)
{
++
expectRightParenthesis
;
}
else
if
(
TK_NK_RP
==
sToken
.
type
&&
0
==
--
expectRightParenthesis
)
{
break
;
}
if
(
0
==
sToken
.
n
)
{
return
buildSyntaxErrMsg
(
&
pCxt
->
msg
,
") expected"
,
NULL
);
}
}
return
TSDB_CODE_SUCCESS
;
}
static
int32_t
skipBoundColumns
(
SInsertParseSyntaxCxt
*
pCxt
)
{
return
skipParentheses
(
pCxt
);
}
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
static
int32_t
skipValuesClause
(
SInsertParseSyntaxCxt
*
pCxt
)
{
int32_t
numOfRows
=
0
;
...
...
@@ -1717,15 +1748,15 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
SToken
tbnameToken
=
sToken
;
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
bool
existedUsing
=
false
;
// USING clause
if
(
TK_USING
==
sToken
.
type
)
{
existedUsing
=
true
;
CHECK_CODE
(
collectAutoCreateTableMetaKey
(
pCxt
,
&
tbnameToken
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
CHECK_CODE
(
collectTableMetaKey
(
pCxt
,
&
sToken
));
CHECK_CODE
(
skipUsingClause
(
pCxt
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
}
else
{
CHECK_CODE
(
collectTableMetaKey
(
pCxt
,
&
tbnameToken
));
}
if
(
TK_NK_LP
==
sToken
.
type
)
{
...
...
@@ -1734,6 +1765,17 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
}
if
(
TK_USING
==
sToken
.
type
&&
!
existedUsing
)
{
existedUsing
=
true
;
CHECK_CODE
(
collectAutoCreateTableMetaKey
(
pCxt
,
&
tbnameToken
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
CHECK_CODE
(
collectTableMetaKey
(
pCxt
,
&
sToken
));
CHECK_CODE
(
skipUsingClause
(
pCxt
));
NEXT_TOKEN
(
pCxt
->
pSql
,
sToken
);
}
else
{
CHECK_CODE
(
collectTableMetaKey
(
pCxt
,
&
tbnameToken
));
}
if
(
TK_VALUES
==
sToken
.
type
)
{
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
CHECK_CODE
(
skipValuesClause
(
pCxt
));
...
...
source/libs/parser/src/parTranslater.c
浏览文件 @
ba72ce2b
此差异已折叠。
点击以展开。
source/libs/parser/src/parUtil.c
浏览文件 @
ba72ce2b
...
...
@@ -215,13 +215,21 @@ int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...) {
return
errCode
;
}
int32_t
generateSyntaxErrMsgExt
(
SMsgBuf
*
pBuf
,
int32_t
errCode
,
const
char
*
pFormat
,
...)
{
va_list
vArgList
;
va_start
(
vArgList
,
pFormat
);
vsnprintf
(
pBuf
->
buf
,
pBuf
->
len
,
pFormat
,
vArgList
);
va_end
(
vArgList
);
return
errCode
;
}
int32_t
buildInvalidOperationMsg
(
SMsgBuf
*
pBuf
,
const
char
*
msg
)
{
strncpy
(
pBuf
->
buf
,
msg
,
pBuf
->
len
);
return
TSDB_CODE_TSC_INVALID_OPERATION
;
}
int32_t
buildSyntaxErrMsg
(
SMsgBuf
*
pBuf
,
const
char
*
additionalInfo
,
const
char
*
sourceStr
)
{
if
(
pBuf
==
NULL
)
return
TSDB_CODE_TSC_SQL_SYNTAX_ERROR
;
if
(
pBuf
==
NULL
)
return
TSDB_CODE_TSC_SQL_SYNTAX_ERROR
;
const
char
*
msgFormat1
=
"syntax error near
\'
%s
\'
"
;
const
char
*
msgFormat2
=
"syntax error near
\'
%s
\'
(%s)"
;
const
char
*
msgFormat3
=
"%s"
;
...
...
source/libs/parser/src/sql.c
浏览文件 @
ba72ce2b
此差异已折叠。
点击以展开。
source/libs/parser/test/parInitialATest.cpp
浏览文件 @
ba72ce2b
...
...
@@ -77,8 +77,6 @@ TEST_F(ParserInitialATest, alterLocal) {
clearAlterLocal
();
}
// todo ALTER stable
/*
* ALTER TABLE [db_name.]tb_name alter_table_clause
*
...
...
@@ -157,7 +155,7 @@ TEST_F(ParserInitialATest, alterSTable) {
};
setCheckDdlFunc
([
&
](
const
SQuery
*
pQuery
,
ParserStage
stage
)
{
ASSERT_EQ
(
nodeType
(
pQuery
->
pRoot
),
QUERY_NODE_ALTER_TABLE_STMT
);
ASSERT_EQ
(
nodeType
(
pQuery
->
pRoot
),
QUERY_NODE_ALTER_
SUPER_
TABLE_STMT
);
SMAlterStbReq
req
=
{
0
};
ASSERT_EQ
(
tDeserializeSMAlterStbReq
(
pQuery
->
pCmdMsg
->
pMsg
,
pQuery
->
pCmdMsg
->
msgLen
,
&
req
),
TSDB_CODE_SUCCESS
);
ASSERT_EQ
(
std
::
string
(
req
.
name
),
std
::
string
(
expect
.
name
));
...
...
@@ -181,44 +179,44 @@ TEST_F(ParserInitialATest, alterSTable) {
});
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
// run("ALTER TABLE st1 TTL 10");
// run("ALTER
S
TABLE st1 TTL 10");
// clearAlterStbReq();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_UPDATE_OPTIONS
,
0
,
nullptr
,
0
,
0
,
nullptr
,
"test"
);
run
(
"ALTER TABLE st1 COMMENT 'test'"
);
run
(
"ALTER
S
TABLE st1 COMMENT 'test'"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_ADD_COLUMN
,
1
,
"cc1"
,
TSDB_DATA_TYPE_BIGINT
);
run
(
"ALTER TABLE st1 ADD COLUMN cc1 BIGINT"
);
run
(
"ALTER
S
TABLE st1 ADD COLUMN cc1 BIGINT"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_DROP_COLUMN
,
1
,
"c1"
);
run
(
"ALTER TABLE st1 DROP COLUMN c1"
);
run
(
"ALTER
S
TABLE st1 DROP COLUMN c1"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
,
1
,
"c2"
,
TSDB_DATA_TYPE_VARCHAR
,
30
+
VARSTR_HEADER_SIZE
);
run
(
"ALTER TABLE st1 MODIFY COLUMN c2 VARCHAR(30)"
);
run
(
"ALTER
S
TABLE st1 MODIFY COLUMN c2 VARCHAR(30)"
);
clearAlterStbReq
();
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
// run("ALTER TABLE st1 RENAME COLUMN c1 cc1");
// run("ALTER
S
TABLE st1 RENAME COLUMN c1 cc1");
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_ADD_TAG
,
1
,
"tag11"
,
TSDB_DATA_TYPE_BIGINT
);
run
(
"ALTER TABLE st1 ADD TAG tag11 BIGINT"
);
run
(
"ALTER
S
TABLE st1 ADD TAG tag11 BIGINT"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_DROP_TAG
,
1
,
"tag1"
);
run
(
"ALTER TABLE st1 DROP TAG tag1"
);
run
(
"ALTER
S
TABLE st1 DROP TAG tag1"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES
,
1
,
"tag2"
,
TSDB_DATA_TYPE_VARCHAR
,
30
+
VARSTR_HEADER_SIZE
);
run
(
"ALTER TABLE st1 MODIFY TAG tag2 VARCHAR(30)"
);
run
(
"ALTER
S
TABLE st1 MODIFY TAG tag2 VARCHAR(30)"
);
clearAlterStbReq
();
setAlterStbReqFunc
(
"st1"
,
TSDB_ALTER_TABLE_UPDATE_TAG_NAME
,
2
,
"tag1"
,
0
,
0
,
"tag11"
);
run
(
"ALTER TABLE st1 RENAME TAG tag1 tag11"
);
run
(
"ALTER
S
TABLE st1 RENAME TAG tag1 tag11"
);
clearAlterStbReq
();
// todo
...
...
@@ -228,11 +226,11 @@ TEST_F(ParserInitialATest, alterSTable) {
TEST_F
(
ParserInitialATest
,
alterSTableSemanticCheck
)
{
useDb
(
"root"
,
"test"
);
run
(
"ALTER TABLE st1 RENAME COLUMN c1 cc1"
,
TSDB_CODE_PAR_INVALID_ALTER_TABLE
);
run
(
"ALTER
S
TABLE st1 RENAME COLUMN c1 cc1"
,
TSDB_CODE_PAR_INVALID_ALTER_TABLE
);
run
(
"ALTER TABLE st1 MODIFY COLUMN c2 NCHAR(10)"
,
TSDB_CODE_PAR_INVALID_MODIFY_COL
);
run
(
"ALTER
S
TABLE st1 MODIFY COLUMN c2 NCHAR(10)"
,
TSDB_CODE_PAR_INVALID_MODIFY_COL
);
run
(
"ALTER TABLE st1 MODIFY TAG tag2 NCHAR(10)"
,
TSDB_CODE_PAR_INVALID_MODIFY_COL
);
run
(
"ALTER
S
TABLE st1 MODIFY TAG tag2 NCHAR(10)"
,
TSDB_CODE_PAR_INVALID_MODIFY_COL
);
}
TEST_F
(
ParserInitialATest
,
alterTable
)
{
...
...
source/libs/planner/src/planOptimizer.c
浏览文件 @
ba72ce2b
...
...
@@ -720,7 +720,7 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
// TODO: remove it after full implementation of pushing down to child
if
(
1
!=
LIST_LENGTH
(
pAgg
->
node
.
pChildren
)
||
QUERY_NODE_LOGIC_PLAN_SCAN
!=
nodeType
(
nodesListGetNode
(
pAgg
->
node
.
pChildren
,
0
))
&&
QUERY_NODE_LOGIC_PLAN_PROJECT
!=
nodeType
(
nodesListGetNode
(
pAgg
->
node
.
pChildren
,
0
)))
{
QUERY_NODE_LOGIC_PLAN_PROJECT
!=
nodeType
(
nodesListGetNode
(
pAgg
->
node
.
pChildren
,
0
)))
{
return
TSDB_CODE_SUCCESS
;
}
...
...
@@ -1251,7 +1251,7 @@ static SNode* partTagsCreateWrapperFunc(const char* pFuncName, SNode* pNode) {
}
strcpy
(
pFunc
->
functionName
,
pFuncName
);
if
(
QUERY_NODE_COLUMN
==
nodeType
(
pNode
))
{
if
(
QUERY_NODE_COLUMN
==
nodeType
(
pNode
)
&&
COLUMN_TYPE_TBNAME
!=
((
SColumnNode
*
)
pNode
)
->
colType
)
{
SColumnNode
*
pCol
=
(
SColumnNode
*
)
pNode
;
partTagsSetAlias
(
pFunc
->
node
.
aliasName
,
sizeof
(
pFunc
->
node
.
aliasName
),
pCol
->
tableAlias
,
pCol
->
colName
);
}
else
{
...
...
@@ -1868,6 +1868,8 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) {
pCxt
->
errCode
=
terrno
;
return
DEAL_RES_ERROR
;
}
snprintf
(((
SExprNode
*
)
pExpr
)
->
aliasName
,
sizeof
(((
SExprNode
*
)
pExpr
)
->
aliasName
),
"%s"
,
((
SExprNode
*
)
*
pNode
)
->
aliasName
);
nodesDestroyNode
(
*
pNode
);
*
pNode
=
pExpr
;
}
...
...
source/libs/planner/src/planSpliter.c
浏览文件 @
ba72ce2b
...
...
@@ -986,6 +986,10 @@ static bool unionIsChildSubplan(SLogicNode* pLogicNode, int32_t groupId) {
return
((
SExchangeLogicNode
*
)
pLogicNode
)
->
srcGroupId
==
groupId
;
}
if
(
QUERY_NODE_LOGIC_PLAN_MERGE
==
nodeType
(
pLogicNode
))
{
return
((
SMergeLogicNode
*
)
pLogicNode
)
->
srcGroupId
==
groupId
;
}
SNode
*
pChild
;
FOREACH
(
pChild
,
pLogicNode
->
pChildren
)
{
bool
isChild
=
unionIsChildSubplan
((
SLogicNode
*
)
pChild
,
groupId
);
...
...
source/libs/planner/test/planOptimizeTest.cpp
浏览文件 @
ba72ce2b
...
...
@@ -68,6 +68,8 @@ TEST_F(PlanOptimizeTest, PartitionTags) {
run
(
"SELECT SUM(c1), tag1 FROM st1 GROUP BY tag1"
);
run
(
"SELECT SUM(c1), tag1 + 10 FROM st1 GROUP BY tag1 + 10"
);
run
(
"SELECT SUM(c1), tbname FROM st1 GROUP BY tbname"
);
}
TEST_F
(
PlanOptimizeTest
,
eliminateProjection
)
{
...
...
source/libs/planner/test/planSetOpTest.cpp
浏览文件 @
ba72ce2b
...
...
@@ -97,7 +97,15 @@ TEST_F(PlanSetOpTest, unionSubquery) {
run
(
"SELECT * FROM (SELECT c1, c2 FROM t1 UNION SELECT c1, c2 FROM t1)"
);
}
TEST_F
(
PlanSetOpTest
,
bug001
)
{
TEST_F
(
PlanSetOpTest
,
unionWithSubquery
)
{
useDb
(
"root"
,
"test"
);
run
(
"SELECT c1 FROM (SELECT c1 FROM st1) UNION SELECT c2 FROM (SELECT c1 AS c2 FROM st2)"
);
run
(
"SELECT c1 FROM (SELECT c1 FROM st1 ORDER BY c2) UNION SELECT c1 FROM (SELECT c1 FROM st2)"
);
}
TEST_F
(
PlanSetOpTest
,
unionDataTypeConversion
)
{
useDb
(
"root"
,
"test"
);
run
(
"SELECT c2 FROM t1 WHERE c1 IS NOT NULL GROUP BY c2 "
...
...
source/libs/transport/src/transComm.c
浏览文件 @
ba72ce2b
...
...
@@ -479,6 +479,10 @@ bool transEpSetIsEqual(SEpSet* a, SEpSet* b) {
}
return
true
;
}
static
int32_t
transGetRefMgt
()
{
//
return
refMgt
;
}
static
void
transInitEnv
()
{
refMgt
=
transOpenExHandleMgt
(
50000
);
...
...
@@ -486,8 +490,9 @@ static void transInitEnv() {
}
static
void
transDestroyEnv
()
{
// close ref
transCloseExHandleMgt
(
refMgt
);
transCloseExHandleMgt
();
}
void
transInit
()
{
// init env
taosThreadOnce
(
&
transModuleInit
,
transInitEnv
);
...
...
@@ -502,25 +507,25 @@ int32_t transOpenExHandleMgt(int size) {
}
void
transCloseExHandleMgt
()
{
// close ref
taosCloseRef
(
refMgt
);
taosCloseRef
(
transGetRefMgt
()
);
}
int64_t
transAddExHandle
(
void
*
p
)
{
// acquire extern handle
return
taosAddRef
(
refMgt
,
p
);
return
taosAddRef
(
transGetRefMgt
()
,
p
);
}
int32_t
transRemoveExHandle
(
int64_t
refId
)
{
// acquire extern handle
return
taosRemoveRef
(
refMgt
,
refId
);
return
taosRemoveRef
(
transGetRefMgt
()
,
refId
);
}
SExHandle
*
transAcquireExHandle
(
int64_t
refId
)
{
// acquire extern handle
return
(
SExHandle
*
)
taosAcquireRef
(
refMgt
,
refId
);
return
(
SExHandle
*
)
taosAcquireRef
(
transGetRefMgt
()
,
refId
);
}
int32_t
transReleaseExHandle
(
int64_t
refId
)
{
// release extern handle
return
taosReleaseRef
(
refMgt
,
refId
);
return
taosReleaseRef
(
transGetRefMgt
()
,
refId
);
}
void
transDestoryExHandle
(
void
*
handle
)
{
if
(
handle
==
NULL
)
{
...
...
source/util/src/terror.c
浏览文件 @
ba72ce2b
...
...
@@ -598,6 +598,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TQ_NO_COMMITTED_OFFSET, "No committed offset
TAOS_DEFINE_ERROR
(
TSDB_CODE_INDEX_REBUILDING
,
"Index is rebuilding"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_INDEX_REBUILDING
,
"Invalid index file"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_TMQ_INVALID_MSG
,
"Invalid message"
)
...
...
tests/script/tsim/insert/update0.sim
浏览文件 @
ba72ce2b
...
...
@@ -8,8 +8,8 @@ print =============== create database
sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table
and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20))
rollup(min)
;
print =============== create super table
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20));
sql show stables
if $rows != 1 then
...
...
tests/system-test/1-insert/block_wise.py
0 → 100644
浏览文件 @
ba72ce2b
import
datetime
import
re
from
dataclasses
import
dataclass
,
field
from
typing
import
List
,
Any
,
Tuple
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
*
from
util.constant
import
*
PRIMARY_COL
=
"ts"
INT_COL
=
"c_int"
BINT_COL
=
"c_bint"
SINT_COL
=
"c_sint"
TINT_COL
=
"c_tint"
FLOAT_COL
=
"c_float"
DOUBLE_COL
=
"c_double"
BOOL_COL
=
"c_bool"
TINT_UN_COL
=
"c_utint"
SINT_UN_COL
=
"c_usint"
BINT_UN_COL
=
"c_ubint"
INT_UN_COL
=
"c_uint"
BINARY_COL
=
"c_binary"
NCHAR_COL
=
"c_nchar"
TS_COL
=
"c_ts"
NUM_COL
=
[
INT_COL
,
BINT_COL
,
SINT_COL
,
TINT_COL
,
FLOAT_COL
,
DOUBLE_COL
,
]
CHAR_COL
=
[
BINARY_COL
,
NCHAR_COL
,
]
BOOLEAN_COL
=
[
BOOL_COL
,
]
TS_TYPE_COL
=
[
TS_COL
,
]
INT_TAG
=
"t_int"
ALL_COL
=
[
PRIMARY_COL
,
INT_COL
,
BINT_COL
,
SINT_COL
,
TINT_COL
,
FLOAT_COL
,
DOUBLE_COL
,
BINARY_COL
,
NCHAR_COL
,
BOOL_COL
,
TS_COL
]
TAG_COL
=
[
INT_TAG
]
# insert data args:
TIME_STEP
=
10000
NOW
=
int
(
datetime
.
datetime
.
timestamp
(
datetime
.
datetime
.
now
())
*
1000
)
# init db/table
DBNAME
=
"db"
STBNAME
=
"stb1"
CTBNAME
=
"ct1"
NTBNAME
=
"nt1"
@
dataclass
class
DataSet
:
ts_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
int_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
bint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
sint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
tint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
int_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
bint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
sint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
tint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
float_data
:
List
[
float
]
=
field
(
default_factory
=
list
)
double_data
:
List
[
float
]
=
field
(
default_factory
=
list
)
bool_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
binary_data
:
List
[
str
]
=
field
(
default_factory
=
list
)
nchar_data
:
List
[
str
]
=
field
(
default_factory
=
list
)
@
dataclass
class
BSMAschema
:
creation
:
str
=
"CREATE"
tb_type
:
str
=
"stable"
tbname
:
str
=
STBNAME
cols
:
Tuple
[
str
]
=
None
tags
:
Tuple
[
str
]
=
None
sma_flag
:
str
=
"SMA"
sma_cols
:
Tuple
[
str
]
=
None
create_tabel_sql
:
str
=
None
other
:
Any
=
None
drop
:
str
=
"DROP"
drop_flag
:
str
=
"INDEX"
querySmaOptimize
:
int
=
1
show
:
str
=
"SHOW"
show_msg
:
str
=
"INDEXES"
show_oper
:
str
=
"FROM"
dbname
:
str
=
None
rollup_db
:
bool
=
False
def
__post_init__
(
self
):
if
isinstance
(
self
.
other
,
dict
):
for
k
,
v
in
self
.
other
.
items
():
if
k
.
lower
()
==
"tbname"
and
isinstance
(
v
,
str
)
and
not
self
.
tbname
:
self
.
tbname
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"cols"
and
(
isinstance
(
v
,
tuple
)
or
isinstance
(
v
,
list
))
and
not
self
.
cols
:
self
.
cols
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"tags"
and
(
isinstance
(
v
,
tuple
)
or
isinstance
(
v
,
list
))
and
not
self
.
tags
:
self
.
tags
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"sma_flag"
and
isinstance
(
v
,
str
)
and
not
self
.
sma_flag
:
self
.
sma_flag
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"sma_cols"
and
(
isinstance
(
v
,
tuple
)
or
isinstance
(
v
,
list
))
and
not
self
.
sma_cols
:
self
.
sma_cols
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"create_tabel_sql"
and
isinstance
(
v
,
str
)
and
not
self
.
create_tabel_sql
:
self
.
create_tabel_sql
=
v
del
self
.
other
[
k
]
# bSma show and drop operator is not completed
if
k
.
lower
()
==
"drop_flag"
and
isinstance
(
v
,
str
)
and
not
self
.
drop_flag
:
self
.
drop_flag
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"show_msg"
and
isinstance
(
v
,
str
)
and
not
self
.
show_msg
:
self
.
show_msg
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"dbname"
and
isinstance
(
v
,
str
)
and
not
self
.
dbname
:
self
.
dbname
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"show_oper"
and
isinstance
(
v
,
str
)
and
not
self
.
show_oper
:
self
.
show_oper
=
v
del
self
.
other
[
k
]
if
k
.
lower
()
==
"rollup_db"
and
isinstance
(
v
,
bool
)
and
not
self
.
rollup_db
:
self
.
rollup_db
=
v
del
self
.
other
[
k
]
# from ...pytest.util.sql import *
# from ...pytest.util.constant import *
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
f
"start to excute
{
__file__
}
"
)
tdSql
.
init
(
conn
.
cursor
(),
False
)
self
.
precision
=
"ms"
self
.
sma_count
=
0
self
.
sma_created_index
=
[]
def
__create_sma_index
(
self
,
sma
:
BSMAschema
):
if
sma
.
create_tabel_sql
:
sql
=
sma
.
create_tabel_sql
else
:
sql
=
f
"
{
sma
.
creation
}
{
sma
.
tb_type
}
{
sma
.
tbname
}
(
{
', '
.
join
(
sma
.
cols
)
}
) "
if
sma
.
tb_type
==
"stable"
or
(
sma
.
tb_type
==
"table"
and
sma
.
tags
):
sql
=
f
"
{
sma
.
creation
}
{
sma
.
tb_type
}
{
sma
.
tbname
}
(
{
', '
.
join
(
sma
.
cols
)
}
) tags (
{
', '
.
join
(
sma
.
tags
)
}
) "
if
sma
.
sma_flag
:
sql
+=
sma
.
sma_flag
if
sma
.
sma_cols
:
sql
+=
f
"(
{
', '
.
join
(
sma
.
sma_cols
)
}
)"
if
isinstance
(
sma
.
other
,
dict
):
for
k
,
v
in
sma
.
other
.
items
():
if
isinstance
(
v
,
tuple
)
or
isinstance
(
v
,
list
):
sql
+=
f
"
{
k
}
(
{
' '
.
join
(
v
)
}
)"
else
:
sql
+=
f
"
{
k
}
{
v
}
"
if
isinstance
(
sma
.
other
,
tuple
)
or
isinstance
(
sma
.
other
,
list
):
sql
+=
" "
.
join
(
sma
.
other
)
if
isinstance
(
sma
.
other
,
int
)
or
isinstance
(
sma
.
other
,
float
)
or
isinstance
(
sma
.
other
,
str
):
sql
+=
f
"
{
sma
.
other
}
"
return
sql
def
__get_bsma_table_col_tag_str
(
self
,
sql
:
str
):
p
=
re
.
compile
(
r
"[(](.*)[)]"
,
re
.
S
)
if
"tags"
in
(
col_str
:
=
sql
):
col_str
=
re
.
findall
(
p
,
sql
.
split
(
"tags"
)[
0
])[
0
].
split
(
","
)
if
(
tag_str
:
=
re
.
findall
(
p
,
sql
.
split
(
"tags"
)[
1
])[
0
].
split
(
","
)
):
col_str
.
extend
(
tag_str
)
return
col_str
def
__get_bsma_col_tag_names
(
self
,
col_tags
:
list
):
return
[
col_tag
.
strip
().
split
(
" "
)[
0
]
for
col_tag
in
col_tags
]
@
property
def
__get_db_tbname
(
self
):
tb_list
=
[]
tdSql
.
query
(
"show tables"
)
for
row
in
tdSql
.
queryResult
:
tb_list
.
append
(
row
[
0
])
tdSql
.
query
(
"show tables"
)
for
row
in
tdSql
.
queryResult
:
tb_list
.
append
(
row
[
0
])
return
tb_list
def
__bsma_create_check
(
self
,
sma
:
BSMAschema
):
if
not
sma
.
creation
:
return
False
if
not
sma
.
create_tabel_sql
and
(
not
sma
.
tbname
or
not
sma
.
tb_type
or
not
sma
.
cols
):
return
False
if
not
sma
.
create_tabel_sql
and
(
sma
.
tb_type
==
"stable"
and
not
sma
.
tags
):
return
False
if
not
sma
.
sma_flag
or
not
isinstance
(
sma
.
sma_flag
,
str
)
or
sma
.
sma_flag
.
upper
()
!=
"SMA"
:
return
False
if
sma
.
tbname
in
self
.
__get_db_tbname
:
return
False
if
sma
.
create_tabel_sql
:
col_tag_list
=
self
.
__get_bsma_col_tag_names
(
self
.
__get_bsma_table_col_tag_str
(
sma
.
create_tabel_sql
))
else
:
col_str
=
list
(
sma
.
cols
)
if
sma
.
tags
:
col_str
.
extend
(
list
(
sma
.
tags
))
col_tag_list
=
self
.
__get_bsma_col_tag_names
(
col_str
)
if
not
sma
.
sma_cols
:
return
False
for
col
in
sma
.
sma_cols
:
if
col
not
in
col_tag_list
:
return
False
return
True
def
bsma_create_check
(
self
,
sma
:
BSMAschema
):
if
self
.
__bsma_create_check
(
sma
):
tdSql
.
query
(
self
.
__create_sma_index
(
sma
))
tdLog
.
info
(
f
"current sql:
{
self
.
__create_sma_index
(
sma
)
}
"
)
else
:
tdSql
.
error
(
self
.
__create_sma_index
(
sma
))
def
__sma_drop_check
(
self
,
sma
:
BSMAschema
):
pass
def
sma_drop_check
(
self
,
sma
:
BSMAschema
):
pass
def
__show_sma_index
(
self
,
sma
:
BSMAschema
):
pass
def
__sma_show_check
(
self
,
sma
:
BSMAschema
):
pass
def
sma_show_check
(
self
,
sma
:
BSMAschema
):
pass
@
property
def
__create_sma_sql
(
self
):
err_sqls
=
[]
cur_sqls
=
[]
# err_set
### case 1: required fields check
err_sqls
.
append
(
BSMAschema
(
creation
=
""
,
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
err_sqls
.
append
(
BSMAschema
(
tbname
=
""
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
err_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
err_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(),
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
err_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_flag
=
""
,
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
err_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
()
)
)
### case 2:
err_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
({
BINT_COL
})
)
)
# current_set
cur_sqls
.
append
(
BSMAschema
(
tbname
=
"stb2"
,
cols
=
(
f
"
{
PRIMARY_COL
}
timestamp"
,
f
"
{
INT_COL
}
int"
),
tags
=
(
f
"
{
INT_TAG
}
int"
,),
sma_cols
=
(
PRIMARY_COL
,
INT_COL
)
)
)
return
err_sqls
,
cur_sqls
def
test_create_sma
(
self
):
err_sqls
,
cur_sqls
=
self
.
__create_sma_sql
for
err_sql
in
err_sqls
:
self
.
bsma_create_check
(
err_sql
)
for
cur_sql
in
cur_sqls
:
self
.
bsma_create_check
(
cur_sql
)
@
property
def
__drop_sma_sql
(
self
):
err_sqls
=
[]
cur_sqls
=
[]
# err_set
## case 1: required fields check
return
err_sqls
,
cur_sqls
def
test_drop_sma
(
self
):
err_sqls
,
cur_sqls
=
self
.
__drop_sma_sql
for
err_sql
in
err_sqls
:
self
.
sma_drop_check
(
err_sql
)
for
cur_sql
in
cur_sqls
:
self
.
sma_drop_check
(
cur_sql
)
def
all_test
(
self
):
self
.
test_create_sma
()
def
__create_tb
(
self
):
tdLog
.
printNoPrefix
(
"==========step: create table"
)
create_stb_sql
=
f
'''create table
{
STBNAME
}
(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp,
{
TINT_UN_COL
}
tinyint unsigned,
{
SINT_UN_COL
}
smallint unsigned,
{
INT_UN_COL
}
int unsigned,
{
BINT_UN_COL
}
bigint unsigned
) tags (
{
INT_TAG
}
int)
'''
create_ntb_sql
=
f
'''create table
{
NTBNAME
}
(
ts timestamp,
{
INT_COL
}
int,
{
BINT_COL
}
bigint,
{
SINT_COL
}
smallint,
{
TINT_COL
}
tinyint,
{
FLOAT_COL
}
float,
{
DOUBLE_COL
}
double,
{
BOOL_COL
}
bool,
{
BINARY_COL
}
binary(16),
{
NCHAR_COL
}
nchar(32),
{
TS_COL
}
timestamp,
{
TINT_UN_COL
}
tinyint unsigned,
{
SINT_UN_COL
}
smallint unsigned,
{
INT_UN_COL
}
int unsigned,
{
BINT_UN_COL
}
bigint unsigned
)
'''
tdSql
.
execute
(
create_stb_sql
)
tdSql
.
execute
(
create_ntb_sql
)
for
i
in
range
(
4
):
tdSql
.
execute
(
f
'create table ct
{
i
+
1
}
using stb1 tags (
{
i
+
1
}
)'
)
def
__data_set
(
self
,
rows
):
data_set
=
DataSet
()
for
i
in
range
(
rows
):
data_set
.
ts_data
.
append
(
NOW
+
1
*
(
rows
-
i
))
data_set
.
int_data
.
append
(
rows
-
i
)
data_set
.
bint_data
.
append
(
11111
*
(
rows
-
i
))
data_set
.
sint_data
.
append
(
111
*
(
rows
-
i
)
%
32767
)
data_set
.
tint_data
.
append
(
11
*
(
rows
-
i
)
%
127
)
data_set
.
int_un_data
.
append
(
rows
-
i
)
data_set
.
bint_un_data
.
append
(
11111
*
(
rows
-
i
))
data_set
.
sint_un_data
.
append
(
111
*
(
rows
-
i
)
%
32767
)
data_set
.
tint_un_data
.
append
(
11
*
(
rows
-
i
)
%
127
)
data_set
.
float_data
.
append
(
1.11
*
(
rows
-
i
))
data_set
.
double_data
.
append
(
1100.0011
*
(
rows
-
i
))
data_set
.
bool_data
.
append
((
rows
-
i
)
%
2
)
data_set
.
binary_data
.
append
(
f
'binary
{
(
rows
-
i
)
}
'
)
data_set
.
nchar_data
.
append
(
f
'nchar_测试_
{
(
rows
-
i
)
}
'
)
return
data_set
def
__insert_data
(
self
):
tdLog
.
printNoPrefix
(
"==========step: start inser data into tables now....."
)
data
=
self
.
__data_set
(
rows
=
self
.
rows
)
# now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
null_data
=
'''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
zero_data
=
"0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"
for
i
in
range
(
self
.
rows
):
row_data
=
f
'''
{
data
.
int_data
[
i
]
}
,
{
data
.
bint_data
[
i
]
}
,
{
data
.
sint_data
[
i
]
}
,
{
data
.
tint_data
[
i
]
}
,
{
data
.
float_data
[
i
]
}
,
{
data
.
double_data
[
i
]
}
,
{
data
.
bool_data
[
i
]
}
, '
{
data
.
binary_data
[
i
]
}
', '
{
data
.
nchar_data
[
i
]
}
',
{
data
.
ts_data
[
i
]
}
,
{
data
.
tint_un_data
[
i
]
}
,
{
data
.
sint_un_data
[
i
]
}
,
{
data
.
int_un_data
[
i
]
}
,
{
data
.
bint_un_data
[
i
]
}
'''
neg_row_data
=
f
'''
{
-
1
*
data
.
int_data
[
i
]
}
,
{
-
1
*
data
.
bint_data
[
i
]
}
,
{
-
1
*
data
.
sint_data
[
i
]
}
,
{
-
1
*
data
.
tint_data
[
i
]
}
,
{
-
1
*
data
.
float_data
[
i
]
}
,
{
-
1
*
data
.
double_data
[
i
]
}
,
{
data
.
bool_data
[
i
]
}
, '
{
data
.
binary_data
[
i
]
}
', '
{
data
.
nchar_data
[
i
]
}
',
{
data
.
ts_data
[
i
]
}
,
{
1
*
data
.
tint_un_data
[
i
]
}
,
{
1
*
data
.
sint_un_data
[
i
]
}
,
{
1
*
data
.
int_un_data
[
i
]
}
,
{
1
*
data
.
bint_un_data
[
i
]
}
'''
tdSql
.
execute
(
f
"insert into ct1 values (
{
NOW
-
i
*
TIME_STEP
}
,
{
row_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct2 values (
{
NOW
-
i
*
int
(
TIME_STEP
*
0.6
)
}
,
{
neg_row_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct4 values (
{
NOW
-
i
*
int
(
TIME_STEP
*
0.8
)
}
,
{
row_data
}
)"
)
tdSql
.
execute
(
f
"insert into
{
NTBNAME
}
values (
{
NOW
-
i
*
int
(
TIME_STEP
*
1.2
)
}
,
{
row_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct2 values (
{
NOW
+
int
(
TIME_STEP
*
0.6
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct2 values (
{
NOW
-
(
self
.
rows
+
1
)
*
int
(
TIME_STEP
*
0.6
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct2 values (
{
NOW
-
self
.
rows
*
int
(
TIME_STEP
*
0.29
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct4 values (
{
NOW
+
int
(
TIME_STEP
*
0.8
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct4 values (
{
NOW
-
(
self
.
rows
+
1
)
*
int
(
TIME_STEP
*
0.8
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into ct4 values (
{
NOW
-
self
.
rows
*
int
(
TIME_STEP
*
0.39
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into
{
NTBNAME
}
values (
{
NOW
+
int
(
TIME_STEP
*
1.2
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into
{
NTBNAME
}
values (
{
NOW
-
(
self
.
rows
+
1
)
*
int
(
TIME_STEP
*
1.2
)
}
,
{
null_data
}
)"
)
tdSql
.
execute
(
f
"insert into
{
NTBNAME
}
values (
{
NOW
-
self
.
rows
*
int
(
TIME_STEP
*
0.59
)
}
,
{
null_data
}
)"
)
def
run
(
self
):
self
.
rows
=
10
tdLog
.
printNoPrefix
(
"==========step0:all check"
)
tdLog
.
printNoPrefix
(
"==========step1:create table in normal database"
)
tdSql
.
prepare
()
self
.
__create_tb
()
self
.
__insert_data
()
self
.
all_test
()
# drop databases, create same name db、stb and sma index
tdSql
.
prepare
()
self
.
__create_tb
()
self
.
__insert_data
()
self
.
all_test
()
tdLog
.
printNoPrefix
(
"==========step2:create table in rollup database"
)
tdSql
.
execute
(
"create database db3 retentions 1s:4m,2s:8m,3s:12m"
)
tdSql
.
execute
(
"use db3"
)
tdSql
.
query
(
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma(
{
INT_COL
}
)"
)
tdSql
.
execute
(
"drop database if exists db1 "
)
tdSql
.
execute
(
"drop database if exists db2 "
)
tdDnodes
.
stop
(
1
)
tdDnodes
.
start
(
1
)
tdLog
.
printNoPrefix
(
"==========step4:after wal, all check again "
)
tdSql
.
prepare
()
self
.
__create_tb
()
self
.
__insert_data
()
self
.
all_test
()
# drop databases, create same name db、stb and sma index
tdSql
.
prepare
()
self
.
__create_tb
()
self
.
__insert_data
()
self
.
all_test
()
def
stop
(
self
):
tdSql
.
close
()
tdLog
.
success
(
f
"
{
__file__
}
successfully executed"
)
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
tests/system-test/1-insert/create_retentions.py
浏览文件 @
ba72ce2b
import
datetime
from
dataclasses
import
dataclass
from
dataclasses
import
dataclass
,
field
from
typing
import
List
from
util.log
import
*
from
util.sql
import
*
...
...
@@ -36,36 +36,20 @@ NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
@
dataclass
class
DataSet
:
ts_data
:
List
[
int
]
=
None
int_data
:
List
[
int
]
=
None
bint_data
:
List
[
int
]
=
None
sint_data
:
List
[
int
]
=
None
tint_data
:
List
[
int
]
=
None
int_un_data
:
List
[
int
]
=
None
bint_un_data
:
List
[
int
]
=
None
sint_un_data
:
List
[
int
]
=
None
tint_un_data
:
List
[
int
]
=
None
float_data
:
List
[
float
]
=
None
double_data
:
List
[
float
]
=
None
bool_data
:
List
[
int
]
=
None
binary_data
:
List
[
str
]
=
None
nchar_data
:
List
[
str
]
=
None
def
__post_init__
(
self
):
self
.
ts_data
=
[]
self
.
int_data
=
[]
self
.
bint_data
=
[]
self
.
sint_data
=
[]
self
.
tint_data
=
[]
self
.
int_un_data
=
[]
self
.
bint_un_data
=
[]
self
.
sint_un_data
=
[]
self
.
tint_un_data
=
[]
self
.
float_data
=
[]
self
.
double_data
=
[]
self
.
bool_data
=
[]
self
.
binary_data
=
[]
self
.
nchar_data
=
[]
ts_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
int_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
bint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
sint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
tint_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
int_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
bint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
sint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
tint_un_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
float_data
:
List
[
float
]
=
field
(
default_factory
=
list
)
double_data
:
List
[
float
]
=
field
(
default_factory
=
list
)
bool_data
:
List
[
int
]
=
field
(
default_factory
=
list
)
binary_data
:
List
[
str
]
=
field
(
default_factory
=
list
)
nchar_data
:
List
[
str
]
=
field
(
default_factory
=
list
)
class
TDTestCase
:
...
...
@@ -107,15 +91,15 @@ class TDTestCase:
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(count) watermark 1min"
,
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(min) max_delay -1s"
,
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(min) watermark -1m"
,
#
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ",
#
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ",
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) watermark 1m "
,
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) max_delay 1m "
,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int,
{
BINARY_COL
}
binary(16)) tags (tag1 int) rollup(avg) watermark 1s"
,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int,
{
BINARY
_COL
}
nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m"
,
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {
BINARY
_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s",
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY
_COL} nchar(16)) tags (tag1 int) " ,
#
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " ,
#
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " ,
#
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " ,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int,
{
NCHAR
_COL
}
nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m"
,
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {
NCHAR
_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s",
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int,
{
NCHAR
_COL
}
nchar(16)) tags (tag1 int) "
,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) "
,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) "
,
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int,
{
BINARY_COL
}
nchar(16)) "
,
# watermark, max_delay: [0, 900000], [ms, s, m, ?]
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(min) max_delay 1u"
,
...
...
@@ -136,8 +120,9 @@ class TDTestCase:
f
"create stable stb2 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(min) watermark 5s max_delay 1m"
,
f
"create stable stb3 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(max) watermark 5s max_delay 1m"
,
f
"create stable stb4 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(sum) watermark 5s max_delay 1m"
,
# f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m",
# f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m",
f
"create stable stb5 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m"
,
f
"create stable stb6 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m"
,
f
"create stable stb7 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma(
{
INT_COL
}
)"
,
]
def
test_create_stb
(
self
):
...
...
@@ -150,7 +135,7 @@ class TDTestCase:
# assert "rollup" in tdSql.description
tdSql
.
checkRows
(
len
(
self
.
create_stable_sql_current
))
# tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup databas
e
tdSql
.
execute
(
"use db"
)
# because db is a noraml database, not a rollup database, should not be able to create a rollup stabl
e
# tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m")
...
...
@@ -210,20 +195,6 @@ class TDTestCase:
data_set
.
binary_data
.
append
(
f
'binary
{
(
rows
-
i
)
}
'
)
data_set
.
nchar_data
.
append
(
f
'nchar_测试_
{
(
rows
-
i
)
}
'
)
# neg_data_set.ts_data.append(-1 * i)
# neg_data_set.int_data.append(-i)
# neg_data_set.bint_data.append(-11111 * i)
# neg_data_set.sint_data.append(-111 * i % 32767)
# neg_data_set.tint_data.append(-11 * i % 127)
# neg_data_set.int_un_data.append(-i)
# neg_data_set.bint_un_data.append(-11111 * i)
# neg_data_set.sint_un_data.append(-111 * i % 32767)
# neg_data_set.tint_un_data.append(-11 * i % 127)
# neg_data_set.float_data.append(-1.11 * i)
# neg_data_set.double_data.append(-1100.0011 * i)
# neg_data_set.binary_data.append(f'binary{i}')
# neg_data_set.nchar_data.append(f'nchar_测试_{i}')
return
data_set
def
__insert_data
(
self
):
...
...
@@ -279,9 +250,14 @@ class TDTestCase:
tdLog
.
printNoPrefix
(
"==========step2:create table in rollup database"
)
tdSql
.
execute
(
"create database db3 retentions 1s:4m,2s:8m,3s:12m"
)
tdSql
.
execute
(
"drop database if exists db1 "
)
tdSql
.
execute
(
"drop database if exists db2 "
)
tdSql
.
execute
(
"use db3"
)
self
.
__create_tb
()
self
.
__insert_data
()
# self.__create_tb()
# self.__insert_data()
self
.
all_test
()
tdSql
.
execute
(
"drop database if exists db1 "
)
tdSql
.
execute
(
"drop database if exists db2 "
)
...
...
tests/system-test/1-insert/time_range_wise.py
浏览文件 @
ba72ce2b
...
...
@@ -325,7 +325,7 @@ class TDTestCase:
def
__sma_create_check
(
self
,
sma
:
SMAschema
):
if
self
.
updatecfgDict
[
"querySmaOptimize"
]
==
0
:
return
False
#
#
TODO: if database is a rollup-db, can not create sma index
# TODO: if database is a rollup-db, can not create sma index
# tdSql.query("select database()")
# if sma.rollup_db :
# return False
...
...
@@ -493,8 +493,8 @@ class TDTestCase:
err_sqls
,
cur_sqls
=
self
.
__drop_sma_sql
for
err_sql
in
err_sqls
:
self
.
sma_drop_check
(
err_sql
)
#
for cur_sql in cur_sqls:
#
self.sma_drop_check(cur_sql)
for
cur_sql
in
cur_sqls
:
self
.
sma_drop_check
(
cur_sql
)
def
all_test
(
self
):
self
.
test_create_sma
()
...
...
@@ -605,24 +605,23 @@ class TDTestCase:
tdLog
.
printNoPrefix
(
"==========step1:create table in normal database"
)
tdSql
.
prepare
()
self
.
__create_tb
()
#
self.__insert_data()
self
.
__insert_data
()
self
.
all_test
()
# drop databases, create same name db、stb and sma index
# tdSql.prepare()
# self.__create_tb()
# self.__insert_data()
# self.all_test()
return
tdSql
.
prepare
()
self
.
__create_tb
()
self
.
__insert_data
()
self
.
all_test
()
tdLog
.
printNoPrefix
(
"==========step2:create table in rollup database"
)
tdSql
.
execute
(
"create database db3 retentions 1s:4m,2s:8m,3s:12m"
)
tdSql
.
execute
(
"use db3"
)
self
.
__create_tb
()
self
.
__insert_data
()
# self.__create_tb()
tdSql
.
execute
(
f
"create stable stb1 (
{
PRIMARY_COL
}
timestamp,
{
INT_COL
}
int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma(
{
INT_COL
}
) "
)
self
.
all_test
()
# self.__insert_data()
tdSql
.
execute
(
"drop database if exists db1 "
)
tdSql
.
execute
(
"drop database if exists db2 "
)
...
...
tests/system-test/2-query/json_tag.py
浏览文件 @
ba72ce2b
...
...
@@ -566,7 +566,7 @@ class TDTestCase:
tdSql
.
checkRows
(
3
)
tdSql
.
query
(
"select bottom(dataint,100) from jsons1 where jtag->'tag1'>1"
)
tdSql
.
checkRows
(
3
)
tdSql
.
query
(
"select percentile(dataint,20) from jsons1 where jtag->'tag1'>1"
)
#
tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
tdSql
.
query
(
"select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1"
)
tdSql
.
checkData
(
0
,
0
,
1.5
)
# tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")
...
...
tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
0 → 100644
浏览文件 @
ba72ce2b
import
taos
import
sys
import
time
import
socket
import
os
import
threading
import
math
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
*
from
util.common
import
*
sys
.
path
.
append
(
"./7-tmq"
)
from
tmqCommon
import
*
class TDTestCase:
    """System test: TMQ consumption of data already flushed to tsdb (multi-vgroup variant).

    Case 3 checks that two consumers in the same group together consume exactly the
    inserted row count; case 4 checks that a consumer killed mid-consumption and
    restarted resumes from the committed offset (second run consumes a strict subset).

    Relies on framework globals provided by the module-level imports:
    tdSql/tdLog (util.sql/util.log), tdCom (util.common), tdDnodes (util.dnodes),
    and tmqCom (tmqCommon).
    """

    def __init__(self):
        # Scale knobs; copied into each case's paraDict before use.
        self.vgroups = 4
        self.ctbNum = 10
        self.rowsPerTbl = 10000

    def init(self, conn, logSql):
        """Framework entry point: bind the SQL helper to this connection.

        NOTE(review): logSql is accepted but unused here — appears to match the
        pattern of sibling tmq tests; confirm intentional.
        """
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), False)

    def prepareTestEnv(self):
        """Create database, stable and child tables, insert data, then restart
        taosd so the inserted rows are flushed from memory into tsdb (disk)
        before the consume cases run."""
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    1,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     10,
                    'rowsPerTbl': 10000,
                    'batchNum':   10,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  3,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   1}

        # Override dict defaults with the instance-level scale settings.
        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"], paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=1)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"])
        tdLog.info("create ctb")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"], ctbPrefix=paraDict['ctbPrefix'], ctbNum=paraDict["ctbNum"], ctbStartIdx=paraDict['ctbStartIdx'])
        tdLog.info("insert data")
        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql, dbName=paraDict["dbName"], ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"], rowsPerTbl=paraDict["rowsPerTbl"], batchNum=paraDict["batchNum"], startTs=paraDict["startTs"], ctbStartIdx=paraDict['ctbStartIdx'])

        # Restarting the dnode forces the in-memory buffer to disk so that
        # the subsequent snapshot consumption actually reads from tsdb.
        tdLog.info("restart taosd to ensure that the data falls into the disk")
        tdDnodes.stop(1)
        tdDnodes.start(1)
        return

    def tmqCase3(self):
        """Case 3: two consumers (ids 3 and 4) share one topic/group; the sum of
        their consumed rows must equal the total number of rows inserted."""
        tdLog.printNoPrefix("======== test case 3: ")
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    1,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     10,
                    'rowsPerTbl': 10000,
                    'batchNum':   10,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  10,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   1}
        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb with filter")
        queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # Row count of the topic's query is the ground truth for consumption.
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())
        totalRowsInserted = expectRowsList[0]

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId   = 3
        # First consumer expects roughly one third of all rows.
        expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
        topicList    = topicNameList[0]
        ifcheckdata  = 1
        ifManualCommit = 1
        keyList      = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        # Second consumer in the same group expects the remaining two thirds.
        consumerId   = 4
        expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 / 3)
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor 0")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("wait the consume result")
        expectRows = 2
        resultList = tmqCom.selectConsumeResult(expectRows)
        actConsumeTotalRows = resultList[0] + resultList[1]

        # Both consumers combined must account for every inserted row.
        if not (totalRowsInserted == actConsumeTotalRows):
            tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
            # NOTE(review): consumerId is 4 here (last assigned) even though the
            # failure concerns both consumers — confirm the id in this message.
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        time.sleep(10)
        for i in range(len(topicNameList)):
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 3 end ...... ")

    def tmqCase4(self):
        """Case 4: start consumer 5, kill the tmq_sim process after the first
        commit notification, then restart as consumer 6; the second run must
        consume a non-empty strict subset of the total rows (resume from the
        committed offset)."""
        tdLog.printNoPrefix("======== test case 4: ")
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    1,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     10,
                    'rowsPerTbl': 10000,
                    'batchNum':   10,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  10,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   1}
        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb with filter")
        queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # Row count of the topic's query is the ground truth for consumption.
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())
        totalRowsInserted = expectRowsList[0]

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId   = 5
        expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
        topicList    = topicNameList[0]
        ifcheckdata  = 1
        ifManualCommit = 1
        keyList      = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor 0")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        # Kill the consumer as soon as it reports its first commit, so a
        # partial offset has been persisted before the restart below.
        tdLog.info("wait commit notify")
        tmqCom.getStartCommitNotifyFromTmqsim()

        tdLog.info("pkill consume processor")
        tdCom.killProcessor("tmq_sim")

        # time.sleep(10)

        # reinit consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()
        consumerId   = 6
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor 1")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])
        tdLog.info("wait the consume result")

        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        actConsumeTotalRows = resultList[0]

        # The restarted consumer must pick up mid-stream: more than zero rows
        # but fewer than the full set (some were consumed before the kill).
        if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
            tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
            tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        time.sleep(10)
        for i in range(len(topicNameList)):
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 4 end ...... ")

    def run(self):
        """Framework entry point: prepare the environment once, then run both cases."""
        tdSql.prepare()
        self.prepareTestEnv()
        self.tmqCase3()
        self.tmqCase4()

    def stop(self):
        """Framework teardown: close the SQL connection and log success."""
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
# Module-level synchronization object; not referenced anywhere in this file's
# visible code. NOTE(review): presumably kept for parity with sibling tmq
# tests that use it — confirm before removing.
event = threading.Event()

# Register this test case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tests/system-test/7-tmq/tmqConsFromTsdb1.py
浏览文件 @
ba72ce2b
...
...
@@ -85,7 +85,7 @@ class TDTestCase:
'rowsPerTbl'
:
10000
,
'batchNum'
:
10
,
'startTs'
:
1640966400000
,
# 2022-01-01 00:00:00.000
'pollDelay'
:
1
0
,
'pollDelay'
:
1
5
,
'showMsg'
:
1
,
'showRow'
:
1
,
'snapshot'
:
1
}
...
...
tests/system-test/fulltest.sh
浏览文件 @
ba72ce2b
...
...
@@ -19,11 +19,15 @@ python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
python3 ./test.py
-f
1-insert/opentsdb_telnet_line_taosc_insert.py
python3 ./test.py
-f
1-insert/opentsdb_json_taosc_insert.py
python3 ./test.py
-f
1-insert/test_stmt_muti_insert_query.py
python3 ./test.py
-f
1-insert/test_stmt_set_tbname_tag.py
python3 ./test.py
-f
1-insert/test_stmt_set_tbname_tag.py
python3 ./test.py
-f
1-insert/alter_stable.py
python3 ./test.py
-f
1-insert/alter_table.py
python3 ./test.py
-f
1-insert/insertWithMoreVgroup.py
python3 ./test.py
-f
1-insert/table_comment.py
python3 ./test.py
-f
1-insert/time_range_wise.py
python3 ./test.py
-f
1-insert/block_wise.py
python3 ./test.py
-f
1-insert/create_retentions.py
#python3 ./test.py -f 1-insert/table_param_ttl.py
python3 ./test.py
-f
2-query/between.py
python3 ./test.py
-f
2-query/distinct.py
...
...
@@ -114,19 +118,19 @@ python3 ./test.py -f 2-query/twa.py
python3 ./test.py
-f
2-query/irate.py
python3 ./test.py
-f
2-query/function_null.py
python3 ./test.py
-f
2-query/queryQnode.py
python3 ./test.py
-f
2-query/queryQnode.py
#python3 ./test.py -f 6-cluster/5dnode1mnode.py
#python3 ./test.py -f 6-cluster/5dnode1mnode.py
#python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py
-f
6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py
-N
5
-M
3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
...
...
@@ -158,3 +162,4 @@ python3 ./test.py -f 7-tmq/tmqAlterSchema.py
python3 ./test.py
-f
7-tmq/tmqConsFromTsdb.py
python3 ./test.py
-f
7-tmq/tmqConsFromTsdb1.py
python3 ./test.py
-f
7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py
-f
7-tmq/tmqConsFromTsdb1-mutilVg.py
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录