Commit 64c06c95

Merge pull request #5683 from taosdata/develop

Merge from develop into master

Authored by Shengliang Guan on Apr 06, 2021; committed via GitHub on Apr 06, 2021.
Parents: acaf5f25, 25396ed6
Showing 46 changed files with 1,164 additions and 821 deletions (+1164 −821)
cmake/version.inc                                            +1   −1
documentation20/cn/02.getting-started/docs.md                +5   −5
documentation20/cn/08.connector/docs.md                      +2   −2
snap/snapcraft.yaml                                          +2   −2
src/client/inc/tscUtil.h                                     +4   −13
src/client/inc/tsclient.h                                    +17  −0
src/client/src/tscLocalMerge.c                               +19  −4
src/client/src/tscParseInsert.c                              +250 −284
src/client/src/tscUtil.c                                     +79  −21
src/cq/src/cqMain.c                                          +6   −2
src/cq/test/cqtest.c                                         +1   −1
src/dnode/src/dnodeVWrite.c                                  +1   −1
src/inc/tcq.h                                                +1   −1
src/inc/tsdb.h                                               +1   −1
src/kit/taosdemo/taosdemo.c                                  +317 −272
src/kit/taosdump/taosdump.c                                  +186 −127
src/mnode/src/mnodeDnode.c                                   +5   −0
src/mnode/src/mnodeMnode.c                                   +2   −0
src/query/inc/qExecutor.h                                    +2   −1
src/query/src/qExecutor.c                                    +23  −5
src/query/src/qTokenizer.c                                   +5   −1
src/query/src/qUtil.c                                        +4   −7
src/query/tests/resultBufferTest.cpp                         +3   −3
src/sync/inc/syncInt.h                                       +1   −1
src/sync/src/syncMain.c                                      +7   −2
src/tsdb/src/tsdbMain.c                                      +2   −2
src/tsdb/src/tsdbMeta.c                                      +2   −2
src/util/inc/tstoken.h                                       +1   −3
src/vnode/src/vnodeWrite.c                                   +5   −1
tests/pytest/fulltest.sh                                     +1   −1
tests/pytest/insert/metadataUpdate.py                        +20  −27
tests/pytest/tools/insert-tblimit-tboffset-createdb.json     +57  −0
tests/pytest/tools/insert-tblimit-tboffset-insertrec.json    +59  −0
tests/pytest/tools/insert-tblimit-tboffset0.json             +2   −2
tests/pytest/tools/insert-tblimit1-tboffset.json             +2   −2
tests/pytest/tools/taosdemo-sampledata.json                  +0   −2
tests/pytest/tools/taosdemoTestLimitOffset.py                +4   −1
tests/pytest/tools/taosdemoTestSampleData.py                 +1   −1
tests/script/general/parser/alter.sim                        +13  −6
tests/script/general/parser/gendata.sh                       +6   −0
tests/script/general/parser/import_file.sim                  +7   −11
tests/script/general/parser/select_with_tags.sim             +9   −0
tests/tsim/inc/sim.h                                         +1   −0
tests/tsim/src/simExe.c                                      +10  −1
tests/tsim/src/simMain.c                                     +9   −1
tests/tsim/src/simSystem.c                                   +9   −1
cmake/version.inc

@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.18.0")
+  SET(TD_VER_NUMBER "2.0.19.0")
 ENDIF ()

 IF (DEFINED VERCOMPATIBLE)
documentation20/cn/02.getting-started/docs.md

@@ -101,7 +101,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;"
 ### 运行SQL命令脚本
-TDengine 终端可以通过 `source` 命令来运行SQL命令脚本.
+TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
 ```mysql
 taos> source <filename>;
@@ -109,10 +109,10 @@ taos> source <filename>;
 ### Shell小技巧
-- 可以使用上下光标键查看已经历史输入的命令
-- 修改用户密码。在shell中使用alter user命令
+- 可以使用上下光标键查看历史输入的指令
+- 修改用户密码。在 shell 中使用 alter user 指令
 - ctrl+c 中止正在进行中的查询
-- 执行 `RESET QUERY CACHE` 清空本地缓存的表的 schema
+- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema

 ## <a class="anchor" id="demo"></a>TDengine 极速体验
@@ -212,7 +212,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
 | **Python**  | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
 | **Go**      | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
 | **NodeJs**  | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
-| **C#**      | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **C#**      | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
documentation20/cn/08.connector/docs.md

@@ -14,7 +14,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
 | **Python**  | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
 | **Go**      | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
 | **NodeJs**  | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
-| **C#**      | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **C#**      | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
 其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。
@@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。
 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
-* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。
+* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
 * 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。

 ## <a class="anchor" id="driver"></a>安装连接器驱动步骤
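The two connector notes touched above (one SQL statement per `taos_query` call, one connection per application thread) are easy to get wrong. The following is a minimal, illustrative C sketch of that usage pattern, not part of this commit; the host, credentials, database and table name are placeholders.

```c
#include <stdio.h>
#include <taos.h>   /* TDengine C/C++ connector */

/* One dedicated connection per worker, and exactly one SQL statement
 * per taos_query() call, as the connector notes recommend. */
static void *worker(void *arg) {
  (void)arg;
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "db", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return NULL;
  }

  TAOS_RES *res = taos_query(conn, "select count(*) from demo.t1");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  return NULL;
}
```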
snap/snapcraft.yaml

@@ -1,6 +1,6 @@
 name: tdengine
 base: core18
-version: '2.0.18.0'
+version: '2.0.19.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
       - usr/bin/taosd
       - usr/bin/taos
       - usr/bin/taosdemo
-      - usr/lib/libtaos.so.2.0.18.0
+      - usr/lib/libtaos.so.2.0.19.0
      - usr/lib/libtaos.so.1
      - usr/lib/libtaos.so
src/client/inc/tscUtil.h

@@ -36,19 +36,6 @@ extern "C" {
 #define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
   (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))

-typedef struct SParsedColElem {
-  int16_t  colIndex;
-  uint16_t offset;
-} SParsedColElem;
-
-typedef struct SParsedDataColInfo {
-  int16_t        numOfCols;
-  int16_t        numOfAssignedCols;
-  SParsedColElem elems[TSDB_MAX_COLUMNS];
-  bool           hasVal[TSDB_MAX_COLUMNS];
-} SParsedDataColInfo;
-
 #pragma pack(push,1)
 // this struct is transfered as binary, padding two bytes to avoid
 // an 'uid' whose low bytes is 0xff being recoginized as NULL,
@@ -118,6 +105,8 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, ...);
 void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
 void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
+void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo);
+
 SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes, uint32_t offset);
@@ -140,6 +129,8 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
 bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
 bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
 bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
+bool tscIsTopbotQuery(SQueryInfo* pQueryInfo);
+int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo);

 bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
 bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
src/client/inc/tsclient.h

@@ -175,6 +175,19 @@ typedef struct SParamInfo {
   uint32_t offset;
 } SParamInfo;

+typedef struct SBoundColumn {
+  bool    hasVal;  // denote if current column has bound or not
+  int32_t offset;  // all column offset value
+} SBoundColumn;
+
+typedef struct SParsedDataColInfo {
+  int16_t       numOfCols;
+  int16_t       numOfBound;
+  int32_t      *boundedColumns;
+  SBoundColumn *cols;
+} SParsedDataColInfo;
+
 typedef struct STableDataBlocks {
   SName       tableName;
   int8_t      tsSource;     // where does the UNIX timestamp come from, server or client
@@ -189,6 +202,8 @@ typedef struct STableDataBlocks {
   STableMeta *pTableMeta;   // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache
   char       *pData;

+  SParsedDataColInfo boundColumnInfo;
+
   // for parameter ('?') binding
   uint32_t    numOfAllocedParams;
   uint32_t    numOfParams;
@@ -425,6 +440,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
 int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo);
 void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);

+void destroyTableNameList(SSqlCmd* pCmd);
 void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
@@ -462,6 +478,7 @@ char* tscGetSqlStr(SSqlObj* pSql);
 bool tscIsQueryWithLimit(SSqlObj* pSql);
 bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
+void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
 char* tscGetErrorMsgPayload(SSqlCmd* pCmd);
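For context on the new structures: `boundColumnInfo` records, for every column of the table schema, a cumulative byte offset into the row buffer plus a `hasVal` flag, while `boundedColumns` lists only the columns actually named in the INSERT statement. Below is a self-contained sketch of that offset layout with simplified stand-in types and a hypothetical helper name; it is illustrative only and is not the client's actual code.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for SSchema / SBoundColumn (illustration only). */
typedef struct { int32_t bytes; } MySchema;
typedef struct { bool hasVal; int32_t offset; } MyBoundColumn;

/* Hypothetical helper mirroring the idea behind tscSetBoundColumnInfo():
 * column i starts where column i-1 ended, and every column starts out bound. */
static MyBoundColumn *buildBoundColumns(const MySchema *schema, int32_t numOfCols) {
  MyBoundColumn *cols = calloc(numOfCols, sizeof(MyBoundColumn));
  if (cols == NULL) return NULL;

  for (int32_t i = 0; i < numOfCols; ++i) {
    if (i > 0) {
      cols[i].offset = cols[i - 1].offset + schema[i - 1].bytes;  /* cumulative width */
    }
    cols[i].hasVal = true;
  }
  return cols;  /* caller frees, as tscDestroyBoundColumnInfo() does for the real struct */
}
```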
src/client/src/tscLocalMerge.c

@@ -338,11 +338,20 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
   pReducer->resColModel->capacity = pReducer->nResultBufSize;
   pReducer->finalModel = pFFModel;

+  int32_t expandFactor = 1;
   if (finalmodel->rowSize > 0) {
-    pReducer->resColModel->capacity /= finalmodel->rowSize;
+    bool topBotQuery = tscIsTopbotQuery(pQueryInfo);
+    if (topBotQuery) {
+      expandFactor = tscGetTopbotQueryParam(pQueryInfo);
+      pReducer->resColModel->capacity /= (finalmodel->rowSize * expandFactor);
+      pReducer->resColModel->capacity *= expandFactor;
+    } else {
+      pReducer->resColModel->capacity /= finalmodel->rowSize;
+    }
   }

   assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pReducer->rowSize);
   pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);

   if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
@@ -1150,9 +1159,10 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
     memset(buf, 0, (size_t)maxBufSize);
     memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes);

+    char* next = pCtx->pOutput;
     for (int32_t i = 0; i < inc; ++i) {
-      pCtx->pOutput += pCtx->outputBytes;
-      memcpy(pCtx->pOutput, buf, (size_t)pCtx->outputBytes);
+      next += pCtx->outputBytes;
+      memcpy(next, buf, (size_t)pCtx->outputBytes);
     }
   }
@@ -1440,6 +1450,11 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
   tFilePage  *tmpBuffer = pLocalMerge->pTempBuffer;

+  int32_t remain = 1;
+  if (tscIsTopbotQuery(pQueryInfo)) {
+    remain = tscGetTopbotQueryParam(pQueryInfo);
+  }
+
   if (doHandleLastRemainData(pSql)) {
     return TSDB_CODE_SUCCESS;
   }
@@ -1528,7 +1543,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
      * if the previous group does NOT generate any result (pResBuf->num == 0),
      * continue to process results instead of return results.
      */
-    if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
+    if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num + remain >= pLocalMerge->resColModel->capacity)) {
       // does not belong to the same group
       bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
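The capacity arithmetic introduced above rounds the merge buffer down to a whole multiple of the TOP/BOTTOM parameter k, so one group's k result rows are never split across a flush. A tiny standalone example with made-up numbers, not taken from the client code:

```c
#include <stdio.h>

/* Illustrative only: mirrors the rounding used in tscCreateLocalMerger above.
 * With a 16 KB result buffer, 100-byte rows and TOP(..., 5) (k = 5),
 * capacity becomes a multiple of k instead of the raw row count. */
int main(void) {
  int nResultBufSize = 16384;
  int rowSize        = 100;
  int k              = 5;   /* would come from tscGetTopbotQueryParam() */

  int capacity = nResultBufSize / (rowSize * k) * k;   /* 32 * 5 = 160 */
  printf("capacity = %d (raw row count would be %d)\n",
         capacity, nResultBufSize / rowSize);          /* 160 vs 163 */
  return 0;
}
```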
src/client/src/tscParseInsert.c

@@ -40,6 +40,7 @@ enum {
 };

 static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
+static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end);

 static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
   errno = 0;
@@ -94,12 +95,12 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
      */
     SStrToken valueToken;
     index = 0;
-    sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
+    sToken = tStrGetToken(pTokenEnd, &index, false);
     pTokenEnd += index;

     if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
       index = 0;
-      valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
+      valueToken = tStrGetToken(pTokenEnd, &index, false);
       pTokenEnd += index;

       if (valueToken.n < 2) {
@@ -117,7 +118,7 @@
     if (sToken.type == TK_PLUS) {
       useconds += interval;
     } else {
-      useconds = (useconds >= interval) ? useconds - interval : 0;
+      useconds = useconds - interval;
     }

     *next = pTokenEnd;
@@ -127,13 +128,12 @@
   return TSDB_CODE_SUCCESS;
 }

-// todo extract the null value check
 static bool isNullStr(SStrToken* pToken) {
   return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
                                        (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
 }

-int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec) {
+int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec) {
   int64_t iv;
   int32_t ret;
   char *  endptr = NULL;
@@ -417,29 +417,32 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
   return TSDB_CODE_SUCCESS;
 }

-int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd *pCmd, int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
-  int32_t index = 0;
-  SStrToken sToken = {0};
-  char *payload = pDataBlocks->pData + pDataBlocks->size;
+int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len, char *tmpTokenBuf) {
+  int32_t   index = 0;
+  SStrToken sToken = {0};
+  char     *payload = pDataBlocks->pData + pDataBlocks->size;
+
+  SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
+  SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta);

   // 1. set the parsed value from sql string
   int32_t rowSize = 0;
-  for (int i = 0; i < spd->numOfAssignedCols; ++i) {
+  for (int i = 0; i < spd->numOfBound; ++i) {
     // the start position in data block buffer of current value in sql
-    char *start = payload + spd->elems[i].offset;
-    int16_t colIndex = spd->elems[i].colIndex;
-    SSchema *pSchema = schema + colIndex;
+    int32_t colIndex = spd->boundedColumns[i];
+    char *start = payload + spd->cols[colIndex].offset;
+    SSchema *pSchema = &schema[colIndex];
     rowSize += pSchema->bytes;

     index = 0;
-    sToken = tStrGetToken(*str, &index, true, 0, NULL);
+    sToken = tStrGetToken(*str, &index, true);
     *str += index;

     if (sToken.type == TK_QUESTION) {
       if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
-        *code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
-        return -1;
+        return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
       }

       uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
@@ -448,15 +451,13 @@
       }

       strcpy(pCmd->payload, "client out of memory");
-      *code = TSDB_CODE_TSC_OUT_OF_MEMORY;
-      return -1;
+      return TSDB_CODE_TSC_OUT_OF_MEMORY;
     }

     int16_t type = sToken.type;
     if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
          type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) ||
         (sToken.n == 0) || (type == TK_RP)) {
-      *code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
-      return -1;
+      return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
     }

     // Remove quotation marks
@@ -485,26 +486,23 @@
     }

     bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
-    int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
+    int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
     if (ret != TSDB_CODE_SUCCESS) {
-      *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
-      return -1;  // NOTE: here 0 mean error!
+      return ret;
     }

     if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
       tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
-      *code = TSDB_CODE_TSC_INVALID_TIME_STAMP;
-      return -1;
+      return TSDB_CODE_TSC_INVALID_TIME_STAMP;
     }
   }

   // 2. set the null value for the columns that do not assign values
-  if (spd->numOfAssignedCols < spd->numOfCols) {
+  if (spd->numOfBound < spd->numOfCols) {
     char *ptr = payload;

     for (int32_t i = 0; i < spd->numOfCols; ++i) {
-      if (!spd->hasVal[i]) {  // current column do not have any value to insert, set it to null
+      if (!spd->cols[i].hasVal) {  // current column do not have any value to insert, set it to null
         if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
           varDataSetLen(ptr, sizeof(int8_t));
           *(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
@@ -522,7 +520,8 @@
     rowSize = (int32_t)(ptr - payload);
   }

-  return rowSize;
+  *len = rowSize;
+  return TSDB_CODE_SUCCESS;
 }

 static int32_t rowDataCompar(const void *lhs, const void *rhs) {
@@ -536,80 +535,79 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
   }
 }

-int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows, SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t* code, char* tmpTokenBuf) {
-  int32_t index = 0;
-  SStrToken sToken;
-
-  int32_t numOfRows = 0;
-
-  SSchema *pSchema = tscGetTableSchema(pTableMeta);
+int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char* tmpTokenBuf) {
+  int32_t index = 0;
+  int32_t code  = 0;
+
+  (*numOfRows) = 0;
+
+  SStrToken sToken;
+
+  STableMeta* pTableMeta = pDataBlock->pTableMeta;
   STableComInfo tinfo = tscGetTableInfo(pTableMeta);

   int32_t  precision = tinfo.precision;

-  if (spd->hasVal[0] == false) {
-    *code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str);
-    return -1;
-  }
-
   while (1) {
     index = 0;
-    sToken = tStrGetToken(*str, &index, false, 0, NULL);
+    sToken = tStrGetToken(*str, &index, false);
     if (sToken.n == 0 || sToken.type != TK_LP) break;

     *str += index;
-    if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
+    if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
       int32_t tSize;
-      *code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
-      if (*code != TSDB_CODE_SUCCESS) {  //TODO pass the correct error code to client
+      code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
+      if (code != TSDB_CODE_SUCCESS) {  //TODO pass the correct error code to client
         strcpy(pCmd->payload, "client out of memory");
-        return -1;
+        return TSDB_CODE_TSC_OUT_OF_MEMORY;
       }

       ASSERT(tSize > maxRows);
       maxRows = tSize;
     }

-    int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf);
-    if (len <= 0) {  // error message has been set in tsParseOneRowData
-      return -1;
+    int32_t len = 0;
+    code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf);
+    if (code != TSDB_CODE_SUCCESS) {  // error message has been set in tsParseOneRow, return directly
+      return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
     }

     pDataBlock->size += len;

     index = 0;
-    sToken = tStrGetToken(*str, &index, false, 0, NULL);
+    sToken = tStrGetToken(*str, &index, false);
     *str += index;
     if (sToken.n == 0 || sToken.type != TK_RP) {
       tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
-      *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
+      code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
       return -1;
     }

-    numOfRows++;
+    (*numOfRows)++;
   }

-  if (numOfRows <= 0) {
+  if ((*numOfRows) <= 0) {
     strcpy(pCmd->payload, "no any data points");
-    *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
-    return -1;
+    return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
   } else {
-    return numOfRows;
+    return TSDB_CODE_SUCCESS;
   }
 }

-static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema, int32_t numOfCols) {
-  spd->numOfCols = numOfCols;
-  spd->numOfAssignedCols = numOfCols;
-
-  for (int32_t i = 0; i < numOfCols; ++i) {
-    spd->hasVal[i] = true;
-    spd->elems[i].colIndex = i;
-    if (i > 0) {
-      spd->elems[i].offset = spd->elems[i - 1].offset + pSchema[i - 1].bytes;
-    }
+void tscSetBoundColumnInfo(SParsedDataColInfo* pColInfo, SSchema* pSchema, int32_t numOfCols) {
+  pColInfo->numOfCols  = numOfCols;
+  pColInfo->numOfBound = numOfCols;
+
+  pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
+  pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
+
+  for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
+    if (i > 0) {
+      pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
+    }
+
+    pColInfo->cols[i].hasVal = true;
+    pColInfo->boundedColumns[i] = i;
   }
 }
@@ -697,33 +695,26 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
   }
 }

-static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColInfo* spd, int32_t *totalNum) {
-  STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
-  STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
-  STableComInfo tinfo = tscGetTableInfo(pTableMeta);
-
-  STableDataBlocks *dataBuf = NULL;
-  int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
-      sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
-  if (ret != TSDB_CODE_SUCCESS) {
-    return ret;
-  }
+static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
+  STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);

   int32_t maxNumOfRows;
-  ret = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
-  if (TSDB_CODE_SUCCESS != ret) {
+  int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
+  if (TSDB_CODE_SUCCESS != code) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  int32_t code = TSDB_CODE_TSC_INVALID_SQL;
-  char*   tmpTokenBuf = calloc(1, 16*1024);  // used for deleting Escape character: \\, \', \"
+  code = TSDB_CODE_TSC_INVALID_SQL;
+  char *tmpTokenBuf = calloc(1, 16*1024);  // used for deleting Escape character: \\, \', \"
   if (NULL == tmpTokenBuf) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf);
+  int32_t numOfRows = 0;
+  code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);

   free(tmpTokenBuf);
-  if (numOfRows <= 0) {
+  if (code != TSDB_CODE_SUCCESS) {
     return code;
   }
@@ -736,25 +727,23 @@
   }

   SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
-  code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
   if (code != TSDB_CODE_SUCCESS) {
     tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
     return code;
   }

-  dataBuf->vgId = pTableMeta->vgId;
   dataBuf->numOfTables = 1;
-
   *totalNum += numOfRows;
   return TSDB_CODE_SUCCESS;
 }

-static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
+static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) {
   int32_t   index = 0;
   SStrToken sToken = {0};
   SStrToken tableToken = {0};
   int32_t   code = TSDB_CODE_SUCCESS;

   const int32_t TABLE_INDEX = 0;
   const int32_t STABLE_INDEX = 1;
@@ -767,38 +756,37 @@
   // get the token of specified table
   index = 0;
-  tableToken = tStrGetToken(sql, &index, false, 0, NULL);
+  tableToken = tStrGetToken(sql, &index, false);
   sql += index;

-  char *cstart = NULL;
-  char *cend = NULL;
-
   // skip possibly exists column list
   index = 0;
-  sToken = tStrGetToken(sql, &index, false, 0, NULL);
+  sToken = tStrGetToken(sql, &index, false);
   sql += index;

   int32_t numOfColList = 0;
-  bool    createTable = false;

   // Bind table columns list in string, skip it and continue
   if (sToken.type == TK_LP) {
-    cstart = &sToken.z[0];
-    index = 0;
+    *boundColumn = &sToken.z[0];
+
     while (1) {
-      sToken = tStrGetToken(sql, &index, false, 0, NULL);
+      index = 0;
+      sToken = tStrGetToken(sql, &index, false);
+
       if (sToken.type == TK_RP) {
-        cend = &sToken.z[0];
         break;
       }

       sql += index;
       ++numOfColList;
     }

-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;
   }

-  if (numOfColList == 0 && cstart != NULL) {
+  if (numOfColList == 0 && (*boundColumn) != NULL) {
     return TSDB_CODE_TSC_INVALID_SQL;
   }
@@ -806,7 +794,7 @@
   if (sToken.type == TK_USING) {  // create table if not exists according to the super table
     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;

     //the source super table is moved to the secondary position of the pTableMetaInfo list
@@ -835,82 +823,42 @@
     SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
     STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);

     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
-    sql += index;
-
     SParsedDataColInfo spd = {0};
-    uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
-    spd.numOfCols = numOfTags;
-
-    // if specify some tags column
-    if (sToken.type != TK_LP) {
-      tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
-    } else {
-      /* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
-       * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
-      int16_t offset[TSDB_MAX_COLUMNS] = {0};
-      for (int32_t t = 1; t < numOfTags; ++t) {
-        offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
-      }
-
-      while (1) {
-        index = 0;
-        sToken = tStrGetToken(sql, &index, false, 0, NULL);
-        sql += index;
-
-        if (TK_STRING == sToken.type) {
-          strdequote(sToken.z);
-          sToken.n = (uint32_t)strtrim(sToken.z);
-        }
-
-        if (sToken.type == TK_RP) {
-          break;
-        }
-
-        bool findColumnIndex = false;
-
-        // todo speedup by using hash list
-        for (int32_t t = 0; t < numOfTags; ++t) {
-          if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
-            SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
-            pElem->offset = offset[t];
-            pElem->colIndex = t;
-
-            if (spd.hasVal[t] == true) {
-              return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
-            }
-
-            spd.hasVal[t] = true;
-            findColumnIndex = true;
-            break;
-          }
-        }
-
-        if (!findColumnIndex) {
-          return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z);
-        }
-      }
-
-      if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) {
-        return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z);
-      }
+    tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta));
+
+    index = 0;
+    sToken = tStrGetToken(sql, &index, false);
+    if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
+      return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
+    }
+
+    // parse the bound tags column
+    if (sToken.type == TK_LP) {
+      /*
+       * insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
+       * tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
+       */
+      char* end = NULL;
+      code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
+      sql = end;

       index = 0;
-      sToken = tStrGetToken(sql, &index, false, 0, NULL);
+      sToken = tStrGetToken(sql, &index, false);  // keywords of "TAGS"
       sql += index;
+    } else {
+      sql += index;
     }

     if (sToken.type != TK_TAGS) {
       return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
     }

     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;
     if (sToken.type != TK_LP) {
-      return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
+      return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
     }

     SKVRowBuilder kvRowBuilder = {0};
@@ -918,13 +866,11 @@
       return TSDB_CODE_TSC_OUT_OF_MEMORY;
     }

-    uint32_t ignoreTokenTypes = TK_LP;
-    uint32_t numOfIgnoreToken = 1;
-    for (int i = 0; i < spd.numOfAssignedCols; ++i) {
-      SSchema* pSchema = pTagSchema + spd.elems[i].colIndex;
+    for (int i = 0; i < spd.numOfBound; ++i) {
+      SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]];

       index = 0;
-      sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
+      sToken = tStrGetToken(sql, &index, true);
       sql += index;

       if (TK_ILLEGAL == sToken.type) {
@@ -943,7 +889,7 @@
       }

       char tagVal[TSDB_MAX_TAGS_LEN];
-      code = tsParseOneColumnData(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
+      code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
       if (code != TSDB_CODE_SUCCESS) {
         tdDestroyKVRowBuilder(&kvRowBuilder);
         return code;
@@ -952,6 +898,8 @@
       tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
     }

+    tscDestroyBoundColumnInfo(&spd);
+
     SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
     tdDestroyKVRowBuilder(&kvRowBuilder);
     if (row == NULL) {
@@ -974,7 +922,7 @@
     pCmd->tagData.data = pTag;

     index = 0;
-    sToken = tStrGetToken(sql, &index, false, 0, NULL);
+    sToken = tStrGetToken(sql, &index, false);
     sql += index;
     if (sToken.n == 0 || sToken.type != TK_RP) {
       return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
@@ -989,33 +937,21 @@
       return ret;
     }

-    createTable = true;
     code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
     if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
       return code;
     }
-
   } else {
-    if (cstart != NULL) {
-      sql = cstart;
-    } else {
-      sql = sToken.z;
-    }
+    sql = sToken.z;
     code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
-
     if (pCmd->curSql == NULL) {
       assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
     }
   }

-  int32_t len = (int32_t)(cend - cstart + 1);
-  if (cstart != NULL && createTable == true) {
-    /* move the column list to start position of the next accessed points */
-    memmove(sql - len, cstart, len);
-
-    *sqlstr = sql - len;
-  } else {
-    *sqlstr = sql;
-  }
+  *sqlstr = sql;

   if (*sqlstr == NULL) {
     code = TSDB_CODE_TSC_INVALID_SQL;
@@ -1043,6 +979,76 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
   return TSDB_CODE_SUCCESS;
 }

+static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end) {
+  pColInfo->numOfBound = 0;
+
+  memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols);
+  for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
+    pColInfo->cols[i].hasVal = false;
+  }
+
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  int32_t index = 0;
+  SStrToken sToken = tStrGetToken(str, &index, false);
+  str += index;
+
+  if (sToken.type != TK_LP) {
+    code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
+    goto _clean;
+  }
+
+  while (1) {
+    index = 0;
+    sToken = tStrGetToken(str, &index, false);
+    str += index;
+
+    if (TK_STRING == sToken.type) {
+      tscDequoteAndTrimToken(&sToken);
+    }
+
+    if (sToken.type == TK_RP) {
+      if (end != NULL) {  // set the end position
+        *end = str;
+      }
+
+      break;
+    }
+
+    bool findColumnIndex = false;
+
+    // todo speedup by using hash list
+    for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
+      if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
+        if (pColInfo->cols[t].hasVal == true) {
+          code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
+          goto _clean;
+        }
+
+        pColInfo->cols[t].hasVal = true;
+        pColInfo->boundedColumns[pColInfo->numOfBound] = t;
+        pColInfo->numOfBound += 1;
+        findColumnIndex = true;
+        break;
+      }
+    }
+
+    if (!findColumnIndex) {
+      code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z);
+      goto _clean;
+    }
+  }
+
+  memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0,
+         sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
+
+  return TSDB_CODE_SUCCESS;
+
+_clean:
+  pCmd->curSql        = NULL;
+  pCmd->parseFinished = 1;
+  return code;
+}
+
 /**
  * parse insert sql
  * @param pSql
@@ -1083,7 +1089,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
   while (1) {
     int32_t index = 0;
-    SStrToken sToken = tStrGetToken(str, &index, false, 0, NULL);
+    SStrToken sToken = tStrGetToken(str, &index, false);

     // no data in the sql string anymore.
     if (sToken.n == 0) {
@@ -1108,7 +1114,7 @@
     pCmd->curSql = sToken.z;
     char      buf[TSDB_TABLE_FNAME_LEN];
     SStrToken sTblToken;
     sTblToken.z = buf;
     // Check if the table name available or not
@@ -1121,7 +1127,8 @@
       goto _clean;
     }

-    if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) {
+    char* bindedColumns = NULL;
+    if ((code = tscCheckIfCreateTable(&str, pSql, &bindedColumns)) != TSDB_CODE_SUCCESS) {
       /*
        * After retrieving the table meta from server, the sql string will be parsed from the paused position.
        * And during the getTableMetaCallback function, the sql string will be parsed from the paused position.
@@ -1129,7 +1136,7 @@
       if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
         return code;
       }

       tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
       pCmd->curSql = NULL;
       goto _clean;
@@ -1141,41 +1148,22 @@
     }

     index = 0;
-    sToken = tStrGetToken(str, &index, false, 0, NULL);
+    sToken = tStrGetToken(str, &index, false);
     str += index;

-    if (sToken.n == 0) {
+    if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
       code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
       goto _clean;
     }

-    STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-    if (sToken.type == TK_VALUES) {
-      SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
-
-      SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
-      tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
-
-      if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
-        goto _clean;
-      }
-
-      /*
-       * app here insert data in different vnodes, so we need to set the following
-       * data in another submit procedure using async insert routines
-       */
-      code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
-      if (code != TSDB_CODE_SUCCESS) {
-        goto _clean;
-      }
-    } else if (sToken.type == TK_FILE) {
+    STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+    if (sToken.type == TK_FILE) {
       if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
         goto _clean;
       }

       index = 0;
-      sToken = tStrGetToken(str, &index, false, 0, NULL);
+      sToken = tStrGetToken(str, &index, false);
       if (sToken.type != TK_STRING && sToken.type != TK_ID) {
         code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
         goto _clean;
@@ -1199,83 +1187,63 @@
       tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
       wordfree(&full_path);
-    } else if (sToken.type == TK_LP) {
-      /* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */
-      STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
-      SSchema *   pSchema = tscGetTableSchema(pTableMeta);
-
-      if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
-        goto _clean;
-      }
-
-      SParsedDataColInfo spd = {0};
-      spd.numOfCols = tinfo.numOfColumns;
-
-      int16_t offset[TSDB_MAX_COLUMNS] = {0};
-      for (int32_t t = 1; t < tinfo.numOfColumns; ++t) {
-        offset[t] = offset[t - 1] + pSchema[t - 1].bytes;
-      }
-
-      while (1) {
-        index = 0;
-        sToken = tStrGetToken(str, &index, false, 0, NULL);
-        str += index;
-
-        if (TK_STRING == sToken.type) {
-          tscDequoteAndTrimToken(&sToken);
-        }
-
-        if (sToken.type == TK_RP) {
-          break;
-        }
-
-        bool findColumnIndex = false;
-
-        // todo speedup by using hash list
-        for (int32_t t = 0; t < tinfo.numOfColumns; ++t) {
-          if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
-            SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
-            pElem->offset = offset[t];
-            pElem->colIndex = t;
-
-            if (spd.hasVal[t] == true) {
-              code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
-              goto _clean;
-            }
-
-            spd.hasVal[t] = true;
-            findColumnIndex = true;
-            break;
-          }
-        }
-
-        if (!findColumnIndex) {
-          code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z);
-          goto _clean;
-        }
-      }
-
-      if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > tinfo.numOfColumns) {
-        code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z);
-        goto _clean;
-      }
-
-      index = 0;
-      sToken = tStrGetToken(str, &index, false, 0, NULL);
-      str += index;
-
-      if (sToken.type != TK_VALUES) {
-        code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
-        goto _clean;
-      }
-
-      code = doParseInsertStatement(pCmd, &str, &spd, &totalNum);
-      if (code != TSDB_CODE_SUCCESS) {
-        goto _clean;
-      }
-    } else {
-      code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z);
-      goto _clean;
+    } else {
+      if (bindedColumns == NULL) {
+        STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
+
+        if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+
+        STableDataBlocks *dataBuf = NULL;
+        int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
+                                              sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
+        if (ret != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+
+        code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
+        if (code != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+      } else {  // bindedColumns != NULL
+        // insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
+        STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
+
+        if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+
+        STableDataBlocks *dataBuf = NULL;
+        int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
+                                              sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
+        if (ret != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+
+        SSchema *pSchema = tscGetTableSchema(pTableMeta);
+        code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
+        if (code != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+
+        if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
+          code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL);
+          goto _clean;
+        }
+
+        if (sToken.type != TK_VALUES) {
+          code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
+          goto _clean;
+        }
+
+        code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
+        if (code != TSDB_CODE_SUCCESS) {
+          goto _clean;
+        }
+      }
     }
   }
@@ -1294,7 +1262,7 @@
   goto _clean;

 _clean:
-  pCmd->curSql = NULL;
+  pCmd->curSql        = NULL;
   pCmd->parseFinished = 1;
   return code;
 }
@@ -1307,7 +1275,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
   int32_t  index = 0;
   SSqlCmd *pCmd = &pSql->cmd;

-  SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
+  SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
   assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);

   pCmd->count = 0;
@@ -1317,7 +1285,7 @@
   TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);

-  sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
+  sToken = tStrGetToken(pSql->sqlstr, &index, false);
   if (sToken.type != TK_INTO) {
     return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
   }
@@ -1450,13 +1418,10 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRows) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
   STableMeta *    pTableMeta = pTableMetaInfo->pTableMeta;
-  SSchema *       pSchema = tscGetTableSchema(pTableMeta);
   STableComInfo   tinfo = tscGetTableInfo(pTableMeta);

-  SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
-  tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
-
-  tfree(pCmd->pTableNameList);
+  destroyTableNameList(pCmd);
+
   pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);

   if (pCmd->pTableBlockHashList == NULL) {
@@ -1495,8 +1460,9 @@
     char *lineptr = line;
     strtolower(line, line);

-    int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf);
-    if (len <= 0 || pTableDataBlock->numOfParams > 0) {
+    int32_t len = 0;
+    code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf);
+    if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
       pSql->res.code = code;
       break;
     }
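A recurring pattern in this file's rewrite is the error-handling convention: parser helpers such as tsParseOneRow and tsParseValues now return a TSDB_CODE_* status directly and hand lengths or row counts back through out-parameters, instead of returning -1 and writing the code into a separate pointer. Below is a minimal sketch of the new convention with placeholder names and codes (not the real client functions):

```c
#include <stdint.h>
#include <stddef.h>

/* Placeholder status codes, for illustration only. */
enum { CODE_SUCCESS = 0, CODE_SYNTAX_ERROR = -100 };

/* New style: the return value IS the status; results travel via *len. */
static int32_t parse_row(const char *str, int32_t *len) {
  if (str == NULL || str[0] != '(') {
    return CODE_SYNTAX_ERROR;   /* error path needs no separate *code out-parameter */
  }
  *len = 1;                     /* result goes through the out-parameter */
  return CODE_SUCCESS;          /* success is explicit, not "length > 0" */
}
```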
src/client/src/tscUtil.c

@@ -271,6 +271,41 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
   return false;
 }

+bool tscIsTopbotQuery(SQueryInfo* pQueryInfo) {
+  size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+
+  for (int32_t i = 0; i < numOfExprs; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+    if (pExpr == NULL) {
+      continue;
+    }
+
+    int32_t functionId = pExpr->functionId;
+    if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo) {
+  size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+
+  for (int32_t i = 0; i < numOfExprs; ++i) {
+    SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+    if (pExpr == NULL) {
+      continue;
+    }
+
+    int32_t functionId = pExpr->functionId;
+    if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
+      return (int32_t) pExpr->param[0].i64;
+    }
+  }
+
+  return 0;
+}
+
 void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
   if (!tscIsPointInterpQuery(pQueryInfo)) {
     return;
@@ -415,6 +450,20 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
   tfree(pCmd->pQueryInfo);
 }

+void destroyTableNameList(SSqlCmd* pCmd) {
+  if (pCmd->numOfTables == 0) {
+    assert(pCmd->pTableNameList == NULL);
+    return;
+  }
+
+  for (int32_t i = 0; i < pCmd->numOfTables; ++i) {
+    tfree(pCmd->pTableNameList[i]);
+  }
+
+  pCmd->numOfTables = 0;
+  tfree(pCmd->pTableNameList);
+}
+
 void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
   pCmd->command   = 0;
   pCmd->numOfCols = 0;
@@ -424,14 +473,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
   pCmd->parseFinished = 0;
   pCmd->autoCreated   = 0;

-  for (int32_t i = 0; i < pCmd->numOfTables; ++i) {
-    if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
-      tfree(pCmd->pTableNameList[i]);
-    }
-  }
-
-  pCmd->numOfTables = 0;
-  tfree(pCmd->pTableNameList);
+  destroyTableNameList(pCmd);

   pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
   pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
@@ -548,6 +590,11 @@ void tscFreeSqlObj(SSqlObj* pSql) {
   free(pSql);
 }

+void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
+  tfree(pColInfo->boundedColumns);
+  tfree(pColInfo->cols);
+}
+
 void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
   if (pDataBlock == NULL) {
     return;
@@ -568,6 +615,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
     taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
   }

+  tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
   tfree(pDataBlock);
 }
@@ -678,7 +726,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
  * @param dataBlocks
  * @return
  */
-int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
+int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOffset, SName* name,
                            STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
   STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
   if (dataBuf == NULL) {
@@ -686,10 +734,12 @@ int32_t tscCreateDataBlock(...)
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  dataBuf->nAllocSize = (uint32_t)initialSize;
-  dataBuf->headerSize = startOffset; // the header size will always be the startOffset value, reserved for the subumit block header
+  dataBuf->nAllocSize = (uint32_t)defaultSize;
+  dataBuf->headerSize = startOffset;  // the header size will always be the startOffset value, reserved for the subumit block header
+
   if (dataBuf->nAllocSize <= dataBuf->headerSize) {
     dataBuf->nAllocSize = dataBuf->headerSize * 2;
   }

   dataBuf->pData = calloc(1, dataBuf->nAllocSize);
@@ -699,25 +749,31 @@
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  dataBuf->ordered  = true;
-  dataBuf->prevTS   = INT64_MIN;
+  //Here we keep the tableMeta to avoid it to be remove by other threads.
+  dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);

-  dataBuf->rowSize  = rowSize;
-  dataBuf->size     = startOffset;
+  SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
+  SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
+  tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
+
+  dataBuf->ordered  = true;
+  dataBuf->prevTS   = INT64_MIN;
+  dataBuf->rowSize  = rowSize;
+  dataBuf->size     = startOffset;
   dataBuf->tsSource = -1;
+  dataBuf->vgId     = dataBuf->pTableMeta->vgId;

   tNameAssign(&dataBuf->tableName, name);

-  //Here we keep the tableMeta to avoid it to be remove by other threads.
-  dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
-  assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
+  assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);

   *dataBlocks = dataBuf;
   return TSDB_CODE_SUCCESS;
 }

 int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
                                 SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
   *dataBlocks = NULL;
   STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
   if (t1 != NULL) {
@@ -826,6 +882,8 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
   int32_t i = 0;
   while(p1) {
     STableDataBlocks* pBlocks = *p1;
+    tfree(pCmd->pTableNameList[i]);
+
     pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
     p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
   }
@@ -942,7 +1000,7 @@ bool tscIsInsertData(char* sqlstr) {
   int32_t index = 0;

   do {
-    SStrToken t0 = tStrGetToken(sqlstr, &index, false, 0, NULL);
+    SStrToken t0 = tStrGetToken(sqlstr, &index, false);
     if (t0.type != TK_LP) {
       return t0.type == TK_INSERT || t0.type == TK_IMPORT;
     }
src/cq/src/cqMain.c

@@ -294,7 +294,7 @@ void cqStop(void *handle) {
   pthread_mutex_unlock(&pContext->mutex);
 }

-void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) {
+void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start) {
   if (tsEnableStream == 0) {
     return NULL;
   }
@@ -326,7 +326,11 @@ void *cqCreate(...)
   pObj->rid = taosAddRef(cqObjRef, pObj);

-  cqCreateStream(pContext, pObj);
+  if (start && pContext->master) {
+    cqCreateStream(pContext, pObj);
+  } else {
+    pObj->pContext = pContext;
+  }

   rid = pObj->rid;
src/cq/test/cqtest.c

@@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
   tdDestroyTSchemaBuilder(&schemaBuilder);

   for (int sid = 1; sid < 10; ++sid) {
-    cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema);
+    cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema, 1);
   }

   tdFreeSchema(pSchema);
src/dnode/src/dnodeVWrite.c

@@ -222,7 +222,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
         dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code);
       } else {
         if (qtype == TAOS_QTYPE_FWD) {
-          vnodeConfirmForward(pVnode, pWrite->pHead.version, 0, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
+          vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
         }
         if (pWrite->rspRet.rsp) {
           rpcFreeCont(pWrite->rspRet.rsp);
src/inc/tcq.h

@@ -42,7 +42,7 @@ void cqStart(void *handle);
 void cqStop(void *handle);

 // cqCreate is called by TSDB to start an instance of CQ
-void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema);
+void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start);

 // cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate
 void cqDrop(void *handle);
src/inc/tsdb.h

@@ -51,7 +51,7 @@ typedef struct {
   void *cqH;
   int (*notifyStatus)(void *, int status, int eno);
   int (*eventCallBack)(void *);
-  void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema);
+  void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema, int start);
   void (*cqDropFunc)(void *handle);
 } STsdbAppH;
src/kit/taosdemo/taosdemo.c
浏览文件 @
64c06c95
...
...
@@ -211,8 +211,8 @@ typedef struct SArguments_S {
int
num_of_tables
;
int
num_of_DPT
;
int
abort
;
int
disorderRatio
;
int
disorderRange
;
int
disorderRatio
;
// 0: no disorder, >0: x%
int
disorderRange
;
// ms or us by database precision
int
method_of_delete
;
char
**
arg_list
;
int64_t
totalInsertRows
;
...
...
@@ -229,25 +229,25 @@ typedef struct SColumn_S {
typedef
struct
SSuperTable_S
{
char
sTblName
[
MAX_TB_NAME_SIZE
+
1
];
int
childTblCount
;
bool
childTblExists
;
// 0: no, 1: yes
int
batchCreateTableNum
;
// 0: no batch, > 0: batch table number in one sql
int8_t
autoCreateTable
;
// 0: create sub table, 1: auto create sub table
bool
childTblExists
;
// 0: no, 1: yes
int
batchCreateTableNum
;
// 0: no batch, > 0: batch table number in one sql
int8_t
autoCreateTable
;
// 0: create sub table, 1: auto create sub table
char
childTblPrefix
[
MAX_TB_NAME_SIZE
];
char
dataSource
[
MAX_TB_NAME_SIZE
+
1
];
// rand_gen or sample
char
insertMode
[
MAX_TB_NAME_SIZE
];
// taosc, restful
char
insertMode
[
MAX_TB_NAME_SIZE
];
// taosc, restful
int
childTblLimit
;
int
childTblOffset
;
int
multiThreadWriteOneTbl
;
// 0: no, 1: yes
int
interlaceRows
;
//
int
disorderRatio
;
// 0: no disorder, >0: x%
int
disorderRange
;
// ms or us by database precision
int
maxSqlLen
;
//
int
multiThreadWriteOneTbl
;
// 0: no, 1: yes
int
interlaceRows
;
//
int
disorderRatio
;
// 0: no disorder, >0: x%
int
disorderRange
;
// ms or us by database precision
int
maxSqlLen
;
//
int
insertInterval
;
// insert interval, will override global insert interval
int64_t
insertRows
;
// 0: no limit
int64_t
insertRows
;
// 0: no limit
int
timeStampStep
;
char
startTimestamp
[
MAX_TB_NAME_SIZE
];
//
char
startTimestamp
[
MAX_TB_NAME_SIZE
];
char
sampleFormat
[
MAX_TB_NAME_SIZE
];
// csv, json
char
sampleFile
[
MAX_FILE_NAME_LEN
+
1
];
char
tagsFile
[
MAX_FILE_NAME_LEN
+
1
];
...
...
@@ -263,7 +263,6 @@ typedef struct SSuperTable_S {
int
lenOfTagOfOneRow
;
char
*
sampleDataBuf
;
int
sampleDataBufSize
;
//int sampleRowCount;
//int sampleUsePos;
...
...
@@ -488,7 +487,7 @@ static int taosRandom()
return
number
;
}
#else
#else
// Not windows
static
void
setupForAnsiEscape
(
void
)
{}
static
void
resetAfterAnsiEscape
(
void
)
{
...
...
@@ -500,11 +499,15 @@ static void resetAfterAnsiEscape(void) {
static
int
taosRandom
()
{
srand
(
time
(
NULL
));
struct
timeval
tv
;
gettimeofday
(
&
tv
,
NULL
);
srand
(
tv
.
tv_usec
);
return
rand
();
}
#endif
#endif
// ifdef Windows
static
int
createDatabasesAndStables
();
static
void
createChildTables
();
...
@@ -677,7 +680,7 @@ static void printHelp() {
   printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag.");
   printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt.");
   printf("%s%s%s%s\n", indent, "-O", indent,
-      "Insert mode--0: In order, > 0: disorder ratio. Default is in order.");
+      "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
   printf("%s%s%s%s\n", indent, "-R", indent, "Out of order data's range, ms, default is 1000.");
   printf("%s%s%s%s\n", indent, "-g", indent,
...
@@ -691,19 +694,12 @@ static void printHelp() {
 static void parse_args(int argc, char *argv[], SArguments *arguments) {
   char **sptr;
   wordexp_t full_path;
   for (int i = 1; i < argc; i++) {
     if (strcmp(argv[i], "-f") == 0) {
       arguments->metaFile = argv[++i];
     } else if (strcmp(argv[i], "-c") == 0) {
       char *configPath = argv[++i];
       if (wordexp(configPath, &full_path, 0) != 0) {
         errorPrint("Invalid path %s\n", configPath);
         return;
       }
       taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
       wordfree(&full_path);
       tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
     } else if (strcmp(argv[i], "-h") == 0) {
       arguments->host = argv[++i];
...
@@ -798,23 +794,22 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
       arguments->verbose_print = true;
     } else if (strcmp(argv[i], "-pp") == 0) {
       arguments->performance_print = true;
     } else if (strcmp(argv[i], "-c") == 0) {
       strcpy(configDir, argv[++i]);
     } else if (strcmp(argv[i], "-O") == 0) {
       arguments->disorderRatio = atoi(argv[++i]);
       if (arguments->disorderRatio > 1 || arguments->disorderRatio < 0) {
       if (arguments->disorderRatio > 50) arguments->disorderRatio = 50;
       if (arguments->disorderRatio < 0)  arguments->disorderRatio = 0;
       } else if (arguments->disorderRatio == 1) {
         arguments->disorderRange = 10;
       }
     } else if (strcmp(argv[i], "-R") == 0) {
       arguments->disorderRange = atoi(argv[++i]);
       if (arguments->disorderRange == 1
               && (arguments->disorderRange > 50 || arguments->disorderRange <= 0)) {
         arguments->disorderRange = 10;
       }
       if (arguments->disorderRange < 0) arguments->disorderRange = 1000;
     } else if (strcmp(argv[i], "-a") == 0) {
       arguments->replica = atoi(argv[++i]);
       if (arguments->replica > 3 || arguments->replica < 1) {
...
...
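The old and new -O/-R handling are interleaved in the hunk above, but the checks appear to converge on clamping the disorder ratio to a 0~50 percent range and falling back to a sane range value. A standalone sketch of that reading; the helper name is made up and not taosdemo's actual code:

    /* Illustrative condensation of the -O / -R validation. */
    static void normalizeDisorder(int *ratio, int *range) {
        if (*ratio > 50) *ratio = 50;     /* at most half of the generated rows out of order */
        if (*ratio < 0)  *ratio = 0;      /* negative ratio means fully ordered */
        if (*range <= 0) *range = 1000;   /* fall back to the documented 1000 ms window */
    }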
@@ -998,8 +993,9 @@ static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName)
   taos_free_result(res);
 }

-static double getCurrentTime() {
+static double getCurrentTimeUs() {
   struct timeval tv;
   if (gettimeofday(&tv, NULL) != 0) {
     perror("Failed to get current time in ms");
     return 0.0;
...
...
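The renamed getCurrentTimeUs() is used as a plain wall-clock stopwatch in the rest of the file; a usage sketch, assuming it returns seconds as a double with microsecond resolution (only the top of its body is visible in this hunk):

    double start = getCurrentTimeUs();
    createChildTables();                            /* any measured work */
    double elapsed = getCurrentTimeUs() - start;
    printf("Spent %.4f seconds\n", elapsed);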
@@ -1107,6 +1103,7 @@ static int printfInsertMeta() {
   printf("host:                       \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
   printf("user:                       \033[33m%s\033[0m\n", g_Dbs.user);
   printf("password:                   \033[33m%s\033[0m\n", g_Dbs.password);
+  printf("configDir:                  \033[33m%s\033[0m\n", configDir);
   printf("resultFile:                 \033[33m%s\033[0m\n", g_Dbs.resultFile);
   printf("thread num of insert data:  \033[33m%d\033[0m\n", g_Dbs.threadCount);
   printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
...
@@ -1292,6 +1289,7 @@ static void printfInsertMetaToFile(FILE* fp) {
   fprintf(fp, "host:                       %s:%u\n", g_Dbs.host, g_Dbs.port);
   fprintf(fp, "user:                       %s\n", g_Dbs.user);
+  fprintf(fp, "configDir:                  %s\n", configDir);
   fprintf(fp, "resultFile:                 %s\n", g_Dbs.resultFile);
   fprintf(fp, "thread num of insert data:  %d\n", g_Dbs.threadCount);
   fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
...
...
@@ -2279,7 +2277,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
 }

 static int createSuperTable(TAOS * taos, char* dbName,
-        SSuperTable*  superTbls, bool use_metric) {
+        SSuperTable*  superTbl) {
   char command[BUFFER_SIZE] = "\0";
   char cols[STRING_LEN] = "\0";
...
@@ -2287,19 +2285,26 @@ static int createSuperTable(TAOS * taos, char* dbName,
   int len = 0;
   int lenOfOneRow = 0;

-  for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
-    char* dataType = superTbls->columns[colIndex].dataType;
+  if (superTbl->columnCount == 0) {
+    errorPrint("%s() LN%d, super table column count is %d\n",
+            __func__, __LINE__, superTbl->columnCount);
+    return -1;
+  }
+
+  for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
+    char* dataType = superTbl->columns[colIndex].dataType;

     if (strcasecmp(dataType, "BINARY") == 0) {
       len += snprintf(cols + len, STRING_LEN - len,
-          ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen);
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+          ", col%d %s(%d)", colIndex, "BINARY", superTbl->columns[colIndex].dataLen);
+      lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
     } else if (strcasecmp(dataType, "NCHAR") == 0) {
       len += snprintf(cols + len, STRING_LEN - len,
-          ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen);
-      lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+          ", col%d %s(%d)", colIndex, "NCHAR", superTbl->columns[colIndex].dataLen);
+      lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
     } else if (strcasecmp(dataType, "INT") == 0) {
       len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
       lenOfOneRow += 11;
...
@@ -2331,88 +2336,95 @@ static int createSuperTable(TAOS * taos, char* dbName,
     }
   }

-  superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
+  superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp
   //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName,
   //        g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow);

   // save for creating child table
-  superTbls->colsOfCreateChildTable = (char*)calloc(len+20, 1);
-  if (NULL == superTbls->colsOfCreateChildTable) {
-    printf("Failed when calloc, size:%d", len+1);
+  superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
+  if (NULL == superTbl->colsOfCreateChildTable) {
+    errorPrint("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1);
     taos_close(taos);
     exit(-1);
   }

-  snprintf(superTbls->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
-  verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbls->colsOfCreateChildTable);
+  snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
+  verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable);

-  if (use_metric) {
+  if (superTbl->tagCount == 0) {
+    errorPrint("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount);
+    return -1;
+  }

   char tags[STRING_LEN] = "\0";
   int tagIndex;
   len = 0;

   int lenOfTagOfOneRow = 0;
   len += snprintf(tags + len, STRING_LEN - len, "(");
   for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) {
     char* dataType = superTbl->tags[tagIndex].dataType;

     if (strcasecmp(dataType, "BINARY") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
               tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen);
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
     } else if (strcasecmp(dataType, "NCHAR") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
               tagIndex, "NCHAR", superTbl->tags[tagIndex].dataLen);
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
     } else if (strcasecmp(dataType, "INT") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
     } else if (strcasecmp(dataType, "BIGINT") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
     } else if (strcasecmp(dataType, "SMALLINT") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
     } else if (strcasecmp(dataType, "TINYINT") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
     } else if (strcasecmp(dataType, "BOOL") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
     } else if (strcasecmp(dataType, "FLOAT") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
     } else if (strcasecmp(dataType, "DOUBLE") == 0) {
       len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE");
       lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
     } else {
       taos_close(taos);
       printf("config error tag type : %s\n", dataType);
       exit(-1);
     }
   }

   len -= 2;
   len += snprintf(tags + len, STRING_LEN - len, ")");

-  superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
+  superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;

   snprintf(command, BUFFER_SIZE,
       "create table if not exists %s.%s (ts timestamp%s) tags %s",
-      dbName, superTbls->sTblName, cols, tags);
+      dbName, superTbl->sTblName, cols, tags);
   verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command);

   if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
-    errorPrint("create supertable %s failed!\n\n", superTbls->sTblName);
+    errorPrint("create supertable %s failed!\n\n", superTbl->sTblName);
     return -1;
   }
-  debugPrint("create supertable %s success!\n\n", superTbls->sTblName);
+  debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
   return 0;
 }
...
...
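With the pieces above, the function reduces to building one CREATE TABLE statement and running it through queryDbExec(). An illustrative value of `command` for a configuration with two INT columns and one BINARY(16) tag; the database and table names are made up:

    char command[BUFFER_SIZE];
    snprintf(command, BUFFER_SIZE,
             "create table if not exists %s.%s (ts timestamp%s) tags %s",
             "test", "meters", ", col0 INT, col1 INT", "(t0 BINARY(16))");
    /* command: "create table if not exists test.meters (ts timestamp, col0 INT, col1 INT) tags (t0 BINARY(16))" */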
@@ -2434,85 +2446,88 @@ static int createDatabasesAndStables() {
       taos_close(taos);
       return -1;
     }
   }

   int dataLen = 0;
   dataLen += snprintf(command + dataLen,
       BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName);

   if (g_Dbs.db[i].dbCfg.blocks > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks);
   }
   if (g_Dbs.db[i].dbCfg.cache > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache);
   }
   if (g_Dbs.db[i].dbCfg.days > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days);
   }
   if (g_Dbs.db[i].dbCfg.keep > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep);
   }
   if (g_Dbs.db[i].dbCfg.quorum > 1) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum);
   }
   if (g_Dbs.db[i].dbCfg.replica > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica);
   }
   if (g_Dbs.db[i].dbCfg.update > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update);
   }
   //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
   //  dataLen += snprintf(command + dataLen,
   //      BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode);
   //}
   if (g_Dbs.db[i].dbCfg.minRows > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows);
   }
   if (g_Dbs.db[i].dbCfg.maxRows > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows);
   }
   if (g_Dbs.db[i].dbCfg.comp > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp);
   }
   if (g_Dbs.db[i].dbCfg.walLevel > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel);
   }
   if (g_Dbs.db[i].dbCfg.cacheLast > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast);
   }
   if (g_Dbs.db[i].dbCfg.fsync > 0) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " fsync %d", g_Dbs.db[i].dbCfg.fsync);
   }
   if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms")))
           || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", strlen("us")))) {
     dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
             " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
   }

+  debugPrint("%s() %d command: %s\n", __func__, __LINE__, command);
   if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
     taos_close(taos);
     errorPrint("\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName);
     return -1;
   }
   printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);

+  debugPrint("%s() %d supertbl count:%d\n", __func__, __LINE__, g_Dbs.db[i].superTblCount);
+  int validStbCount = 0;
   for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
     sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
...
...
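The builder pattern above appends one clause per configured option; a minimal standalone sketch of the same idea, with made-up option values rather than taosdemo's config structs:

    char command[1024];
    int  dataLen = 0;
    dataLen += snprintf(command + dataLen, sizeof(command) - dataLen,
                        "create database if not exists %s", "testdb");
    int cache = 16, replica = 1;                       /* illustrative config values */
    if (cache > 0)
        dataLen += snprintf(command + dataLen, sizeof(command) - dataLen, " cache %d", cache);
    if (replica > 0)
        dataLen += snprintf(command + dataLen, sizeof(command) - dataLen, " replica %d", replica);
    /* command: "create database if not exists testdb cache 16 replica 1" */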
@@ -2522,12 +2537,11 @@ static int createDatabasesAndStables() {
     if ((ret != 0) || (g_Dbs.db[i].drop)) {
       ret = createSuperTable(taos, g_Dbs.db[i].dbName,
-              &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
+              &g_Dbs.db[i].superTbls[j]);

       if (0 != ret) {
-        errorPrint("\ncreate super table %d failed!\n\n", j);
-        taos_close(taos);
-        return -1;
+        errorPrint("create super table %d failed!\n\n", j);
+        continue;
       }
     }
...
@@ -2536,10 +2550,13 @@ static int createDatabasesAndStables() {
       if (0 != ret) {
         errorPrint("\nget super table %s.%s info failed!\n\n",
                 g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
-        taos_close(taos);
-        return -1;
+        continue;
       }
+
+      validStbCount ++;
     }
+
+    g_Dbs.db[i].superTblCount = validStbCount;
   }

   taos_close(taos);
...
@@ -2592,7 +2609,6 @@ static void* createTable(void *sarg)
       len += snprintf(buffer + len, buff_len - len, "create table ");
     }

     char* tagsValBuf = NULL;
     if (0 == superTblInfo->tagSource) {
       tagsValBuf = generateTagVaulesForStb(superTblInfo, i);
...
@@ -2605,7 +2621,6 @@ static void* createTable(void *sarg)
         free(buffer);
         return NULL;
       }

       len += snprintf(buffer + len,
               superTblInfo->maxSqlLen - len,
               "if not exists %s.%s%d using %s.%s tags %s ",
...
@@ -2614,7 +2629,6 @@ static void* createTable(void *sarg)
               superTblInfo->sTblName, tagsValBuf);
       free(tagsValBuf);
       batchNum++;

       if ((batchNum < superTblInfo->batchCreateTableNum)
               && ((superTblInfo->maxSqlLen - len)
                   >= (superTblInfo->lenOfTagOfOneRow + 256))) {
...
...
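The buffer assembled here batches several child-table definitions into a single statement; a standalone sketch of that growth with illustrative database, prefix and tag values (not the actual taosdemo variables):

    char buffer[1024];
    int  len = 0;
    len += snprintf(buffer + len, sizeof(buffer) - len, "create table ");
    for (int i = 0; i < 2; i++) {                               /* two tables per batch, illustrative */
        len += snprintf(buffer + len, sizeof(buffer) - len,
                        "if not exists %s.%s%d using %s.%s tags %s ",
                        "test", "d", i, "test", "meters", "(1,'beijing')");
    }
    /* buffer: "create table if not exists test.d0 using test.meters tags (1,'beijing')
                if not exists test.d1 using test.meters tags (1,'beijing') " */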
@@ -2724,27 +2738,29 @@ static void createChildTables() {
   int len;

   for (int i = 0; i < g_Dbs.dbCount; i++) {
+    if (g_Dbs.use_metric) {
     if (g_Dbs.db[i].superTblCount > 0) {
       // with super table
       for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
         if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable)
               || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
           continue;
         }

         verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
                 g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
         int startFrom = 0;
         g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;

         verbosePrint("%s() LN%d: create %d child tables from %d\n",
                 __func__, __LINE__, g_totalChildTables, startFrom);
         startMultiThreadCreateChildTable(
                 g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
                 g_Dbs.threadCountByCreateTbl, startFrom,
                 g_Dbs.db[i].superTbls[j].childTblCount,
                 g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
       }
     }
+    }
   } else {
     // normal table
...
@@ -2764,7 +2780,7 @@ static void createChildTables() {
       j++;
     }

-    len = snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
+    snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");

     verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n",
             __func__, __LINE__,
...
@@ -3552,19 +3568,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       goto PARSE_OVER;
     }

-    cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size");
-    if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) {
-      g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint;
-      if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) {
-        g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
-      }
-    } else if (!sampleDataBufSize) {
-      g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024;
-    } else {
-      printf("ERROR: failed to read json, sample_buf_size not found\n");
-      goto PARSE_OVER;
-    }

     cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
     if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) {
...
@@ -3662,6 +3665,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
     if (disorderRatio && disorderRatio->type == cJSON_Number) {
+      if (disorderRatio->valueint > 50) disorderRatio->valueint = 50;
+      if (disorderRatio->valueint < 0)  disorderRatio->valueint = 0;
+
       g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
     } else if (!disorderRatio) {
       g_Dbs.db[i].superTbls[j].disorderRatio = 0;
...
@@ -4322,6 +4331,8 @@ static int32_t generateData(char *recBuf, char **data_type,
   pstr += sprintf(pstr, ")");

+  verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
+
   return (int32_t)strlen(recBuf);
 }
...
...
@@ -4420,6 +4431,8 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
   int k = 0;
   for (k = 0; k < batch;) {
     char data[MAX_DATA_SIZE];
     memset(data, 0, MAX_DATA_SIZE);

     int retLen = 0;

     if (superTblInfo) {
...
@@ -4433,7 +4446,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
               pSamplePos);
       } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
-        int rand_num = rand_tinyint() % 100;
+        int rand_num = taosRandom() % 100;
         if (0 != superTblInfo->disorderRatio
                 && rand_num < superTblInfo->disorderRatio) {
           int64_t d = startTime
...
@@ -4460,15 +4473,16 @@ static int generateDataTail(char *tableName, int32_t tableSeq,
       len += retLen;
       remainderBufLen -= retLen;
     } else {
-      int rand_num = taosRandom() % 100;
       char **data_type = g_args.datatype;
       int lenOfBinary = g_args.len_of_binary;
+      int rand_num = taosRandom() % 100;

       if ((g_args.disorderRatio != 0)
-              && (rand_num < g_args.disorderRange)) {
+              && (rand_num < g_args.disorderRatio)) {
         int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k
-                - taosRandom() % 1000000 + rand_num;
+                - taosRandom() % g_args.disorderRange;
         retLen = generateData(data, data_type, ncols_per_record, d, lenOfBinary);
       } else {
...
@@ -4621,12 +4635,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
     return NULL;
   }

   char tableName[TSDB_TABLE_NAME_LEN];
   pThreadInfo->totalInsertRows = 0;
   pThreadInfo->totalAffectedRows = 0;

+  int nTimeStampStep = superTblInfo ? superTblInfo->timeStampStep : DEFAULT_TIMESTAMP_STEP;
   int64_t insertRows = (superTblInfo) ? superTblInfo->insertRows : g_args.num_of_DPT;
   int insert_interval = superTblInfo ? superTblInfo->insertInterval : g_args.insert_interval;
...
@@ -4685,8 +4700,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
       if (0 == strlen(tableName)) {
         errorPrint("[%d] %s() LN%d, getTableName return null\n",
                 pThreadInfo->threadID, __func__, __LINE__);
         free(buffer);
-        return NULL;
+        exit(-1);
       }

       int headLen;
...
@@ -4748,7 +4763,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
         generatedRecPerTbl += batchPerTbl;
         startTime = pThreadInfo->start_time
-                    + generatedRecPerTbl * superTblInfo->timeStampStep;
+                    + generatedRecPerTbl * nTimeStampStep;

         flagSleep = true;
         if (generatedRecPerTbl >= insertRows)
...
@@ -5017,7 +5032,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
   int rand_num = taosRandom() % 100;
   if (0 != winfo->superTblInfo->disorderRatio
           && rand_num < winfo->superTblInfo->disorderRatio) {
-    int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num;
+    int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange;
     generateRowData(data, d, winfo->superTblInfo);
   } else {
     generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo);
...
@@ -5109,7 +5124,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     start_time = 1500000000000;
   }

-  double start = getCurrentTime();
+  double start = getCurrentTimeUs();

   // read sample data from file first
   if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
...
@@ -5146,16 +5161,14 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   if (superTblInfo) {
     int limit, offset;

-    if (superTblInfo->childTblOffset >= superTblInfo->childTblCount) {
-      printf("WARNING: specified offset >= child table count!\n");
-      if (!g_args.answer_yes) {
-        printf("         Press enter key to continue or Ctrl-C to stop\n\n");
-        (void)getchar();
-      }
-    }
-
-    if (superTblInfo->childTblOffset >= 0) {
-      if (superTblInfo->childTblLimit <= 0) {
+    if ((superTblInfo->childTblExists == TBL_NO_EXISTS)
+            && ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) {
+      printf("WARNING: offset and limit will not be used since the child tables are not exists!\n");
+    }
+
+    if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
+            && (superTblInfo->childTblOffset >= 0)) {
+      if (superTblInfo->childTblLimit < 0) {
         superTblInfo->childTblLimit =
             superTblInfo->childTblCount - superTblInfo->childTblOffset;
       }
...
@@ -5163,13 +5176,32 @@ static void startMultiThreadInsertData(int threads, char* db_name,
       offset = superTblInfo->childTblOffset;
       limit = superTblInfo->childTblLimit;
     } else {
       limit = superTblInfo->childTblCount;
       offset = 0;
     }

     ntables = limit;
     startFrom = offset;

+    if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
+            && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+                > superTblInfo->childTblCount)) {
+      printf("WARNING: specified offset + limit > child table count!\n");
+      if (!g_args.answer_yes) {
+        printf("         Press enter key to continue or Ctrl-C to stop\n\n");
+        (void)getchar();
+      }
+    }
+
+    if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
+            && (0 == superTblInfo->childTblLimit)) {
+      printf("WARNING: specified limit = 0, which cannot find table name to insert or query!\n");
+      if (!g_args.answer_yes) {
+        printf("         Press enter key to continue or Ctrl-C to stop\n\n");
+        (void)getchar();
+      }
+    }
+
     superTblInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN);
     if (superTblInfo->childTblName == NULL) {
...
...
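Read together, the limit/offset handling above resolves to a small decision table; a standalone sketch of that logic, where TBL_NO_EXISTS / TBL_ALREADY_EXISTS are the enum values taosdemo already uses and everything else is illustrative:

    /* Illustrative condensation of the child-table window selection. */
    static void resolveChildTblWindow(int childTblExists, int childTblCount,
                                      int *limit, int *offset) {
        if (childTblExists != TBL_ALREADY_EXISTS) {  /* tables will be created: window ignored */
            *limit  = childTblCount;
            *offset = 0;
            return;
        }
        if (*limit < 0)                              /* negative limit means "rest of the tables" */
            *limit = childTblCount - *offset;
        /* callers still warn when *offset + *limit > childTblCount or when *limit == 0 */
    }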
@@ -5287,7 +5319,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   if (cntDelay == 0) cntDelay = 1;
   avgDelay = (double)totalDelay / cntDelay;

-  double end = getCurrentTime();
+  double end = getCurrentTimeUs();
   double t = end - start;

   if (superTblInfo) {
...
@@ -5366,7 +5398,7 @@ static void *readTable(void *sarg) {
       sprintf(command, "select %s from %s%d where ts>= %" PRId64,
               aggreFunc[j], tb_prefix, i, sTime);

-      double t = getCurrentTime();
+      double t = getCurrentTimeUs();
       TAOS_RES *pSql = taos_query(taos, command);
       int32_t code = taos_errno(pSql);
...
@@ -5382,7 +5414,7 @@ static void *readTable(void *sarg) {
         count++;
       }

-      t = getCurrentTime() - t;
+      t = getCurrentTimeUs() - t;
       totalT += t;

       taos_free_result(pSql);
...
@@ -5441,7 +5473,7 @@ static void *readMetric(void *sarg) {
     printf("Where condition: %s\n", condition);
     fprintf(fp, "%s\n", command);

-    double t = getCurrentTime();
+    double t = getCurrentTimeUs();

     TAOS_RES *pSql = taos_query(taos, command);
     int32_t code = taos_errno(pSql);
...
@@ -5457,7 +5489,7 @@ static void *readMetric(void *sarg) {
     while (taos_fetch_row(pSql) != NULL) {
       count++;
     }

-    t = getCurrentTime() - t;
+    t = getCurrentTimeUs() - t;

     fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
             num_of_tables * num_of_DPT / t, t * 1000);
...
@@ -5511,9 +5543,9 @@ static int insertTestProcess() {
   double end;

   // create child tables
-  start = getCurrentTime();
+  start = getCurrentTimeUs();
   createChildTables();
-  end = getCurrentTime();
+  end = getCurrentTimeUs();

   if (g_totalChildTables > 0) {
     printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
...
@@ -5525,20 +5557,23 @@ static int insertTestProcess() {
   taosMsleep(1000);

   // create sub threads for inserting data
-  //start = getCurrentTime();
+  //start = getCurrentTimeUs();
   for (int i = 0; i < g_Dbs.dbCount; i++) {
-    if (g_Dbs.db[i].superTblCount > 0) {
-      for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-        SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
-        if (0 == g_Dbs.db[i].superTbls[j].insertRows) {
-          continue;
-        }
-        startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName,
-            g_Dbs.db[i].dbCfg.precision, superTblInfo);
+    if (g_Dbs.use_metric) {
+      if (g_Dbs.db[i].superTblCount > 0) {
+        for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+          SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
+          if (superTblInfo && (superTblInfo->insertRows > 0)) {
+            startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName,
+                g_Dbs.db[i].dbCfg.precision, superTblInfo);
+          }
+        }
       }
     } else {
       startMultiThreadInsertData(g_Dbs.threadCount,
...
@@ -5547,7 +5582,7 @@ static int insertTestProcess() {
           NULL);
     }
   }

-  //end = getCurrentTime();
+  //end = getCurrentTimeUs();

   //int64_t totalInsertRows = 0;
   //int64_t totalAffectedRows = 0;
...
@@ -6368,7 +6403,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
   char *line = NULL;
   size_t line_len = 0;

-  double t = getCurrentTime();
+  double t = getCurrentTimeUs();

   while ((read_len = tgetline(&line, &line_len, fp)) != -1) {
     if (read_len >= MAX_SQL_SIZE) continue;
...
@@ -6399,7 +6434,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
     cmd_len = 0;
   }

-  t = getCurrentTime() - t;
+  t = getCurrentTimeUs() - t;
   printf("run %s took %.6f second(s)\n\n", sqlFile, t);

   tmfree(cmd);
...
@@ -6483,6 +6518,16 @@ static void queryResult() {
 static void testCmdLine() {

+  if (strlen(configDir)) {
+    wordexp_t full_path;
+    if (wordexp(configDir, &full_path, 0) != 0) {
+      errorPrint("Invalid path %s\n", configDir);
+      return;
+    }
+    taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
+    wordfree(&full_path);
+  }
+
   g_args.test_mode = INSERT_TEST;
   insertTestProcess();
...
...
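The block added to testCmdLine() simply expands the configured directory and hands it to the client library before any connection is made; an illustrative fragment of the same two calls for a made-up path such as "~/cfg":

    wordexp_t full_path;
    if (wordexp("~/cfg", &full_path, 0) == 0) {            /* "~/cfg" is only an example path */
        taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
        wordfree(&full_path);
    }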
src/kit/taosdump/taosdump.c
...
@@ -39,6 +39,22 @@ typedef struct {
   int8_t type;
 } SOColInfo;

#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
#define verbosePrint(fmt, ...) \
do { if (g_args.verbose_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
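These logging macros mirror the ones taosdemo already has; a quick illustration of how they behave (the message text and file name are made up):

    /* With -g on the command line g_args.debug_print is true and the first line is emitted;
       errorPrint is unconditional. Both expect at least one argument after fmt. */
    debugPrint("%s() LN%d, opening %s\n", __func__, __LINE__, "dump.sql");
    errorPrint("failed to open %s\n", "dump.sql");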
// -------------------------- SHOW DATABASE INTERFACE-----------------------
 enum _show_db_index {
   TSDB_SHOW_DB_NAME_INDEX,
...
@@ -46,7 +62,7 @@ enum _show_db_index {
   TSDB_SHOW_DB_NTABLES_INDEX,
   TSDB_SHOW_DB_VGROUPS_INDEX,
   TSDB_SHOW_DB_REPLICA_INDEX,
   TSDB_SHOW_DB_QUORUM_INDEX,
   TSDB_SHOW_DB_DAYS_INDEX,
   TSDB_SHOW_DB_KEEP_INDEX,
   TSDB_SHOW_DB_CACHE_INDEX,
...
@@ -101,10 +117,10 @@ typedef struct {
   char     name[TSDB_DB_NAME_LEN + 1];
   char     create_time[32];
   int32_t  ntables;
   int32_t  vgroups;
   int16_t  replica;
   int16_t  quorum;
   int16_t  days;
   char     keeplist[32];
   //int16_t  daysToKeep;
   //int16_t  daysToKeep1;
...
@@ -172,48 +188,50 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat
 /* The options we understand. */
 static struct argp_option options[] = {
   // connection option
   {"host",       'h', "HOST",        0, "Server host dumping data from. Default is localhost.", 0},
   {"user",       'u', "USER",        0, "User name used to connect to server. Default is root.", 0},
 #ifdef _TD_POWER_
   {"password",   'p', "PASSWORD",    0, "User password to connect to server. Default is powerdb.", 0},
 #else
   {"password",   'p', "PASSWORD",    0, "User password to connect to server. Default is taosdata.", 0},
 #endif
   {"port",       'P', "PORT",        0, "Port to connect", 0},
   {"cversion",   'v', "CVERION",     0, "client version", 0},
   {"mysqlFlag",  'q', "MYSQLFLAG",   0, "mysqlFlag, Default is 0", 0},
   // input/output file
   {"outpath",    'o', "OUTPATH",     0, "Output file path.", 1},
   {"inpath",     'i', "INPATH",      0, "Input file path.", 1},
   {"resultFile", 'r', "RESULTFILE",  0, "DumpOut/In Result file path and name.", 1},
 #ifdef _TD_POWER_
   {"config",     'c', "CONFIG_DIR",  0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
 #else
   {"config",     'c', "CONFIG_DIR",  0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
 #endif
   {"encode",     'e', "ENCODE",      0, "Input file encoding.", 1},
   // dump unit options
   {"all-databases", 'A', 0,          0, "Dump all databases.", 2},
   {"databases",     'B', 0,          0, "Dump assigned databases", 2},
   // dump format options
   {"schemaonly",    's', 0,             0, "Only dump schema.", 3},
   {"with-property", 'M', 0,             0, "Dump schema with properties.", 3},
   {"start-time",    'S', "START_TIME",  0, "Start time to dump.", 3},
-  {"end-time",      'E', "END_TIME",    0, "End time to dump.", 3},
+  {"end-time",      'E', "END_TIME",    0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
   {"data-batch",    'N', "DATA_BATCH",  0, "Number of data point per insert statement. Default is 1.", 3},
   {"max-sql-len",   'L', "SQL_LEN",     0, "Max length of one sql. Default is 65480.", 3},
   {"table-batch",   't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
   {"thread_num",    'T', "THREAD_NUM",  0, "Number of thread for dump in file. Default is 5.", 3},
   {"allow-sys",     'a', 0,             0, "Allow to dump sys database", 3},
+  {"debug",         'g', 0,             0, "Print debug info.", 1},
+  {"verbose",       'v', 0,             0, "Print verbose debug info.", 1},
   {0}};

 /* Used by main to communicate with parse_opt. */
-struct arguments {
+typedef struct arguments {
   // connection option
   char    *host;
   char    *user;
   char    *password;
   uint16_t port;
   char     cversion[12];
   uint16_t mysqlFlag;
   // output file
...
@@ -238,9 +256,12 @@ struct arguments {
   int32_t  thread_num;
   int      abort;
   char   **arg_list;
   int      arg_list_len;
   bool     isDumpIn;
-};
+  bool     debug_print;
+  bool     verbose_print;
+  bool     performance_print;
+} SArguments;

 /* Parse a single option. */
 static error_t parse_opt(int key, char *arg, struct argp_state *state) {
...
@@ -286,6 +307,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
       tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN);
       wordfree(&full_path);
       break;
+    case 'g':
+      arguments->debug_print = true;
+      break;
     case 'i':
       arguments->isDumpIn = true;
       if (wordexp(arg, &full_path, 0) != 0) {
...
@@ -387,7 +411,7 @@ int taosCheckParam(struct arguments *arguments);
 void taosFreeDbInfos();
 static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);

-struct arguments tsArguments = {
+struct arguments g_args = {
   // connection option
   NULL,
   "root",
...
@@ -400,18 +424,18 @@ struct arguments tsArguments = {
   "",
   0,
   // outpath and inpath
   "",
   "",
   "./dump_result.txt",
   NULL,
   // dump unit option
   false,
   false,
   // dump format option
   false,
   false,
   0,
   INT64_MAX,
   1,
   TSDB_MAX_SQL_LEN,
   1,
...
@@ -419,11 +443,14 @@ struct arguments tsArguments = {
   // other options
   5,
   0,
   NULL,
   0,
-  false
+  false,
+  false,       // debug_print
+  false,       // verbose_print
+  false        // performance_print
 };

 static int queryDbImpl(TAOS *taos, char *command) {
   int i;
   TAOS_RES *res = NULL;
...
@@ -434,7 +461,7 @@ static int queryDbImpl(TAOS *taos, char *command) {
     taos_free_result(res);
     res = NULL;
   }

   res = taos_query(taos, command);
   code = taos_errno(res);
   if (0 == code) {
...
@@ -453,13 +480,44 @@ static int queryDbImpl(TAOS *taos, char *command) {
   return 0;
 }

+static void parse_args(int argc, char *argv[], SArguments *arguments) {
+  for (int i = 1; i < argc; i++) {
+    if (strcmp(argv[i], "-E") == 0) {
+      if (argv[i+1]) {
+        char *tmp = argv[++i];
+        int64_t tmpEpoch;
+        if (strchr(tmp, ':') && strchr(tmp, '-')) {
+          if (TSDB_CODE_SUCCESS != taosParseTime(
+                  tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) {
+            fprintf(stderr, "Input end time error!\n");
+            return;
+          }
+        } else {
+          tmpEpoch = atoll(tmp);
+        }
+
+        sprintf(argv[i], "%" PRId64 "", tmpEpoch);
+        debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
+                __func__, __LINE__, tmp, i, argv[i]);
+      } else {
+        fprintf(stderr, "Input end time error!\n");
+        return;
+      }
+    } else if (strcmp(argv[i], "-g") == 0) {
+      arguments->debug_print = true;
+    }
+  }
+}
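In effect the pre-pass rewrites a human-readable end time into the epoch value the argp handler expects later. A minimal sketch of that conversion; taosParseTime comes from the TDengine time utilities already used above, and the literal is only an example:

    char tmp[] = "2017-10-01T18:00:00+0800";
    int64_t tmpEpoch = 0;
    if (strchr(tmp, ':') && strchr(tmp, '-')) {
        taosParseTime(tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0);
    } else {
        tmpEpoch = atoll(tmp);                      /* already an epoch value */
    }
    printf("end time as epoch ms: %" PRId64 "\n", tmpEpoch);   /* e.g. 1506852000000 */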
 int main(int argc, char *argv[]) {

   /* Parse our arguments; every option seen by parse_opt will be
      reflected in arguments. */
-  argp_parse(&argp, argc, argv, 0, 0, &tsArguments);
+  parse_args(argc, argv, &g_args);
+
+  argp_parse(&argp, argc, argv, 0, 0, &g_args);

-  if (tsArguments.abort) {
+  if (g_args.abort) {
 #ifndef _ALPINE
     error(10, 0, "ABORTED");
 #else
...
@@ -469,81 +527,82 @@ int main(int argc, char *argv[]) {
   printf("====== arguments config ======\n");
   {
     printf("host: %s\n", g_args.host);
     printf("user: %s\n", g_args.user);
     printf("password: %s\n", g_args.password);
     printf("port: %u\n", g_args.port);
     printf("cversion: %s\n", g_args.cversion);
     printf("mysqlFlag: %d\n", g_args.mysqlFlag);
     printf("outpath: %s\n", g_args.outpath);
     printf("inpath: %s\n", g_args.inpath);
     printf("resultFile: %s\n", g_args.resultFile);
     printf("encode: %s\n", g_args.encode);
     printf("all_databases: %d\n", g_args.all_databases);
     printf("databases: %d\n", g_args.databases);
     printf("schemaonly: %d\n", g_args.schemaonly);
     printf("with_property: %d\n", g_args.with_property);
     printf("start_time: %" PRId64 "\n", g_args.start_time);
     printf("end_time: %" PRId64 "\n", g_args.end_time);
     printf("data_batch: %d\n", g_args.data_batch);
     printf("max_sql_len: %d\n", g_args.max_sql_len);
     printf("table_batch: %d\n", g_args.table_batch);
     printf("thread_num: %d\n", g_args.thread_num);
     printf("allow_sys: %d\n", g_args.allow_sys);
     printf("abort: %d\n", g_args.abort);
     printf("isDumpIn: %d\n", g_args.isDumpIn);
     printf("arg_list_len: %d\n", g_args.arg_list_len);
+    printf("debug_print: %d\n", g_args.debug_print);
     for (int32_t i = 0; i < g_args.arg_list_len; i++) {
       printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
     }
   }
   printf("==============================\n");
-  if (tsArguments.cversion[0] != 0){
-    tstrncpy(version, tsArguments.cversion, 11);
+  if (g_args.cversion[0] != 0){
+    tstrncpy(version, g_args.cversion, 11);
   }

-  if (taosCheckParam(&tsArguments) < 0) {
+  if (taosCheckParam(&g_args) < 0) {
     exit(EXIT_FAILURE);
   }

-  g_fpOfResult = fopen(tsArguments.resultFile, "a");
+  g_fpOfResult = fopen(g_args.resultFile, "a");
   if (NULL == g_fpOfResult) {
-    fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile);
+    fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile);
     return 1;
   };

   fprintf(g_fpOfResult, "#############################################################################\n");
   fprintf(g_fpOfResult, "============================== arguments config =============================\n");
   {
     fprintf(g_fpOfResult, "host: %s\n", g_args.host);
     fprintf(g_fpOfResult, "user: %s\n", g_args.user);
     fprintf(g_fpOfResult, "password: %s\n", g_args.password);
     fprintf(g_fpOfResult, "port: %u\n", g_args.port);
     fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
     fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
     fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
     fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
     fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
     fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
     fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases);
     fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
     fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly);
     fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property);
     fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
     fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
     fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
     fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
     fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
     fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
     fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
     fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
     fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
     fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
     for (int32_t i = 0; i < g_args.arg_list_len; i++) {
       fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
     }
   }
...
@@ -552,11 +611,11 @@ int main(int argc, char *argv[]) {
   time_t tTime = time(NULL);
   struct tm tm = *localtime(&tTime);

-  if (tsArguments.isDumpIn) {
+  if (g_args.isDumpIn) {
     fprintf(g_fpOfResult, "============================== DUMP IN ==============================\n");
     fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n",
             tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-    if (taosDumpIn(&tsArguments) < 0) {
+    if (taosDumpIn(&g_args) < 0) {
       fprintf(g_fpOfResult, "\n");
       fclose(g_fpOfResult);
       return -1;
...
@@ -565,7 +624,7 @@ int main(int argc, char *argv[]) {
     fprintf(g_fpOfResult, "============================== DUMP OUT ==============================\n");
     fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n",
             tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-    if (taosDumpOut(&tsArguments) < 0) {
+    if (taosDumpOut(&g_args) < 0) {
       fprintf(g_fpOfResult, "\n");
       fclose(g_fpOfResult);
       return -1;
...
@@ -573,9 +632,9 @@ int main(int argc, char *argv[]) {
     fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ==============================\n");
     fprintf(g_fpOfResult, "# total database count:     %d\n", g_resultStatistics.totalDatabasesOfDumpOut);
     fprintf(g_fpOfResult, "# total super table count:  %d\n", g_resultStatistics.totalSuperTblsOfDumpOut);
     fprintf(g_fpOfResult, "# total child table count:  %" PRId64 "\n", g_resultStatistics.totalChildTblsOfDumpOut);
     fprintf(g_fpOfResult, "# total row count:          %" PRId64 "\n", g_resultStatistics.totalRowsOfDumpOut);
   }
   fprintf(g_fpOfResult, "\n");
...
@@ -1236,8 +1295,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
   FILE *fp = NULL;
   memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);

-  if (tsArguments.outpath[0] != 0) {
-    sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
+  if (g_args.outpath[0] != 0) {
+    sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
   } else {
     sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
   }
...
@@ -1270,7 +1329,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
     ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
     if (readLen <= 0) break;

-    int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName);
+    int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName);
     if (ret >= 0) {
       // TODO: sum table count and table rows by self
       pThread->tablesOfDumpOut++;
...
@@ -1282,13 +1341,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
     }
     tablesInOneFile++;

-    if (tablesInOneFile >= tsArguments.table_batch) {
+    if (tablesInOneFile >= g_args.table_batch) {
       fclose(fp);
       tablesInOneFile = 0;

       memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
-      if (tsArguments.outpath[0] != 0) {
-        sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
+      if (g_args.outpath[0] != 0) {
+        sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
       } else {
         sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
       }
...
@@ -1491,14 +1550,14 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
   taos_free_result(res);
   lseek(fd, 0, SEEK_SET);

-  int maxThreads = tsArguments.thread_num;
+  int maxThreads = g_args.thread_num;
   int tableOfPerFile;
-  if (numOfTable <= tsArguments.thread_num) {
+  if (numOfTable <= g_args.thread_num) {
     tableOfPerFile = 1;
     maxThreads = numOfTable;
   } else {
-    tableOfPerFile = numOfTable / tsArguments.thread_num;
-    if (0 != numOfTable % tsArguments.thread_num) {
+    tableOfPerFile = numOfTable / g_args.thread_num;
+    if (0 != numOfTable % g_args.thread_num) {
       tableOfPerFile += 1;
     }
   }
...
@@ -1806,9 +1865,9 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
     //}
   }

   fprintf(fp, "\n");
   atomic_add_fetch_64(&totalDumpOutRows, totalRows);

   taos_free_result(tmpResult);
   free(tmpBuffer);
   return totalRows;
...
@@ -1824,7 +1883,7 @@ int taosCheckParam(struct arguments *arguments) {
     fprintf(stderr, "start time is larger than end time\n");
     return -1;
   }

   if (arguments->arg_list_len == 0) {
     if ((!arguments->all_databases) && (!arguments->isDumpIn)) {
       fprintf(stderr, "taosdump requires parameters\n");
...
@@ -2214,7 +2273,7 @@ void* taosDumpInWorkThreadFp(void *arg)
       continue;
     }
     fprintf(stderr, "Success Open input file: %s\n", SQLFileName);
-    taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName);
+    taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName);
   }
 }
...
...
src/mnode/src/mnodeDnode.c
...
@@ -628,6 +628,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
     bnNotify();
   }

+  if (!tsEnableBalance) {
+    int32_t numOfMnodes = mnodeGetMnodesNum();
+    if (numOfMnodes < tsNumOfMnodes) bnNotify();
+  }
+
   if (openVnodes != pDnode->openVnodes) {
     mnodeCheckUnCreatedVgroup(pDnode, pStatus->load, openVnodes);
   }
...
...
src/mnode/src/mnodeMnode.c (view file @ 64c06c95)
...
...
@@ -381,6 +381,8 @@ static bool mnodeAllOnline() {
   void *pIter = NULL;
   bool  allOnline = true;
 
+  sdbUpdateMnodeRoles();
+
   while (1) {
     SMnodeObj *pMnode = NULL;
     pIter = mnodeGetNextMnode(pIter, &pMnode);
...
...
src/query/inc/qExecutor.h (view file @ 64c06c95)
...
...
@@ -86,7 +86,8 @@ typedef struct SResultRow {
   bool       closed;       // this result status: closed or opened
   uint32_t   numOfRows;    // number of rows of current time window
   SResultRowCellInfo*  pCellInfo;  // For each result column, there is a resultInfo
-  union {STimeWindow win; char* key;};  // start key of current result row
+  STimeWindow win;
+  char*       key;         // start key of current result row
 } SResultRow;
 
 typedef struct SGroupResInfo {
...
...
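The SResultRow change replaces an anonymous union, in which the interval time window and the group-by key shared the same storage, with two independent fields, so setting one no longer overwrites the other. A toy illustration of the difference, using stand-in types rather than the engine's real ones:

#include <stdio.h>

/* Toy stand-ins; the real SResultRow/STimeWindow live in qExecutor.h. */
typedef struct { long long skey, ekey; } ToyWindow;

typedef struct {
  ToyWindow win;   /* interval-query time window           */
  char     *key;   /* group-by key (e.g. for string tags)  */
} ToyResultRow;     /* separate fields: both can be valid at once */

int main(void) {
  char keybuf[] = "groupA";
  ToyResultRow row = { .win = { 100, 200 }, .key = NULL };
  row.key = keybuf;  /* with separate fields this no longer clobbers row.win */
  printf("win=[%lld,%lld] key=%s\n", row.win.skey, row.win.ekey, row.key);
  return 0;
}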
src/query/src/qExecutor.c (view file @ 64c06c95)
...
...
@@ -1876,14 +1876,15 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
   taosHashCleanup(pRuntimeEnv->pResultRowHashTable);
   pRuntimeEnv->pResultRowHashTable = NULL;
 
-  pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
-  taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
-  pRuntimeEnv->prevResult = NULL;
   taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap);
   pRuntimeEnv->pTableRetrieveTsMap = NULL;
 
   destroyOperatorInfo(pRuntimeEnv->proot);
+
+  pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
+  taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
+  pRuntimeEnv->prevResult = NULL;
 }
 
 static bool needBuildResAfterQueryComplete(SQInfo *pQInfo) {
...
...
@@ -2630,6 +2631,21 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
     tsdbRetrieveDataBlockStatisInfo(pTableScanInfo->pQueryHandle, &pBlock->pBlockStatis);
 
     if (pQuery->topBotQuery && pBlock->pBlockStatis != NULL) {
+      { // set previous window
+        if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
+          SResultRow* pResult = NULL;
+
+          bool  masterScan = IS_MASTER_SCAN(pRuntimeEnv);
+          TSKEY k = ascQuery ? pBlock->info.window.skey : pBlock->info.window.ekey;
+
+          STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQuery);
+          if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+                                      pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+                                      pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
+            longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+          }
+        }
+      }
       bool load = false;
       for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
         int32_t functionId = pTableScanInfo->pCtx[i].functionId;
...
...
@@ -6463,6 +6479,9 @@ void freeQInfo(SQInfo *pQInfo) {
+  SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
+  releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables);
+  doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);
 
   teardownQueryRuntimeEnv(&pQInfo->runtimeEnv);
 
   SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
...
...
@@ -6498,7 +6517,6 @@ void freeQInfo(SQInfo *pQInfo) {
     }
   }
 
-  doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo);
   tfree(pQInfo->pBuf);
   tfree(pQInfo->sql);
...
...
src/query/src/qTokenizer.c (view file @ 64c06c95)
...
...
@@ -560,7 +560,7 @@ uint32_t tSQLGetToken(char* z, uint32_t* tokenId) {
   return 0;
 }
 
-SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t* ignoreTokenTypes) {
+SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) {
   SStrToken t0 = {0};
 
   // here we reach the end of sql string, null-terminated string
...
...
@@ -585,7 +585,10 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
     }
 
     t0.n = tSQLGetToken(&str[*i], &t0.type);
+    break;
 
+    // not support user specfied ignored symbol list
+#if 0
     bool ignore = false;
     for (uint32_t k = 0; k < numOfIgnoreToken; k++) {
       if (t0.type == ignoreTokenTypes[k]) {
...
...
@@ -597,6 +600,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
     if (!ignore) {
       break;
     }
+#endif
   }
 
   if (t0.type == TK_SEMI) {
...
...
src/query/src/qUtil.c (view file @ 64c06c95)
...
...
@@ -66,8 +66,8 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) {
     return;
   }
 
   if (pResultRowInfo->type == TSDB_DATA_TYPE_BINARY || pResultRowInfo->type == TSDB_DATA_TYPE_NCHAR) {
     for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
       if (pResultRowInfo->pResult[i]) {
         tfree(pResultRowInfo->pResult[i]->key);
       }
     }
...
...
@@ -153,11 +153,8 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
   pResultRow->offset = -1;
   pResultRow->closed = false;
 
-  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-    tfree(pResultRow->key);
-  } else {
-    pResultRow->win = TSWINDOW_INITIALIZER;
-  }
+  tfree(pResultRow->key);
+  pResultRow->win = TSWINDOW_INITIALIZER;
 }
 
 // TODO refactor: use macro
...
...
src/query/tests/resultBufferTest.cpp (view file @ 64c06c95)
...
...
@@ -10,7 +10,7 @@ namespace {
 // simple test
 void simpleTest() {
   SDiskbasedResultBuf* pResultBuf = NULL;
-  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, NULL);
+  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, 1);
 
   int32_t pageId = 0;
   int32_t groupId = 0;
...
...
@@ -52,7 +52,7 @@ void simpleTest() {
 void writeDownTest() {
   SDiskbasedResultBuf* pResultBuf = NULL;
-  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
+  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
 
   int32_t pageId = 0;
   int32_t writePageId = 0;
...
...
@@ -99,7 +99,7 @@ void writeDownTest() {
 void recyclePageTest() {
   SDiskbasedResultBuf* pResultBuf = NULL;
-  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
+  int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, 1);
 
   int32_t pageId = 0;
   int32_t writePageId = 0;
...
...
src/sync/inc/syncInt.h (view file @ 64c06c95)
...
...
@@ -35,7 +35,7 @@ extern "C" {
 #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
 #define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
-#define SYNC_MAX_FWDS 512
+#define SYNC_MAX_FWDS 1024
 #define SYNC_FWD_TIMER 300
 #define SYNC_ROLE_TIMER 15000 // ms
 #define SYNC_CHECK_INTERVAL 1000 // ms
...
...
src/sync/src/syncMain.c (view file @ 64c06c95)
...
...
@@ -1372,7 +1372,7 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
     if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
     if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
 
-    sDebug("%s, check roles since self:%s sstatus:%s, peer:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
+    sDebug("%s, check roles since peer:%s sstatus:%s, self:%s sstatus:%s", pPeer->id, syncRole[pPeer->role],
            syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]);
     syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId());
     break;
...
...
@@ -1459,7 +1459,12 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle
   if ((pNode->quorum > 1 || force) && code == 0) {
     code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle);
-    if (code >= 0) code = 1;
+    if (code >= 0) {
+      code = 1;
+    } else {
+      pthread_mutex_unlock(&pNode->mutex);
+      return code;
+    }
   }
 
   int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen);
...
...
src/tsdb/src/tsdbMain.c (view file @ 64c06c95)
...
...
@@ -526,7 +526,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
     STable *pTable = pMeta->tables[i];
     if (pTable && pTable->type == TSDB_STREAM_TABLE) {
       pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
-                                                     tsdbGetTableSchemaImpl(pTable, false, false, -1));
+                                                     tsdbGetTableSchemaImpl(pTable, false, false, -1), 0);
     }
   }
 }
...
...
@@ -619,4 +619,4 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
   tsdbDestroyReadH(&readh);
 
   return 0;
-}
\ No newline at end of file
+}
...
...
src/tsdb/src/tsdbMeta.c (view file @ 64c06c95)
...
...
@@ -840,7 +840,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
   if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
 
   if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
     pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
-                                                   tsdbGetTableSchemaImpl(pTable, false, false, -1));
+                                                   tsdbGetTableSchemaImpl(pTable, false, false, -1), 1);
   }
 
   tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
...
...
@@ -1322,4 +1322,4 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
   }
 
   return 0;
-}
\ No newline at end of file
+}
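Both tsdb call sites now pass one extra trailing argument to the cqCreateFunc callback (0 in tsdbStartStream above, 1 when a new stream table is added here); the callback typedef in tcq.h changes accordingly elsewhere in this commit, but the meaning of the flag is not spelled out in these hunks. A generic sketch of widening a callback typedef with a trailing flag, using made-up names only:

#include <stdio.h>

/* Hypothetical names; only the shape of the change is illustrated. */
typedef void *(*cq_create_fp)(void *handle, unsigned long uid, int tid,
                              const char *name, const char *sql, int flag);

static void *myCqCreate(void *handle, unsigned long uid, int tid,
                        const char *name, const char *sql, int flag) {
  printf("create cq for %s (flag=%d)\n", name, flag);
  (void)handle; (void)uid; (void)tid; (void)sql;
  return NULL;
}

int main(void) {
  cq_create_fp createFunc = myCqCreate;
  createFunc(NULL, 1UL, 1, "stream_tb", "select avg(c1) from tb", 1 /* hypothetical flag value */);
  return 0;
}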
src/util/inc/tstoken.h (view file @ 64c06c95)
...
...
@@ -51,11 +51,9 @@ uint32_t tSQLGetToken(char *z, uint32_t *tokenType);
  * @param str
  * @param i
  * @param isPrevOptr
- * @param numOfIgnoreToken
- * @param ignoreTokenTypes
  * @return
  */
-SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr, uint32_t numOfIgnoreToken, uint32_t *ignoreTokenTypes);
+SStrToken tStrGetToken(char *str, int32_t *i, bool isPrevOptr);
 
 /**
  * check if it is a keyword or not
...
...
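After this change callers no longer pass an ignore-token list; tStrGetToken(str, &index, isPrevOptr) is the whole interface. A minimal usage sketch, assuming tstoken.h is on the include path and that SStrToken's n field holds the token length as declared there:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "tstoken.h"

int main(void) {
  char    sql[] = "select * from tb;";
  int32_t index = 0;

  /* The two ignore-list parameters are gone; only the scan position and the
     previous-operator hint remain. */
  SStrToken tok = tStrGetToken(sql, &index, false);
  printf("first token length: %u\n", tok.n);
  return 0;
}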
src/vnode/src/vnodeWrite.c (view file @ 64c06c95)
...
...
@@ -91,13 +91,17 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
   int32_t syncCode = 0;
   bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
   syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force);
-  if (syncCode < 0) return syncCode;
+  if (syncCode < 0) {
+    pHead->version = 0;
+    return syncCode;
+  }
 
   // write into WAL
   code = walWrite(pVnode->wal, pHead);
   if (code < 0) {
     if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1);
     vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code);
     pHead->version = 0;
     return code;
   }
...
...
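The vnode change rolls the WAL head's version back to 0 whenever forwarding to peers fails (and the error branch does the same when the WAL write fails), so a retried write starts from a clean version. A simplified sketch of that rollback-on-failure pattern with stand-in types, not the real vnode code:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for SWalHead / syncForwardToPeer; names and types are illustrative. */
typedef struct { uint64_t version; } WalHeadLike;

static int32_t forwardToPeers(WalHeadLike *head) { (void)head; return -1; /* pretend it failed */ }

static int32_t processWrite(WalHeadLike *head, uint64_t nextVersion) {
  head->version = nextVersion;             /* version assigned up front          */
  int32_t syncCode = forwardToPeers(head);
  if (syncCode < 0) {
    head->version = 0;                     /* roll back so a retry reassigns it  */
    return syncCode;
  }
  return 0;
}

int main(void) {
  WalHeadLike head = {0};
  printf("code=%d version=%llu\n", processWrite(&head, 42), (unsigned long long)head.version);
  return 0;
}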
tests/pytest/fulltest.sh (view file @ 64c06c95)
...
...
@@ -178,7 +178,7 @@ python3 ./test.py -f stable/query_after_reset.py
 # perfbenchmark
 python3 ./test.py -f perfbenchmark/bug3433.py
-python3 ./test.py -f perfbenchmark/bug3589.py
+#python3 ./test.py -f perfbenchmark/bug3589.py
 
 #query
...
...
tests/pytest/insert/metadataUpdate.py (view file @ 64c06c95)
...
...
@@ -11,13 +11,13 @@
 # -*- coding: utf-8 -*-
 import sys
 import taos
 from util.log import tdLog
 from util.cases import tdCases
 from util.sql import tdSql
 from util.dnodes import tdDnodes
 from multiprocessing import Process
+import subprocess
 
 class TDTestCase:
     def init(self, conn, logSql):
...
...
@@ -40,27 +40,22 @@ class TDTestCase:
         print("alter table done")
 
     def deleteTableAndRecreate(self):
-        self.host = "127.0.0.1"
-        self.user = "root"
-        self.password = "taosdata"
         self.config = tdDnodes.getSimCfgPath()
-        self.conn = taos.connect(host = self.host, user = self.user, password = self.password, config = self.config)
-        self.cursor = self.conn.cursor()
-        self.cursor.execute("use test")
-        print("drop table stb")
-        self.cursor.execute("drop table stb")
-        print("create table stb")
-        self.cursor.execute("create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20))")
-        print("insert data")
+
+        sqlCmds = "use test; drop table stb;"
+        sqlCmds += "create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20));"
         for i in range(self.tables):
             city = "beijing" if i % 2 == 0 else "shanghai"
-            self.cursor.execute("create table tb%d using stb tags(%d, '%s')" % (i, i, city))
-            for j in range(self.rows):
-                self.cursor.execute("insert into tb%d values(%d, %d)" % (i, self.ts + j, j * 100000))
+            sqlCmds += "create table tb%d using stb tags(%d, '%s');" % (i, i, city)
+            for j in range(5):
+                sqlCmds += "insert into tb%d values(%d, %d);" % (i, self.ts + j, j * 100000)
+        command = ["taos", "-c", self.config, "-s", sqlCmds]
+        print("drop stb, recreate stb and insert data ")
+        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
+        if result.returncode == 0:
+            print("success:", result)
+        else:
+            print("error:", result)
 
     def run(self):
         tdSql.prepare()
...
...
@@ -100,19 +95,17 @@ class TDTestCase:
         tdSql.query("select count(*) from stb")
         tdSql.checkData(0, 0, 10000)
 
-        tdSql.query("select count(*) from tb1")
+        tdSql.query("select count(*) from tb0")
         tdSql.checkData(0, 0, 1000)
 
-        p = Process(target=self.deleteTableAndRecreate, args=())
-        p.start()
-        p.join()
-        p.terminate()
+        # drop stable in subprocess
+        self.deleteTableAndRecreate()
 
         tdSql.query("select count(*) from stb")
-        tdSql.checkData(0, 0, 10000)
+        tdSql.checkData(0, 0, 5 * self.tables)
 
-        tdSql.query("select count(*) from tb1")
-        tdSql.checkData(0, 0, 1000)
+        tdSql.query("select count(*) from tb0")
+        tdSql.checkData(0, 0, 5)
 
     def stop(self):
         tdSql.close()
...
...
tests/pytest/tools/insert-tblimit-tboffset-createdb.json (new file, mode 100644, view file @ 64c06c95)

{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "num_of_records_per_req": 100,
  "max_sql_len": 1024000,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "yes",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",
      "keep": 365,
      "minRows": 100,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "no",
      "childtable_count": 100,
      "childtable_prefix": "stb_",
      "auto_create_table": "no",
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 0,
      "multi_thread_write_one_tbl": "no",
      "number_of_tbl_in_one_sql": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 1,
      "start_timestamp": "2020-10-01 00:00:00.000",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
      "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
    }]
  }]
}
tests/pytest/tools/insert-tblimit-tboffset-insertrec.json (new file, mode 100644, view file @ 64c06c95)

{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "num_of_records_per_req": 100,
  "max_sql_len": 1024000,
  "databases": [{
    "dbinfo": {
      "name": "db",
      "drop": "no",
      "replica": 1,
      "days": 10,
      "cache": 16,
      "blocks": 8,
      "precision": "ms",
      "keep": 365,
      "minRows": 100,
      "maxRows": 4096,
      "comp": 2,
      "walLevel": 1,
      "cachelast": 0,
      "quorum": 1,
      "fsync": 3000,
      "update": 0
    },
    "super_tables": [{
      "name": "stb",
      "child_table_exists": "yes",
      "childtable_count": 100,
      "childtable_prefix": "stb_",
      "auto_create_table": "no",
      "data_source": "rand",
      "insert_mode": "taosc",
      "insert_rows": 1000,
      "childtable_limit": 33,
      "childtable_offset": 33,
      "multi_thread_write_one_tbl": "no",
      "number_of_tbl_in_one_sql": 0,
      "max_sql_len": 1024000,
      "disorder_ratio": 0,
      "disorder_range": 1000,
      "timestamp_step": 1,
      "start_timestamp": "2020-10-01 00:00:00.000",
      "sample_format": "csv",
      "sample_file": "./sample.csv",
      "tags_file": "",
      "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
      "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
    }]
  }]
}
tests/pytest/tools/insert-tblimit-tboffset0.json (view file @ 64c06c95)
...
...
@@ -15,7 +15,7 @@
   "databases": [{
     "dbinfo": {
       "name": "db",
-      "drop": "yes",
+      "drop": "no",
       "replica": 1,
       "days": 10,
       "cache": 16,
...
...
@@ -33,7 +33,7 @@
     },
     "super_tables": [{
       "name": "stb",
-      "child_table_exists": "no",
+      "child_table_exists": "yes",
       "childtable_count": 100,
       "childtable_prefix": "stb_",
       "auto_create_table": "no",
...
...
tests/pytest/tools/insert-tblimit1-tboffset.json (view file @ 64c06c95)
...
...
@@ -15,7 +15,7 @@
   "databases": [{
     "dbinfo": {
       "name": "db",
-      "drop": "yes",
+      "drop": "no",
       "replica": 1,
       "days": 10,
       "cache": 16,
...
...
@@ -33,7 +33,7 @@
     },
     "super_tables": [{
       "name": "stb",
-      "child_table_exists": "no",
+      "child_table_exists": "yes",
       "childtable_count": 100,
       "childtable_prefix": "stb_",
       "auto_create_table": "no",
...
...
tests/pytest/tools/taosdemo-sampledata.json (view file @ 64c06c95)
...
...
@@ -16,8 +16,6 @@
       "name": "stb",
       "child_table_exists": "no",
       "childtable_count": 20,
-      "childtable_limit": 10,
-      "childtable_offset": 0,
       "childtable_prefix": "t_",
       "auto_create_table": "no",
       "data_source": "sample",
...
...
tests/pytest/tools/taosdemoTestLimitOffset.py (view file @ 64c06c95)
...
...
@@ -51,7 +51,8 @@ class TDTestCase:
         else:
             tdLog.info("taosd found in %s" % buildPath)
         binPath = buildPath + "/build/bin/"
 
-        os.system("%staosdemo -f tools/insert-tblimit-tboffset.json" % binPath)
+        os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath)
+        os.system("%staosdemo -f tools/insert-tblimit-tboffset-insertrec.json" % binPath)
 
         tdSql.execute("use db")
         tdSql.query("select count(tbname) from db.stb")
...
...
@@ -59,6 +60,7 @@ class TDTestCase:
         tdSql.query("select count(*) from db.stb")
         tdSql.checkData(0, 0, 33000)
 
+        os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath)
         os.system("%staosdemo -f tools/insert-tblimit-tboffset0.json" % binPath)
 
         tdSql.execute("reset query cache")
...
...
@@ -68,6 +70,7 @@ class TDTestCase:
         tdSql.query("select count(*) from db.stb")
         tdSql.checkData(0, 0, 20000)
 
+        os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath)
         os.system("%staosdemo -f tools/insert-tblimit1-tboffset.json" % binPath)
 
         tdSql.execute("reset query cache")
...
...
tests/pytest/tools/taosdemoTestSampleData.py (view file @ 64c06c95)
...
...
@@ -57,7 +57,7 @@ class TDTestCase:
         tdSql.query("select count(tbname) from db.stb")
         tdSql.checkData(0, 0, 20)
         tdSql.query("select count(*) from db.stb")
-        tdSql.checkData(0, 0, 200)
+        tdSql.checkData(0, 0, 400)
 
     def stop(self):
         tdSql.close()
...
...
tests/script/general/parser/alter.sim (view file @ 64c06c95)
...
...
@@ -204,7 +204,13 @@ if $data03 != NULL then
   return -1
 endi
 sql reset query cache
+
+print ============================>TD-3366 TD-3486
+sql insert into td_3366(ts, c3, c1) using mt(t1) tags(911) values('2018-1-1 11:11:11', 'new1', 12);
+sql insert into td_3486(ts, c3, c1) using mt(t1) tags(-12) values('2018-1-1 11:11:11', 'new1', 12);
+sql insert into ttxu(ts, c3, c1) using mt(t1) tags('-121') values('2018-1-1 11:11:11', 'new1', 12);
+
 sql insert into tb(ts, c1, c3) using mt(t1) tags(123) values('2018-11-01 16:29:58.000', 2, 'port')
 sql insert into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
 sql import into tb values ('2018-11-01 16:29:58.000', 2, 'import', 3)
 sql import into tb values ('2018-11-01 16:39:58.000', 2, 'import', 3)
...
...
@@ -212,6 +218,7 @@ sql select * from tb order by ts desc
if $rows != 4 then
  return -1
endi
if $data03 != 3 then
  return -1
endi
...
...
@@ -233,10 +240,10 @@ sql_error alter table mt add column c1 int
 # drop non-existing columns
 sql_error alter table mt drop column c9
 
-sql drop database $db
-sql show databases
-if $rows != 0 then
-  return -1
-endi
+#sql drop database $db
+#sql show databases
+#if $rows != 0 then
+#  return -1
+#endi
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
tests/script/general/parser/gendata.sh (new file, mode 100755, view file @ 64c06c95)

#!/bin/bash

Cur_Dir=$(pwd)
echo $Cur_Dir

echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
tests/script/general/parser/import_file.sim (view file @ 64c06c95)
...
...
@@ -8,32 +8,28 @@ sql connect
 sleep 500
 sql drop database if exists indb
 sql create database if not exists indb
 sql use indb
 
-$inFileName = '~/data.csv'
 $numOfRows = 10000
-#system sh/gendata.sh $inFileName $numOfRows # input file invalid
-system sh/gendata.sh ~/data.csv $numOfRows
+system general/parser/gendata.sh
 
 sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
 print ====== create tables success, starting import data
 
-sql import into tbx file $inFileName
+sql import into tbx file '~/data.sql'
 
 sql select count(*) from tbx
 if $rows != 1 then
   return -1
 endi
 
-if $data00 != $numOfRows then
-  print "expect: $numOfRows, act: $data00"
-  return -1
-endi
+#if $data00 != $numOfRows then
+#  print "expect: $numOfRows, act: $data00"
+#  return -1
+#endi
 
-#system rm -f $inFileName # invalid shell
-system rm -f ~/data.csv
+system rm -f ~/data.sql
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
tests/script/general/parser/select_with_tags.sim (view file @ 64c06c95)
...
...
@@ -159,6 +159,15 @@ if $data03 != @abc15@ then
   return -1
 endi
 
+sql select top(c6, 3) from select_tags_mt0 interval(10a)
+sql select top(c3,10) from select_tags_mt0 interval(10a) group by tbname
+sql select top(c6, 3) from select_tags_mt0 interval(10a) group by tbname;
+
+sql select top(c6, 10) from select_tags_mt0 interval(10a);
+if $rows != 12800 then
+  return -1
+endi
+
 sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0;
 if $rows != 100 then
   return -1
...
...
tests/tsim/inc/sim.h (view file @ 64c06c95)
...
...
@@ -149,6 +149,7 @@ extern int32_t simScriptSucced;
 extern int32_t simDebugFlag;
 extern char    tsScriptDir[];
 extern bool    simAsyncQuery;
+extern bool    abortExecution;
 
 SScript *simParseScript(char *fileName);
 SScript *simProcessCallOver(SScript *script);
...
...
tests/tsim/src/simExe.c (view file @ 64c06c95)
...
...
@@ -645,8 +645,12 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) {
 bool simCreateNativeConnect(SScript *script, char *user, char *pass) {
   simCloseTaosdConnect(script);
   void *taos = NULL;
   taosMsleep(2000);
   for (int32_t attempt = 0; attempt < 10; ++attempt) {
+    if (abortExecution) {
+      script->killed = true;
+      return false;
+    }
     taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort);
     if (taos == NULL) {
       simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL),
...
...
@@ -697,6 +701,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
   TAOS_RES *pSql = NULL;
 
   for (int32_t attempt = 0; attempt < 10; ++attempt) {
+    if (abortExecution) {
+      script->killed = true;
+      return false;
+    }
+
     simLogSql(rest, false);
     pSql = taos_query(script->taos, rest);
     ret = taos_errno(pSql);
...
...
tests/tsim/src/simMain.c (view file @ 64c06c95)
...
...
@@ -21,10 +21,13 @@
 bool simAsyncQuery = false;
 bool simExecSuccess = false;
+bool abortExecution = false;
 
 void simHandleSignal(int32_t signo, void *sigInfo, void *context) {
   simSystemCleanUp();
-  exit(1);
+  abortExecution = true;
+  // runningScript->killed = true;
+  // exit(1);
 }
 
 int32_t main(int32_t argc, char *argv[]) {
...
...
@@ -60,6 +63,11 @@ int32_t main(int32_t argc, char *argv[]) {
     return -1;
   }
 
+  if (abortExecution) {
+    simError("execute abort");
+    return -1;
+  }
+
   simScriptList[++simScriptPos] = script;
   simExecuteScript(script);
...
...
tests/tsim/src/simSystem.c (view file @ 64c06c95)
...
...
@@ -159,9 +159,17 @@ void *simExecuteScript(void *inputScript) {
       script = simScriptList[simScriptPos];
     }
 
+    if (abortExecution) {
+      script->killed = true;
+    }
+
     if (script->killed || script->linePos >= script->numOfLines) {
+      printf("killed ---------------------->\n");
       script = simProcessCallOver(script);
-      if (script == NULL) break;
+      if (script == NULL) {
+        printf("abort now!\n");
+        break;
+      }
     } else {
       SCmdLine *line = &script->lines[script->linePos];
       char *    option = script->optionBuffer + line->optionOffset;
...
...
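The three tsim changes above replace the hard exit(1) in the SIGINT handler with an abortExecution flag that the connect/query retry loops and the script scheduler poll, so Ctrl-C now unwinds cleanly instead of killing the process mid-run. A self-contained sketch of the same flag-polling pattern (simplified; the real handler is installed through TDengine's own signal wrapper and uses a plain bool):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t abortExecution = 0;

static void handleSigint(int signo) {
  (void)signo;
  abortExecution = 1;           /* just set the flag; no exit() in the handler */
}

int main(void) {
  signal(SIGINT, handleSigint);
  for (int attempt = 0; attempt < 10; ++attempt) {
    if (abortExecution) {        /* polled at the top of each retry, like simExe.c */
      printf("abort now!\n");
      return 1;
    }
    /* ... one connect attempt / one unit of script work ... */
    sleep(1);
  }
  return 0;
}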