taosdata / TDengine

Commit a7fab934
Authored Aug 12, 2021 by Haojun Liao
Merge branch 'develop' into feature/query
Parents: 789918b9, e6be0e89

Showing 29 changed files with 2323 additions and 796 deletions (+2323 -796)
Jenkinsfile (+55 -49)
documentation20/cn/11.administrator/docs.md (+349 -59)
documentation20/cn/12.taos-sql/docs.md (+43 -1)
src/client/inc/tscUtil.h (+1 -1)
src/client/inc/tsclient.h (+488 -19)
src/client/src/tscParseInsert.c (+131 -453)
src/client/src/tscPrepare.c (+1 -1)
src/client/src/tscSQLParser.c (+3 -8)
src/client/src/tscServer.c (+8 -7)
src/client/src/tscUtil.c (+31 -121)
src/common/inc/tdataformat.h (+74 -16)
src/common/src/tdataformat.c (+2 -1)
src/common/src/tglobal.c (+1 -1)
src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java (+570 -0)
src/connector/nodejs/nodetaos/taosobjects.js (+2 -1)
src/connector/nodejs/package.json (+1 -1)
src/kit/taosdemo/taosdemo.c (+20 -2)
src/plugins/http/src/httpParser.c (+34 -8)
src/plugins/http/src/httpUtil.c (+12 -4)
src/query/src/qAggMain.c (+1 -1)
src/query/tests/rangeMergeTest.cpp (+1 -1)
src/rpc/src/rpcMain.c (+2 -2)
src/util/src/tcompare.c (+12 -11)
tests/perftest-scripts/perftest-query.sh (+7 -0)
tests/pytest/crash_gen/valgrind_taos.supp (+367 -1)
tests/pytest/fulltest.sh (+2 -1)
tests/pytest/functions/function_interp.py (+46 -0)
tests/pytest/query/queryWildcardLength.py (+5 -15)
tests/pytest/tools/taosdemoPerformance.py (+54 -11)
Jenkinsfile

@@ -4,9 +4,6 @@ properties([pipelineTriggers([githubPush()])])
 node {
     git url: 'https://github.com/taosdata/TDengine.git'
 }
 def skipstage = 0
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
   def currentBuildNumber = env.BUILD_NUMBER.toInteger()

@@ -33,8 +30,7 @@ def abort_previous(){
   milestone(buildNumber)
 }
 def pre_test(){
     sh 'hostname'
     sh '''
     sudo rmtaos || echo "taosd has not installed"
     '''

@@ -52,12 +48,18 @@ def pre_test(){
         git checkout master
         '''
       }
-      else {
+      else if (env.CHANGE_TARGET == '2.0') {
         sh '''
         cd ${WKC}
-        git checkout develop
+        git checkout 2.0
         '''
       }
+      else {
+        sh '''
+        cd ${WKC}
+        git checkout develop
+        '''
+      }
     sh '''
     cd ${WKC}

@@ -75,7 +77,13 @@ def pre_test(){
         git checkout master
         '''
       }
-      else {
+      else if (env.CHANGE_TARGET == '2.0'){
+        sh '''
+        cd ${WK}
+        git checkout 2.0
+        '''
+      }
+      else {
         sh '''
         cd ${WK}
         git checkout develop

@@ -95,19 +103,17 @@ def pre_test(){
     make > /dev/null
     make install > /dev/null
     cd ${WKC}/tests
-    pip3 install ${WKC}/src/connector/python
+    pip3 install ${WKC}/src/connector/python/
     '''
     return 1
 }
 pipeline {
   agent none
   environment{
       WK = '/var/lib/jenkins/workspace/TDinternal'
       WKC = '/var/lib/jenkins/workspace/TDinternal/community'
   }
   stages {
       stage('pre_build'){
           agent{label 'master'}

@@ -123,19 +129,22 @@ pipeline {
                 rm -rf ${WORKSPACE}.tes
                 cp -r ${WORKSPACE} ${WORKSPACE}.tes
                 cd ${WORKSPACE}.tes
                 git fetch
                 '''
                 script {
                   if (env.CHANGE_TARGET == 'master') {
                     sh '''
                     git checkout master
                     git pull origin master
                     '''
                   }
-                  else {
+                  else if (env.CHANGE_TARGET == '2.0'){
+                    sh '''
+                    git checkout 2.0
+                    '''
+                  }
+                  else {
                     sh '''
                     git checkout develop
                     git pull origin develop
                     '''
                   }
                 }

@@ -143,32 +152,33 @@ pipeline {
                 git fetch origin +refs/pull/${CHANGE_ID}/merge
                 git checkout -qf FETCH_HEAD
                 '''
                 script{
-                  env.skipstage = sh(script: "cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ", returnStdout: true)
+                  skipbuild = '2'
+                  skipbuild = sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout: true)
+                  println skipbuild
                 }
-                println env.skipstage
                 sh '''
                 rm -rf ${WORKSPACE}.tes
                 '''
             }
         }
         stage('Parallel test stage') {
             //only build pr
             when {
                 allOf{
                     changeRequest()
-                    expression {
-                        env.skipstage != 0
+                    expression{
+                        return skipbuild.trim() == '2'
                     }
                 }
             }
             parallel {
                 stage('python_1_s1') {
-                    agent{label 'p1'}
+                    agent{label " slave1 || slave11 "}
                     steps {
                         pre_test()
-                        timeout(time: 45, unit: 'MINUTES'){
+                        timeout(time: 55, unit: 'MINUTES'){
                             sh '''
                             date
                             cd ${WKC}/tests

@@ -179,11 +189,11 @@ pipeline {
                     }
                 }
                 stage('python_2_s5') {
-                    agent{label 'p2'}
+                    agent{label " slave5 || slave15 "}
                    steps {
                        pre_test()
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            sh '''
                            date
                            cd ${WKC}/tests

@@ -193,9 +203,9 @@ pipeline {
                    }
                }
                stage('python_3_s6') {
-                   agent{label 'p3'}
+                   agent{label " slave6 || slave16 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            date

@@ -206,9 +216,9 @@ pipeline {
                    }
                }
                stage('test_b1_s2') {
-                   agent{label 'b1'}
+                   agent{label " slave2 || slave12 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            cd ${WKC}/tests

@@ -217,9 +227,8 @@ pipeline {
                        }
                    }
                }
                stage('test_crash_gen_s3') {
-                   agent{label "b2"}
+                   agent{label "slave3 || slave13"}
                    steps {
                        pre_test()

@@ -245,20 +254,18 @@ pipeline {
                            ./handle_taosd_val_log.sh
                            '''
                        }
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            sh '''
                            date
                            cd ${WKC}/tests
                            ./test-all.sh b2fq
                            date
                            '''
                        }
                    }
                }
                stage('test_valgrind_s4') {
-                   agent{label "b3"}
+                   agent{label "slave4 || slave14"}
                    steps {
                        pre_test()

@@ -269,7 +276,7 @@ pipeline {
                            ./handle_val_log.sh
                            '''
                        }
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            sh '''
                            date
                            cd ${WKC}/tests

@@ -284,9 +291,9 @@ pipeline {
                    }
                }
                stage('test_b4_s7') {
-                   agent{label 'b4'}
+                   agent{label " slave7 || slave17 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            date

@@ -303,9 +310,9 @@ pipeline {
                    }
                }
                stage('test_b5_s8') {
-                   agent{label 'b5'}
+                   agent{label " slave8 || slave18 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            date

@@ -316,9 +323,9 @@ pipeline {
                    }
                }
                stage('test_b6_s9') {
-                   agent{label 'b6'}
+                   agent{label " slave9 || slave19 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            date

@@ -329,9 +336,9 @@ pipeline {
                    }
                }
                stage('test_b7_s10') {
-                   agent{label 'b7'}
+                   agent{label " slave10 || slave20 "}
                    steps {
-                       timeout(time: 45, unit: 'MINUTES'){
+                       timeout(time: 55, unit: 'MINUTES'){
                            pre_test()
                            sh '''
                            date

@@ -421,6 +428,5 @@ pipeline {
                        from: "support@taosdata.com"
                    )
                }
            }
        }
    }
}
\ No newline at end of file
documentation20/cn/11.administrator/docs.md
This diff is collapsed and not shown here.
documentation20/cn/12.taos-sql/docs.md

@@ -206,7 +206,7 @@ TDengine's default timestamp precision is millisecond, but by passing ... at CREATE DATABASE time
 Shows all data tables in the current database.
-Note: wildcards can be used in LIKE to match table names; the wildcard string must not exceed 24 bytes.
+Note: wildcards can be used in LIKE to match table names; the wildcard string must not exceed 20 bytes. (Starting from version 2.1.6.1 the wildcard string length limit is relaxed to 100 bytes, and this limit can be configured through the maxWildCardsLength parameter in taos.cfg. Very long wildcard strings are not recommended, as they may severely degrade the performance of the LIKE operation.)
 Wildcard matching: 1) '%' (percent sign) matches zero or more characters; 2) '\_' (underscore) matches exactly one character.

@@ -435,6 +435,17 @@ INSERT INTO
    INSERT INTO d1001 FILE '/tmp/csvfile.csv';
    ```
+- **Insert records from a file, creating the table automatically**
+  Starting from version 2.1.5.0, when inserting data from a CSV file, tables that do not exist can be created automatically using a super table as the template. For example:
+  ```mysql
+  INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
+  ```
+  Records can also be inserted into multiple tables, with automatic table creation, in a single statement. For example:
+  ```mysql
+  INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
+              d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
+  ```
 **Writing historical records**: the IMPORT or the INSERT command can be used; the syntax and function of IMPORT are exactly the same as INSERT.
 **Note:** INSERT-type SQL statements are parsed with a streaming strategy, so the correct part of the SQL preceding an error is still executed. In the SQL below, the INSERT statement is invalid, but table d1001 is still created.

@@ -1215,6 +1226,37 @@ TDengine supports aggregation queries over data. The supported aggregate and selection functions are
    Query OK, 1 row(s) in set (0.001042s)
    ```
+- **INTERP**
+  ```mysql
+  SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR})];
+  ```
+  Function: returns the value of the specified field of a table/super table at the specified time point (cross section).
+  Return type: same as the field it is applied to.
+  Applicable fields: all fields.
+  Applies to: **tables and super tables**.
+  Note: (available since version 2.0.15.0) INTERP must specify a time point; if no record exists exactly at that point, interpolation is performed according to the FILL setting. The WHERE clause may carry additional filter conditions such as tags or tbname.
+  Limitation: INTERP does not currently support FILL(NEXT).
+  Example:
+  ```mysql
+  taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev);
+   interp(ts)              | interp(f1) | interp(f2) | interp(f3) |
+  ====================================================================
+   2017-07-14 10:42:00.005 |          5 |          9 |          6 |
+  Query OK, 1 row(s) in set (0.002912s)
+
+  taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev);
+   interp(ts)              | interp(f1) | interp(f2) | interp(f3) |
+  ====================================================================
+   2017-07-14 10:42:00.005 |          5 |          6 |          7 |
+  Query OK, 1 row(s) in set (0.002005s)
+  ```

 ### Computation functions
 - **DIFF**
src/client/inc/tscUtil.h

@@ -343,7 +343,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
 uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
 CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
 uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta** ppStable);
 STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
 SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
src/client/inc/tsclient.h
This diff is collapsed and not shown here.
src/client/src/tscParseInsert.c
This diff is collapsed and not shown here.
src/client/src/tscPrepare.c

@@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
   SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;

   for (int32_t i = 0; i < spd->numOfCols; ++i) {
-    if (!spd->cols[i].hasVal) {  // current column do not have any value to insert, set it to null
+    if (spd->cols[i].valStat == VAL_STAT_NONE) {  // current column do not have any value to insert, set it to null
       for (int32_t n = 0; n < rowNum; ++n) {
         char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
src/client/src/tscSQLParser.c

@@ -8414,19 +8414,13 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   char name[TSDB_TABLE_FNAME_LEN] = {0};

-  //if (!pSql->pBuf) {
-  //  if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
-  //    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
-  //    goto _end;
-  //  }
-  //}
   plist = taosArrayInit(4, POINTER_BYTES);
   pVgroupList = taosArrayInit(4, POINTER_BYTES);

   taosArraySort(tableNameList, tnameComparFn);
   taosArrayRemoveDuplicate(tableNameList, tnameComparFn, NULL);
+  STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf);
   size_t numOfTables = taosArrayGetSize(tableNameList);
   for (int32_t i = 0; i < numOfTables; ++i) {
     SName* pname = taosArrayGet(tableNameList, i);

@@ -8442,7 +8436,8 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     // avoid mem leak, may should update pTableMeta
     void* pVgroupIdList = NULL;
     if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
-      code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity);
+      code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity, (STableMeta **)(&pSTMeta));
+      pSql->pBuf = (void *)pSTMeta;

       // create the child table meta from super table failed, try load it from mnode
       if (code != TSDB_CODE_SUCCESS) {
src/client/src/tscServer.c

@@ -2882,18 +2882,19 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
   tNameExtractFullName(&pTableMetaInfo->name, name);

   size_t len = strlen(name);
-  if (pTableMetaInfo->tableMetaCapacity != 0) {
-    if (pTableMetaInfo->pTableMeta != NULL) {
-      memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
-    }
-  }
+  // just make runtime happy
+  if (pTableMetaInfo->tableMetaCapacity != 0 && pTableMetaInfo->pTableMeta != NULL) {
+    memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
+  }

   taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);

-  STableMeta* pMeta = pTableMetaInfo->pTableMeta;
+  STableMeta* pMeta   = pTableMetaInfo->pTableMeta;
+  STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf);

   if (pMeta && pMeta->id.uid > 0) {
     // in case of child table, here only get the
     if (pMeta->tableType == TSDB_CHILD_TABLE) {
-      int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity);
+      int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta));
+      pSql->pBuf = (void *)(pSTMeta);
       if (code != TSDB_CODE_SUCCESS) {
         return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
       }
src/client/src/tscUtil.c

@@ -1796,101 +1796,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
   return TSDB_CODE_SUCCESS;
 }

 static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
   SSchema* pSchema = pBuilder->pSchema;
   char* p = (char*)pBuilder->buf;
   int toffset = 0;
   uint16_t nCols = pBuilder->nCols;

   uint8_t  memRowType = payloadType(p);
   uint16_t nColsBound = payloadNCols(p);
   if (pBuilder->nCols <= 0 || nColsBound <= 0) return NULL;

   char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p));
   SMemRow* memRow = (SMemRow)pBuilder->pDataBlock;
   memRowSetType(memRow, memRowType);

   // ----------------- Raw payload structure for row:
   /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
    * |                                 |<----------------- flen ------------->|<--- value part --->|
    * |SMemRowType| dataTLen |  nCols   |  colId  | colType | offset   | ... | value |...|...|... |
    * +-----------+----------+----------+--------------------------------------|--------------------|
    * | uint8_t   | uint32_t | uint16_t | int16_t |  uint8_t | uint16_t | ... |.......|...|...|... |
    * +-----------+----------+----------+--------------------------------------+--------------------|
    * 1. offset in column data tuple starts from the value part in case of uint16_t overflow.
    * 2. dataTLen: total length including the header and body.
    */

   if (memRowType == SMEM_ROW_DATA) {
     SDataRow trow = (SDataRow)memRowDataBody(memRow);
     dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen));
     dataRowSetVersion(trow, pBuilder->sversion);

     p = (char*)payloadBody(pBuilder->buf);
     uint16_t i = 0, j = 0;
     while (j < nCols) {
       if (i >= nColsBound) {
         break;
       }
       int16_t colId = payloadColId(p);
       if (colId == pSchema[j].colId) {
         // ASSERT(payloadColType(p) == pSchema[j].type);
         tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset);
         toffset += TYPE_BYTES[pSchema[j].type];
         p = payloadNextCol(p);
         ++i;
         ++j;
       } else if (colId < pSchema[j].colId) {
         p = payloadNextCol(p);
         ++i;
       } else {
         tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
         toffset += TYPE_BYTES[pSchema[j].type];
         ++j;
       }
     }

     while (j < nCols) {
       tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
       toffset += TYPE_BYTES[pSchema[j].type];
       ++j;
     }

 #if 0  // no need anymore
     while (i < nColsBound) {
       p = payloadNextCol(p);
       ++i;
     }
 #endif

   } else if (memRowType == SMEM_ROW_KV) {
     SKVRow kvRow = (SKVRow)memRowKvBody(memRow);
     kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound));
     kvRowSetNCols(kvRow, nColsBound);
     memRowSetKvVersion(memRow, pBuilder->sversion);

     p = (char*)payloadBody(pBuilder->buf);
     int i = 0;
     while (i < nColsBound) {
       int16_t colId = payloadColId(p);
       uint8_t colType = payloadColType(p);
       tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals, payloadColOffset(p)), colId, colType, &toffset);
       //toffset += sizeof(SColIdx);
       p = payloadNextCol(p);
       ++i;
     }
   } else {
     ASSERT(0);
   }
   int32_t rowTLen = memRowTLen(memRow);
   pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen;  // next row
   pBuilder->pSubmitBlk->dataLen += rowTLen;

   return memRow;
 }

 // Erase the empty space reserved for binary data
 static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, SBlockKeyTuple* blkKeyTuple) {

@@ -1922,10 +1827,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
     int32_t schemaSize = sizeof(STColumn) * numOfCols;
     pBlock->schemaLen = schemaSize;
   } else {
-    for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
-      flen += TYPE_BYTES[pSchema[j].type];
+    if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
+      for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
+        flen += TYPE_BYTES[pSchema[j].type];
+      }
     }
     pBlock->schemaLen = 0;
   }

@@ -1952,18 +1858,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
       pBlock->dataLen += memRowTLen(memRow);
     }
   } else {
-    SMemRowBuilder rowBuilder;
-    rowBuilder.pSchema = pSchema;
-    rowBuilder.sversion = pTableMeta->sversion;
-    rowBuilder.flen = flen;
-    rowBuilder.nCols = tinfo.numOfColumns;
-    rowBuilder.pDataBlock = pDataBlock;
-    rowBuilder.pSubmitBlk = pBlock;
-    rowBuilder.buf = p;
     for (int32_t i = 0; i < numOfRows; ++i) {
-      rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
-      tdGenMemRowFromBuilder(&rowBuilder);
+      char* payload = (blkKeyTuple + i)->payloadAddr;
+      if (isNeedConvertRow(payload)) {
+        convertSMemRow(pDataBlock, payload, pTableDataBlock);
+        TDRowTLenT rowTLen = memRowTLen(pDataBlock);
+        pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
+        pBlock->dataLen += rowTLen;
+      } else {
+        TDRowTLenT rowTLen = memRowTLen(payload);
+        memcpy(pDataBlock, payload, rowTLen);
+        pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
+        pBlock->dataLen += rowTLen;
+      }
     }
   }

@@ -1976,9 +1883,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
 static int32_t getRowExpandSize(STableMeta* pTableMeta) {
   int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
   int32_t columns = tscGetNumOfColumns(pTableMeta);
   SSchema* pSchema = tscGetTableSchema(pTableMeta);
   for (int32_t i = 0; i < columns; i++) {
     if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
       result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
     }

@@ -2024,7 +1931,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
     SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
     if (pBlocks->numOfRows > 0) {
       // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
-      int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
+      int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
       STableDataBlocks* dataBuf = NULL;
       int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,

@@ -2037,7 +1944,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
         return ret;
       }
       int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +
                          sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
       if (dataBuf->nAllocSize < destSize) {
         dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);

@@ -2081,7 +1989,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
                  pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
       }

-      int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+      int32_t len = pBlocks->numOfRows *
+                        (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) +
+                    sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);

       pBlocks->tid = htonl(pBlocks->tid);
       pBlocks->uid = htobe64(pBlocks->uid);

@@ -4533,14 +4443,16 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
   return cMeta;
 }

-int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) {
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta** ppSTable) {
   assert(*ppChild != NULL);
-  STableMeta* p = NULL;
-  size_t sz = 0;
+  STableMeta* p = *ppSTable;
   STableMeta* pChild = *ppChild;
+  size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0;  //ppSTableBuf actually capacity may larger than sz, dont care
+  if (p != NULL && sz != 0) {
+    memset((char *)p, 0, sz);
+  }
   taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
+  *ppSTable = p;

   // tableMeta exists, build child table meta according to the super table meta
   // the uid need to be checked in addition to the general name of the super table.

@@ -4559,10 +4471,8 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
     memcpy(pChild->schema, p->schema, totalBytes);

     *ppChild = pChild;
-    tfree(p);
     return TSDB_CODE_SUCCESS;
   } else { // super table has been removed, current tableMeta is also expired. remove it here
-    tfree(p);
     taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
     return -1;
   }
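The new tscCreateTableMetaFromSTableMeta signature above threads a caller-owned scratch buffer (pSql->pBuf) and its size through the call, so the super-table meta can be reused across lookups instead of being allocated and freed each time. Below is a minimal sketch of that buffer-reuse shape only; MetaCtx and prepare_scratch are illustrative names, not TDengine APIs.

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
  char  *scratch;  /* reused across calls, owned by the caller */
  size_t cap;      /* current capacity of the scratch buffer   */
} MetaCtx;

/* Ensure the scratch buffer can hold `need` bytes, growing it only when
 * necessary, then clear it before the next use. */
static int prepare_scratch(MetaCtx *ctx, size_t need) {
  if (ctx->cap < need) {
    char *p = realloc(ctx->scratch, need);
    if (p == NULL) return -1;   /* old buffer is still valid here */
    ctx->scratch = p;
    ctx->cap = need;
  }
  memset(ctx->scratch, 0, need);
  return 0;
}
```

The point is only the ownership shape: the caller keeps the pointer and capacity, and the callee grows or clears the buffer as needed.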
src/common/inc/tdataformat.h

@@ -186,6 +186,7 @@ typedef void *SDataRow;
 #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
 #define dataRowLen(r) (*(TDRowLenT *)(r))  // 0~65535
 #define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r))
 #define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
 #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
 #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))

@@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row);
 void tdInitDataRow(SDataRow row, STSchema *pSchema);
 SDataRow tdDataRowDup(SDataRow row);

 // offset here not include dataRow header length
-static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
+static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type, int32_t offset) {
   ASSERT(value != NULL);
   int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;

   if (IS_VAR_DATA_TYPE(type)) {
     *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
-    memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
+    if (isCopyVarData) {
+      memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
+    }
     dataRowLen(row) += varDataTLen(value);
   } else {
     if (offset == 0) {

@@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t
   return 0;
 }

+// offset here not include dataRow header length
+static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
+  return tdAppendDataColVal(row, value, true, type, offset);
+}
+
 // NOTE: offset here including the header size
 static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
   if (IS_VAR_DATA_TYPE(type)) {

@@ -472,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
 }

 // offset here not include kvRow header length
-static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
+static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type, int32_t offset) {
   ASSERT(value != NULL);
-  int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
+  int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
   SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
   char *   ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));

@@ -482,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
   pColIdx->offset = kvRowLen(row);  // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE

   if (IS_VAR_DATA_TYPE(type)) {
-    memcpy(ptr, value, varDataTLen(value));
+    if (isCopyValData) {
+      memcpy(ptr, value, varDataTLen(value));
+    }
     kvRowLen(row) += varDataTLen(value);
   } else {
-    if (*offset == 0) {
+    if (offset == 0) {
       ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
       TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
       memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);

@@ -494,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
     }
     kvRowLen(row) += TYPE_BYTES[type];
   }
-  *offset += sizeof(SColIdx);
   return 0;
 }

@@ -589,12 +602,24 @@ typedef void *SMemRow;
 #define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
 #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)

-#define SMEM_ROW_DATA 0U  // SDataRow
-#define SMEM_ROW_KV 1U    // SKVRow
+#define SMEM_ROW_DATA 0x0U      // SDataRow
+#define SMEM_ROW_KV 0x01U       // SKVRow
+#define SMEM_ROW_CONVERT 0x80U  // SMemRow convert flag
+
+#define KVRatioKV (0.2f)  // all bool
+#define KVRatioPredict (0.4f)
+#define KVRatioData (0.75f)  // all bigint
+#define KVRatioConvert (0.9f)

-#define memRowType(r) ((*(uint8_t *)(r)) & 0x01)
+#define memRowType(r) (*(uint8_t *)(r))
+#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t))  // set the total byte in case of dirty memory
+#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT))  // highest bit
+#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
 #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
+#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
 #define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
+#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)

 #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)  // section after flag
 #define memRowKvBody(r) \

@@ -611,6 +636,14 @@ typedef void *SMemRow;
 #define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
 #define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r))  // using uint32_t/int32_t to store the TLen

+static FORCE_INLINE char *memRowEnd(SMemRow row) {
+  if (isDataRow(row)) {
+    return (char *)dataRowEnd(memRowDataBody(row));
+  } else {
+    return (char *)kvRowEnd(memRowKvBody(row));
+  }
+}
+
 #define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
 #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
 #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r))  // schema version

@@ -628,7 +661,6 @@ typedef void *SMemRow;
   }                     \
   } while (0)

-#define memRowSetType(r, t) (memRowType(r) = (t))
 #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
 #define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
 #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))

@@ -661,12 +693,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_
   }
 }

-static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, int32_t *kvOffset) {
+static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, int8_t type, int32_t offset) {
   if (isDataRow(row)) {
-    tdAppendColVal(memRowDataBody(row), value, type, offset);
+    tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset);
   } else {
-    tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
+    tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset);
   }
   return 0;
 }

@@ -688,6 +720,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value
   return len;
 }

+/**
+ * 1. calculate the delta of AllNullLen for SDataRow.
+ * 2. calculate the real len for SKVRow.
+ */
+static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) {
+  switch (colType) {
+    case TSDB_DATA_TYPE_BINARY: {
+      int32_t varLen = varDataLen(value);
+      *dataLen += (varLen - CHAR_BYTES);
+      *kvLen += (varLen + sizeof(SColIdx));
+      break;
+    }
+    case TSDB_DATA_TYPE_NCHAR: {
+      int32_t varLen = varDataLen(value);
+      *dataLen += (varLen - TSDB_NCHAR_SIZE);
+      *kvLen += (varLen + sizeof(SColIdx));
+      break;
+    }
+    default: {
+      *kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx));
+      break;
+    }
+  }
+}
+
 typedef struct {
   int16_t colId;

@@ -703,7 +759,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c
 SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);

 #if 0
 // ----------------- Raw payload structure for row:
 /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
  * |                                 |<----------------- flen ------------->|<--- value part --->|

@@ -749,6 +805,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
 static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }
 #endif

 #ifdef __cplusplus
 }
 #endif
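The header change above extends the append helpers with an explicit copy flag (isCopyVarData / isCopyValData) and keeps tdAppendColVal as a thin wrapper over tdAppendDataColVal so existing callers are untouched. A hedged sketch of that wrapper-with-flag idiom, using made-up names rather than the real row format:

```c
#include <stdbool.h>
#include <string.h>

/* Extended form: the caller decides whether the payload is copied now or
 * only accounted for (space reserved, copy deferred). */
static int append_value_ex(char *row, size_t *len, const void *val, size_t n, bool copy) {
  if (copy) {
    memcpy(row + *len, val, n);
  }
  *len += n;
  return 0;
}

/* Thin wrapper keeping the original behaviour for existing callers. */
static int append_value(char *row, size_t *len, const void *val, size_t n) {
  return append_value_ex(row, len, val, n, true);
}
```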
src/common/src/tdataformat.c

@@ -851,7 +851,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
     int16_t k;
     for (k = 0; k < nKvNCols; ++k) {
       SColInfo *pColInfo = taosArrayGet(stashRow, k);
-      tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
+      tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
+      toffset += sizeof(SColIdx);
     }
     ASSERT(kvLen == memRowTLen(tRow));
   }
src/common/src/tglobal.c

@@ -991,7 +991,7 @@ static void doInitGlobalConfig(void) {
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = 0;
-  cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN;
+  cfg.maxValue = TSDB_MAX_FIELD_LEN;
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_BYTE;
   taosInitConfigOption(cfg);
src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java (new file, mode 0 → 100644)
This diff is collapsed and not shown here.
src/connector/nodejs/nodetaos/taosobjects.js

@@ -47,7 +47,8 @@ class TaosTimestamp extends Date {
       super(Math.floor(date / 1000));
       this.precisionExtras = date % 1000;
     } else if (precision === 2) {
-      super(parseInt(date / 1000000));
+      // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected
+      super(parseInt(BigInt(date) / 1000000n));
+      // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405)
+      this.precisionExtras = parseInt(BigInt(date) % 1000000n);
     } else {
src/connector/nodejs/package.json

 {
   "name": "td2.0-connector",
-  "version": "2.0.9",
+  "version": "2.0.10",
   "description": "A Node.js connector for TDengine.",
   "main": "tdengine.js",
   "directories": {
src/kit/taosdemo/taosdemo.c

@@ -245,7 +245,6 @@ typedef struct SArguments_S {
   uint32_t disorderRatio;               // 0: no disorder, >0: x%
   int      disorderRange;               // ms, us or ns. accordig to database precision
   uint32_t method_of_delete;
   char **  arg_list;
   uint64_t totalInsertRows;
   uint64_t totalAffectedRows;
   bool     demo_mode;                   // use default column name and semi-random data

@@ -637,7 +636,6 @@ SArguments g_args = {
   0,      // disorderRatio
   1000,   // disorderRange
   1,      // method_of_delete
   NULL,   // arg_list
   0,      // totalInsertRows;
   0,      // totalAffectedRows;
   true,   // demo_mode;

@@ -6407,6 +6405,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   bool flagSleep = true;
   uint64_t sleepTimeTotal = 0;

+  int percentComplete = 0;
+  int64_t totalRows = insertRows * pThreadInfo->ntables;

   while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
     if ((flagSleep) && (insert_interval)) {
       st = taosGetTimestampMs();

@@ -6583,6 +6584,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
         pThreadInfo->totalAffectedRows += affectedRows;

+        int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+        if (currentPercent > percentComplete) {
+          printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+          percentComplete = currentPercent;
+        }
         int64_t currentPrintTime = taosGetTimestampMs();
         if (currentPrintTime - lastPrintTime > 30*1000) {
           printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",

@@ -6604,6 +6610,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
       }
     }
   }
+  if (percentComplete < 100)
+    printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);

 free_of_interlace:
   tmfree(pThreadInfo->buffer);

@@ -6641,6 +6649,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
   pThreadInfo->samplePos = 0;

+  int percentComplete = 0;
+  int64_t totalRows = insertRows * pThreadInfo->ntables;

   for (uint64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq++) {

@@ -6746,6 +6757,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
         pThreadInfo->totalAffectedRows += affectedRows;

+        int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+        if (currentPercent > percentComplete) {
+          printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+          percentComplete = currentPercent;
+        }
         int64_t currentPrintTime = taosGetTimestampMs();
         if (currentPrintTime - lastPrintTime > 30*1000) {
           printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",

@@ -6768,6 +6784,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
               __func__, __LINE__, pThreadInfo->samplePos);
         }
       }   // tableSeq
+  if (percentComplete < 100)
+    printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);

 free_of_progressive:
   tmfree(pThreadInfo->buffer);
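The taosdemo change above adds percentage progress reporting that prints only when the integer percent actually advances, so a long insert loop emits at most about 100 progress lines per thread. A small illustrative sketch of that reporting logic (the function and parameter names are hypothetical, not taosdemo's own):

```c
#include <stdint.h>
#include <stdio.h>

/* Print "[thread]:N%" only when the integer percentage increases. */
static void report_progress(int thread_id, int64_t done, int64_t total, int *last_percent) {
  if (total <= 0) return;
  int percent = (int)(done * 100 / total);
  if (percent > *last_percent) {
    printf("[%d]:%d%%\n", thread_id, percent);
    *last_percent = percent;
  }
}
```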
src/plugins/http/src/httpParser.c

@@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) {
 }

 static void httpCleanupString(HttpString *str) {
-  free(str->str);
-  str->str = NULL;
-  str->pos = 0;
-  str->size = 0;
+  if (str->str) {
+    free(str->str);
+    str->str = NULL;
+    str->pos = 0;
+    str->size = 0;
+  }
 }

 static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
+  char *new_str = NULL;
+
   if (str->size == 0) {
     str->pos = 0;
     str->size = len + 1;

@@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
   } else if (str->pos + len + 1 >= str->size) {
     str->size += len;
     str->size *= 4;

-    str->str = realloc(str->str, str->size);
+    new_str = realloc(str->str, str->size);
+    if (new_str == NULL && str->str) {
+      // if str->str was not NULL originally,
+      // the old allocated memory was left unchanged,
+      // see man 3 realloc
+      free(str->str);
+    }
+    str->str = new_str;
   } else {
   }

@@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
 static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
   HttpContext *pContext = parser->pContext;
   HttpString * buf = &parser->body;
   if (parser->parseCode != TSDB_CODE_SUCCESS) return -1;

   if (buf->size <= 0) {

@@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
   }

   int32_t newSize = buf->pos + len + 1;
+  char *newStr = NULL;
   if (newSize >= buf->size) {
     if (buf->size >= HTTP_BUFFER_SIZE) {
       httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size);

@@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
     newSize = MAX(newSize, HTTP_BUFFER_INIT);
     newSize *= 4;
     newSize = MIN(newSize, HTTP_BUFFER_SIZE);

-    buf->str = realloc(buf->str, newSize);
+    newStr = realloc(buf->str, newSize);
+    if (newStr == NULL && buf->str) {
+      free(buf->str);
+    }
+    buf->str = newStr;
     buf->size = newSize;

     if (buf->str == NULL) {

@@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) {
 static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) {
   HttpStack *stack = &parser->stacks;
+  int8_t *newStacks = NULL;
   if (stack->size == 0) {
     stack->pos = 0;
     stack->size = 32;
     stack->stacks = malloc(stack->size * sizeof(int8_t));
   } else if (stack->pos + 1 > stack->size) {
     stack->size *= 2;

-    stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
+    newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
+    if (newStacks == NULL && stack->stacks) {
+      free(stack->stacks);
+    }
+    stack->stacks = newStacks;
   } else {
   }
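The httpParser changes above replace direct `p = realloc(p, n)` assignments with a temporary pointer, so a failed realloc cannot leak the original block by overwriting the only pointer to it. A minimal sketch of that idiom, under assumed names (grow_buffer is not part of the HTTP module; it also frees the old block on failure, mirroring the diff rather than the more common keep-the-old-block variant):

```c
#include <stdlib.h>

/* Grow *buf to at least `need` bytes without losing the old pointer. */
static int grow_buffer(char **buf, size_t *cap, size_t need) {
  if (need <= *cap) return 0;
  char *tmp = realloc(*buf, need);  /* may return NULL and leave *buf valid */
  if (tmp == NULL) {
    free(*buf);                     /* release the old block explicitly */
    *buf = NULL;
    *cap = 0;
    return -1;
  }
  *buf = tmp;
  *cap = need;
  return 0;
}
```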
src/plugins/http/src/httpUtil.c

@@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS
 bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
   HttpSqlCmds *multiCmds = pContext->multiCmds;
-  if (cmdSize > HTTP_MAX_CMD_SIZE) {
+  if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) {
     httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE);
     return false;
   }

-  multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
+  HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
+  if (new_cmds == NULL && multiCmds->cmds) {
+    free(multiCmds->cmds);
+  }
+  multiCmds->cmds = new_cmds;
   if (multiCmds->cmds == NULL) {
     httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize);
     return false;

@@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
 bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) {
   HttpSqlCmds *multiCmds = pContext->multiCmds;
-  if (bufferSize > HTTP_MAX_BUFFER_SIZE) {
+  if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) {
     httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE);
     return false;
   }

-  multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
+  char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
+  if (new_buffer == NULL && multiCmds->buffer) {
+    free(multiCmds->buffer);
+  }
+  multiCmds->buffer = new_buffer;
   if (multiCmds->buffer == NULL) {
     httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize);
     return false;
src/query/src/qAggMain.c

@@ -4047,9 +4047,9 @@ void block_func_merge(SQLFunctionCtx* pCtx) {
   STableBlockDist info = {0};
   int32_t len = *(int32_t*) pCtx->pInput;
   blockDistInfoFromBinary(((char*)pCtx->pInput) + sizeof(int32_t), len, &info);
   SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx);
   mergeTableBlockDist(pResInfo, &info);
   taosArrayDestroy(info.dataBlockInfos);

   pResInfo->numOfRes = 1;
   pResInfo->hasResult = DATA_SET_FLAG;
src/query/tests/rangeMergeTest.cpp

@@ -330,7 +330,7 @@ void intDataTest() {
     filterAddRange(h, ra + i, TSDB_RELATION_AND);
   }
   filterGetRangeNum(h, &num);
-  ASSERT_EQ(num, 0);
+  ASSERT_EQ(num, 1);
   filterFreeRangeCtx(h);
src/rpc/src/rpcMain.c

@@ -1133,8 +1133,8 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
   } else {
     // for asynchronous API
     SRpcEpSet *pEpSet = NULL;
-    if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
-      pEpSet = &pContext->epSet;
+    //if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
+    pEpSet = &pContext->epSet;

     (*pRpc->cfp)(pMsg, pEpSet);
   }
src/util/src/tcompare.c

@@ -327,42 +327,43 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
 int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
   SPatternCompareInfo pInfo = {'%', '_'};

-  char pattern[128] = {0};
+  assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
+  char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char));
   memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
-  assert(varDataLen(pRight) < 128);

   size_t sz = varDataLen(pLeft);
   char *buf = malloc(sz + 1);
   memcpy(buf, varDataVal(pLeft), sz);
   buf[sz] = 0;

   int32_t ret = patternMatch(pattern, buf, sz, &pInfo);
   free(buf);
+  free(pattern);
   return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
 }

 int32_t taosArrayCompareString(const void* a, const void* b) {
   const char* x = *(const char**)a;
   const char* y = *(const char**)b;

   return compareLenPrefixedStr(x, y);
 }

 int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
   return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
 }

 int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
   SPatternCompareInfo pInfo = {'%', '_'};

-  wchar_t pattern[128] = {0};
-  assert(TSDB_PATTERN_STRING_MAX_LEN < 128);
+  assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
+  wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
   memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
-  assert(varDataLen(pRight) < 128);

   int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft) / TSDB_NCHAR_SIZE, &pInfo);
+  free(pattern);
   return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
 }
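The tcompare change above drops the fixed 128-byte pattern buffers in favour of heap allocations sized from varDataLen(pRight), so LIKE patterns up to the field limit fit. A hedged sketch of the underlying pattern only; dup_pattern is an illustrative helper, not TDengine code:

```c
#include <stdlib.h>
#include <string.h>

/* Allocate a NUL-terminated copy of a length-prefixed pattern instead of
 * copying it into a fixed-size stack array. */
static char *dup_pattern(const char *data, size_t len) {
  char *pattern = calloc(len + 1, sizeof(char));  /* +1 keeps the terminator */
  if (pattern == NULL) return NULL;
  memcpy(pattern, data, len);
  return pattern;  /* caller must free() */
}
```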
tests/perftest-scripts/perftest-query.sh

@@ -101,7 +101,14 @@ function runQueryPerfTest {
 python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT

 echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
 python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT

+echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
+
+echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
 }
tests/pytest/crash_gen/valgrind_taos.supp
@@ -17742,4 +17742,370 @@
fun:taosGetFqdn
fun:taosCheckGlobalCfg
fun:taos_init_imp
}
\ No newline at end of file
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/bin/python3.8
fun:PyObject_GetItem
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:PyCode_NewWithPosOnlyArgs
fun:PyCode_New
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/local/lib/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun: malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun: malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
obj:/usr/bin/python3.8
fun:PyObject_CallFunctionObjArgs
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_GetAttr
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8)
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8)
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8)
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8)
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8)
obj:/usr/bin/python3.8)
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_Pack
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
fun:PyModule_ExecDef
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
...
obj:/usr/local/lib/python3.8/dist-packages/pandas/*
...
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyObject_MakeTpCall
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:PyObject_GetAttr
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
obj:/usr/bin/python3.8
fun:PyTuple_New
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:_PyFunction_Vectorcall
}
\ No newline at end of file
tests/pytest/fulltest.sh

@@ -284,7 +284,7 @@ python3 ./test.py -f alter/alterTabAddTagWithNULL.py
 python3 ./test.py -f alter/alterTimestampColDataProcess.py

 # client
-python3 ./test.py -f client/client.py
+#python3 ./test.py -f client/client.py
 python3 ./test.py -f client/version.py
 python3 ./test.py -f client/alterDatabase.py
 python3 ./test.py -f client/noConnectionErrorTest.py

@@ -343,6 +343,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
 python3 ./test.py -f functions/function_twa_test2.py
 python3 ./test.py -f functions/function_stddev_td2555.py
 python3 ./test.py -f functions/showOfflineThresholdIs864000.py
+python3 ./test.py -f functions/function_interp.py
 python3 ./test.py -f insert/metadataUpdate.py
 python3 ./test.py -f query/last_cache.py
 python3 ./test.py -f query/last_row_cache.py
tests/pytest/functions/function_interp.py
0 → 100644
浏览文件 @
a7fab934
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute("create table t(ts timestamp, k int)")
        tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
        tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 12)
        tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
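As registered in fulltest.sh above, the new case can presumably also be run on its own with the framework's standard launcher; the working directory (tests/pytest) is an assumption.

    # Assumed to be run from tests/pytest, mirroring the line added to fulltest.sh.
    cd tests/pytest
    python3 ./test.py -f functions/function_interp.py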
tests/pytest/query/queryWildcardLength.py
Browse file @ a7fab934
...
...
@@ -157,19 +157,6 @@ class TDTestCase:
        tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")')
        tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");')
# TODO sc1 leave a bug ---> TD-5918
# sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
# f'select * from {table_name} where bi1 like "{lp_name}"',
# f'select * from {table_name} where bi1 like "{ul_name}"',
# f'select * from {table_name} where nc1 like "{hp_name}"',
# f'select * from {table_name} where nc1 like "{lp_name}"',
# f'select * from {table_name} where nc1 like "{ul_name}"',
# f'select * from {table_name} where si1 like "{hp_name}"',
# f'select * from {table_name} where si1 like "{lp_name}"',
# f'select * from {table_name} where si1 like "{ul_name}"',
# f'select * from {table_name} where sc1 like "{hp_name}"',
# f'select * from {table_name} where sc1 like "{lp_name}"',
# f'select * from {table_name} where sc1 like "{ul_name}"']
        sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
                    f'select * from {table_name} where bi1 like "{lp_name}"',
                    f'select * from {table_name} where bi1 like "{ul_name}"',
...
...
@@ -178,7 +165,11 @@ class TDTestCase:
                    f'select * from {table_name} where nc1 like "{ul_name}"',
                    f'select * from {table_name} where si1 like "{hp_name}"',
                    f'select * from {table_name} where si1 like "{lp_name}"',
                    f'select * from {table_name} where si1 like "{ul_name}"']
                    f'select * from {table_name} where si1 like "{ul_name}"',
                    f'select * from {table_name} where sc1 like "{hp_name}"',
                    f'select * from {table_name} where sc1 like "{lp_name}"',
                    f'select * from {table_name} where sc1 like "{ul_name}"']
        for sql in sql_list:
            tdSql.query(sql)
            if len(table_name) >= 1:
...
...
@@ -211,7 +202,6 @@ class TDTestCase:
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/tools/taosdemoPerformance.py
Browse file @ a7fab934
...
...
@@ -19,11 +19,16 @@ import json
import sys


class taosdemoPerformace:
    def __init__(self, commitID, dbName, branch, type):
    def __init__(self, commitID, dbName, branch, type, numOfTables, numOfRows, numOfInt, numOfDouble, numOfBinary):
        self.commitID = commitID
        self.dbName = dbName
        self.branch = branch
        self.type = type
        self.numOfTables = numOfTables
        self.numOfRows = numOfRows
        self.numOfInt = numOfInt
        self.numOfDouble = numOfDouble
        self.numOfBinary = numOfBinary
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
...
...
@@ -51,14 +56,14 @@ class taosdemoPerformace:
        stb = {
            "name": "meters",
            "child_table_exists": "no",
            "childtable_count": 10000,
            "childtable_count": self.numOfTables,
            "childtable_prefix": "stb_",
            "auto_create_table": "no",
            "data_source": "rand",
            "batch_create_tbl_num": 10,
            "insert_mode": "taosc",
            "insert_rows": 100000,
            "interlace_rows": 100,
            "insert_rows": self.numOfRows,
            "interlace_rows": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
...
...
@@ -68,7 +73,9 @@ class taosdemoPerformace:
"sample_file"
:
"./sample.csv"
,
"tags_file"
:
""
,
"columns"
:
[
{
"type"
:
"INT"
,
"count"
:
4
}
{
"type"
:
"INT"
,
"count"
:
self
.
numOfInt
},
{
"type"
:
"DOUBLE"
,
"count"
:
self
.
numOfDouble
},
{
"type"
:
"BINARY"
,
"len"
:
128
,
"count"
:
self
.
numOfBinary
}
],
"tags"
:
[
{
"type"
:
"INT"
,
"count"
:
1
},
...
...
@@ -76,6 +83,7 @@ class taosdemoPerformace:
            ]
        }

        stables = []
        stables.append(stb)
...
...
@@ -163,21 +171,21 @@ class taosdemoPerformace:
        cursor.execute("create database if not exists %s" % self.dbName)
        cursor.execute("use %s" % self.dbName)
        cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20))")
        print("==================== taosdemo performance ====================")
        cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)")
        print("create tables time: %f" % float(self.createTableTime))
        print("insert records time: %f" % float(self.insertRecordsTime))
        print("records per second: %f" % float(self.recordsPerSecond))
        print("avg delay: %f" % float(self.avgDelay))
        print("max delay: %f" % float(self.maxDelay))
        print("min delay: %f" % float(self.minDelay))
        cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s')" %
        cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s', %d, %d, %d, %d, %d)" %
                       (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond),
                        self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type))
                        self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type,
                        self.numOfTables, self.numOfRows, self.numOfInt, self.numOfDouble, self.numOfBinary))
        cursor.close()

        cursor1 = self.conn.cursor()
        cursor1.execute("drop database if exists %s" % self.insertDB)
        # cursor1.execute("drop database if exists %s" % self.insertDB)
        cursor1.close()

if __name__ == '__main__':
...
...
@@ -209,8 +217,43 @@ if __name__ == '__main__':
        default='glibc',
        type=str,
        help='build type (default: glibc)')
    parser.add_argument(
        '-i',
        '--num-of-int',
        action='store',
        default=4,
        type=int,
        help='num of int columns (default: 4)')
    parser.add_argument(
        '-D',
        '--num-of-double',
        action='store',
        default=0,
        type=int,
        help='num of double columns (default: 4)')
    parser.add_argument(
        '-B',
        '--num-of-binary',
        action='store',
        default=0,
        type=int,
        help='num of binary columns (default: 4)')
    parser.add_argument(
        '-t',
        '--num-of-tables',
        action='store',
        default=10000,
        type=int,
        help='num of tables (default: 10000)')
    parser.add_argument(
        '-r',
        '--num-of-rows',
        action='store',
        default=100000,
        type=int,
        help='num of rows (default: 100000)')

    args = parser.parse_args()

    perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type)
    perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type,
                                  args.num_of_tables, args.num_of_rows, args.num_of_int, args.num_of_double, args.num_of_binary)

    perftest.insertData()
    perftest.createTablesAndStoreData()