taosdata / TDengine

Commit 1e3ca4c6 (unverified)
Authored by haojun Liao on Jun 03, 2021; committed via GitHub on Jun 03, 2021

Merge branch 'develop' into feature/query

Parents: 7082d8a5, 85109726

Showing 78 changed files with 4764 additions and 1344 deletions (+4764 -1344)
.appveyor.yml  +49 -49
cmake/env.inc  +8 -6
cmake/input.inc  +6 -0
documentation20/cn/08.connector/01.java/docs.md  +78 -0
documentation20/cn/08.connector/docs.md  +49 -6
packaging/deb/makedeb.sh  +42 -8
packaging/rpm/makerpm.sh  +3 -3
packaging/rpm/tdengine.spec  +63 -18
packaging/tools/install.sh  +178 -131
packaging/tools/make_install.sh  +1 -1
packaging/tools/makepkg.sh  +42 -9
src/client/inc/tsclient.h  +1 -0
src/client/src/tscParseInsert.c  +4 -0
src/client/src/tscPrepare.c  +9 -2
src/client/src/tscSQLParser.c  +98 -7
src/client/src/tscServer.c  +2 -2
src/client/src/tscStream.c  +94 -9
src/client/src/tscSubquery.c  +2 -0
src/client/src/tscUtil.c  +3 -1
src/common/inc/tdataformat.h  +1 -1
src/common/inc/tglobal.h  +2 -1
src/common/src/tdataformat.c  +10 -5
src/common/src/tglobal.c  +4 -3
src/connector/go  +1 -1
src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java  +123 -4
src/connector/python/taos/__init__.py  +4 -0
src/cq/src/cqMain.c  +5 -2
src/dnode/src/dnodeMain.c  +11 -0
src/inc/taosdef.h  +2 -0
src/inc/taoserror.h  +1 -1
src/inc/taosmsg.h  +1 -0
src/inc/tfs.h  +2 -0
src/inc/tsdb.h  +3 -0
src/inc/ttokendef.h  +56 -48
src/mnode/src/mnodeDnode.c  +1 -1
src/mnode/src/mnodeSdb.c  +13 -13
src/mnode/src/mnodeTable.c  +54 -36
src/query/inc/sql.y  +34 -0
src/query/src/qExecutor.c  +1 -1
src/query/src/qSqlParser.c  +1 -1
src/query/src/sql.c  +1062 -670
src/sync/src/syncMain.c  +6 -1
src/tsdb/CMakeLists.txt  +4 -0
src/tsdb/inc/tsdbCommit.h  +7 -0
src/tsdb/inc/tsdbCommitQueue.h  +3 -1
src/tsdb/inc/tsdbCompact.h  +28 -0
src/tsdb/inc/tsdbint.h  +2 -0
src/tsdb/src/tsdbCommit.c  +172 -156
src/tsdb/src/tsdbCommitQueue.c  +18 -7
src/tsdb/src/tsdbCompact.c  +9 -1
src/tsdb/src/tsdbMemTable.c  +1 -1
src/tsdb/src/tsdbMeta.c  +2 -5
src/tsdb/src/tsdbRead.c  +21 -22
src/tsdb/src/tsdbReadImpl.c  +2 -2
src/util/inc/ttoken.h  +1 -0
src/util/src/terror.c  +1 -0
src/util/src/ttokenizer.c  +14 -1
src/vnode/src/vnodeMgmt.c  +4 -4
src/vnode/src/vnodeWrite.c  +11 -0
tests/pytest/alter/alter_cacheLastRow.py  +109 -0
tests/pytest/crash_gen/crash_gen_main.py  +102 -37
tests/pytest/crash_gen/service_manager.py  +7 -3
tests/pytest/crash_gen/shared/misc.py  +2 -1
tests/pytest/fulltest.sh  +5 -2
tests/pytest/functions/function_session.py  +86 -0
tests/pytest/functions/function_stateWindow.py  +109 -0
tests/pytest/insert/nchar.py  +4 -0
tests/pytest/manualTest/manual_alter_block.py  +82 -0
tests/pytest/manualTest/manual_alter_comp.py  +126 -0
tests/pytest/query/queryInsertValue.py  +1 -1
tests/pytest/table/tablename-boundary.py  +55 -5
tests/pytest/tag_lite/drop_auto_create.py  +47 -0
tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json  +60 -0
tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json  +60 -0
tests/pytest/tools/taosdemoAllTest/manual_block2.json  +60 -0
tests/pytest/util/dnodes.py  +3 -3
tests/script/api/stmtBatchTest.c  +1298 -51
tests/script/general/parser/alter_column.sim  +118 -0
.appveyor.yml

@@ -27,7 +27,7 @@ for:
   build_script:
     - cd build
-    - cmake -G "NMake Makefiles" ..
+    - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
     - nmake install

   matrix:
cmake/env.inc

@@ -14,11 +14,13 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
 MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

-FIND_PROGRAM(TD_MVN_INSTALLED mvn)
-IF (TD_MVN_INSTALLED)
-  MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
-ELSE ()
-  MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
-ENDIF ()
+IF (TD_BUILD_JDBC)
+  FIND_PROGRAM(TD_MVN_INSTALLED mvn)
+  IF (TD_MVN_INSTALLED)
+    MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
+  ELSE ()
+    MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
+  ENDIF ()
+ENDIF ()
 #
cmake/input.inc

@@ -77,3 +77,9 @@ IF (${JEMALLOC_ENABLED} MATCHES "true")
   SET(TD_JEMALLOC_ENABLED TRUE)
   MESSAGE(STATUS "build with jemalloc enabled")
 ENDIF ()
+
+SET(TD_BUILD_JDBC TRUE)
+
+IF (${BUILD_JDBC} MATCHES "false")
+  SET(TD_BUILD_JDBC FALSE)
+ENDIF ()
documentation20/cn/08.connector/01.java/docs.md

@@ -266,7 +266,9 @@ while(resultSet.next()){
 > Queries work the same way as with a relational database: when fetching returned fields by index, the index starts from 1; fetching by field name is recommended.

 ### Handling exceptions

 After an error occurs, the error message and error code can be obtained through SQLException:

 ```java
 try (Statement statement = connection.createStatement()) {
     // executeQuery
@@ -279,11 +281,87 @@ try (Statement statement = connection.createStatement()) {
     e.printStackTrace();
 }
 ```

 The JDBC connector may report three kinds of error codes: errors from the JDBC driver itself (codes between 0x2301 and 0x2350), errors from JNI methods (codes between 0x2351 and 0x2400), and errors from other TDengine modules.

 For the specific error codes, please refer to:
 * https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
 * https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h

+### <a class="anchor" id="stmt-java"></a>Writing data through parameter binding
+
+Starting with version 2.1.2.0, the **JDBC-JNI** implementation of TDengine greatly improves support for writing data (INSERT) through parameter binding. Writing data this way avoids the resource cost of SQL parsing and can significantly improve write performance in many cases. (Note: the **JDBC-RESTful** implementation does not provide parameter binding.)
+
+```java
+Statement stmt = conn.createStatement();
+Random r = new Random();
+
+// In the INSERT statement, the VALUES clause may name specific data columns; if automatic table creation is used, the TAGS clause must supply values for all TAG columns:
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
+
+// Set the table name:
+s.setTableName("w1");
+// Set the TAGS values:
+s.setTagInt(0, r.nextInt(10));
+s.setTagString(1, "Beijing");
+
+int numOfRows = 10;
+
+// The VALUES part is set column by column:
+ArrayList<Long> ts = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+    ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+
+ArrayList<Integer> s1 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+    s1.add(r.nextInt(100));
+}
+s.setInt(1, s1);
+
+ArrayList<String> s2 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+    s2.add("test" + r.nextInt(100));
+}
+s.setString(2, s2, 10);
+
+// After columnDataAddBatch, a new table name, TAGS and VALUES can be set again, so a single execution can write into multiple tables:
+s.columnDataAddBatch();
+// Execute the statement:
+s.columnDataExecuteBatch();
+// When finished, release the resources:
+s.columnDataCloseBatch();
+```
+
+The methods available for setting TAG values are:
+
+```java
+public void setTagNull(int index, int type)
+public void setTagBoolean(int index, boolean value)
+public void setTagInt(int index, int value)
+public void setTagByte(int index, byte value)
+public void setTagShort(int index, short value)
+public void setTagLong(int index, long value)
+public void setTagTimestamp(int index, long value)
+public void setTagFloat(int index, float value)
+public void setTagDouble(int index, double value)
+public void setTagString(int index, String value)
+public void setTagNString(int index, String value)
+```
+
+The methods available for setting the values of the VALUES data columns are:
+
+```java
+public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
+public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
+public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
+public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
+public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
+public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
+public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+```
+
 ### <a class="anchor" id="subscribe"></a>Subscription

 #### Create
documentation20/cn/08.connector/docs.md

@@ -291,9 +291,25 @@ typedef struct taosField {
 All of TDengine's asynchronous APIs use a non-blocking calling model. An application can open multiple tables from multiple threads and query or insert into each open table concurrently. Note, however, that **the client application must fully serialize operations on the same table**: while an insert or query on a table has not yet returned, a second insert or query on that table must not be issued.

-### Parameter binding API  <a class="anchor" id="stmt"></a>
+### <a class="anchor" id="stmt"></a>Parameter binding API

-Besides calling `taos_query` directly, TDengine also provides a Prepare API that supports parameter binding. As in MySQL, these APIs currently only support the question mark `?` as the placeholder for the parameters to be bound, as follows:
+Besides calling `taos_query` directly, TDengine also provides a Prepare API that supports parameter binding. As in MySQL, these APIs currently only support the question mark `?` as the placeholder for the parameters to be bound.
+
+Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has greatly improved the parameter binding interface's support for the data writing (INSERT) scenario. Writing data through this interface avoids the resource cost of SQL parsing and therefore significantly improves write performance in most cases. The typical sequence of operations is:
+
+1. Call `taos_stmt_init` to create a parameter binding object;
+2. Call `taos_stmt_prepare` to parse the INSERT statement;
+3. If the INSERT statement reserves a placeholder for the table name but not for the TAGS, call `taos_stmt_set_tbname` to set the table name;
+4. If the INSERT statement reserves placeholders for both the table name and the TAGS (for example, when the INSERT statement uses automatic table creation), call `taos_stmt_set_tbname_tags` to set the table name and the TAGS values;
+5. Call `taos_stmt_bind_param_batch` to set the VALUES in a column-wise fashion;
+6. Call `taos_stmt_add_batch` to add the currently bound parameters to the batch;
+7. Steps 3 to 6 can be repeated to add more data rows to the batch;
+8. Call `taos_stmt_execute` to execute the prepared batch;
+9. When everything is done, call `taos_stmt_close` to release all resources.
+
+Besides C/C++, the Java JNI connector of TDengine also supports the parameter binding interface; see [Java usage of the parameter binding interface](https://www.taosdata.com/cn/documentation/connector/java#stmt-java).
+
+The related functions are listed below (see also how they are used in [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c)):

 - `TAOS_STMT* taos_stmt_init(TAOS *taos)`
@@ -301,11 +317,12 @@
 - `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`

   Parses a SQL statement and binds the parsed result and parameter information to stmt. If length is greater than 0, it is used as the length of the SQL statement; if it is 0, the length of the SQL statement is determined automatically.

 - `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`

-  Performs parameter binding; bind points to an array, and the number and order of its elements must exactly match the parameters in the SQL statement. TAOS_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:
+  Not as efficient as `taos_stmt_bind_param_batch`, but it can support SQL statements other than INSERT. Performs parameter binding; bind points to an array (representing one row of data to bind), and the number and order of its elements must exactly match the parameters in the SQL statement. TAOS_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:

 ```c
 typedef struct TAOS_BIND {
@@ -319,9 +336,35 @@ typedef struct TAOS_BIND {
 } TAOS_BIND;
 ```

+- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)` (new in 2.1.1.0)
+
+  When the table name in the SQL statement uses a `?` placeholder, this function binds a concrete table name.
+
+- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` (new in 2.1.2.0)
+
+  When both the table name and the TAGS in the SQL statement use `?` placeholders, this function binds a concrete table name and concrete TAGS values. The most typical use case is an INSERT statement that relies on automatic table creation (the current version does not support specifying individual TAGS columns). The number of columns in the tags argument must exactly match the number of TAGS required by the SQL statement.
+
+- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)` (new in 2.1.1.0)
+
+  Passes the data to bind in a column-wise fashion. The order and number of the data columns passed here must exactly match the VALUES parameters in the SQL statement. TAOS_MULTI_BIND is defined as follows:
+
+```c
+typedef struct TAOS_MULTI_BIND {
+  int       buffer_type;
+  void     *buffer;
+  uintptr_t buffer_length;
+  int32_t  *length;
+  char     *is_null;
+  int       num;             // the number of elements, i.e., the number of parameters in buffer
+} TAOS_MULTI_BIND;
+```
+
 - `int taos_stmt_add_batch(TAOS_STMT *stmt)`

-  Adds the currently bound parameters to the batch. After calling this function, `taos_stmt_bind_param` can be called again to bind new parameters. Note that this function only supports insert/import statements; for select and other SQL statements it returns an error.
+  Adds the currently bound parameters to the batch. After calling this function, `taos_stmt_bind_param` or `taos_stmt_bind_param_batch` can be called again to bind new parameters. Note that this function only supports INSERT/IMPORT statements; for SELECT and other SQL statements it returns an error.

 - `int taos_stmt_execute(TAOS_STMT *stmt)`
@@ -329,7 +372,7 @@ typedef struct TAOS_BIND {
 - `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`

   Obtains the result set of the statement. The result set is used in the same way as in the non-parameterized case; when finished, `taos_free_result` should be called on it to release the resources.

 - `int taos_stmt_close(TAOS_STMT *stmt)`
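The nine-step write path documented above maps onto a small amount of C. The block below is a minimal sketch of that flow, not the shipped apitest.c example: the connection handle, the hypothetical super table `weather_test (ts timestamp, c1 int) tags (t1 int)`, and the sample values are assumptions, return codes are ignored for brevity, and the TAOS_BIND/TAOS_MULTI_BIND field usage follows the definitions quoted in the diff (here `num` carries the number of rows in each column buffer).

/* Sketch of the parameter-binding INSERT flow described above (assumptions noted in the lead-in). */
#include <stdint.h>
#include <string.h>
#include "taos.h"

void stmt_insert_sketch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);                                  /* step 1: create the binding object */
  taos_stmt_prepare(stmt,
      "insert into ? using weather_test tags (?) values (?, ?)", 0);       /* step 2: parse the INSERT statement */

  int32_t   tag_val = 3;
  uintptr_t tag_len = sizeof(tag_val);
  TAOS_BIND tag;
  memset(&tag, 0, sizeof(tag));
  tag.buffer_type   = TSDB_DATA_TYPE_INT;
  tag.buffer        = &tag_val;
  tag.buffer_length = tag_len;
  tag.length        = &tag_len;
  taos_stmt_set_tbname_tags(stmt, "w1", &tag);                             /* step 4: bind table name + TAGS (auto-create) */

  int64_t ts[2]      = {1622700000000LL, 1622700001000LL};
  int32_t c1[2]      = {20, 21};
  int32_t ts_len[2]  = {sizeof(int64_t), sizeof(int64_t)};
  int32_t c1_len[2]  = {sizeof(int32_t), sizeof(int32_t)};
  char    no_null[2] = {0, 0};

  TAOS_MULTI_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer      = ts;
  cols[0].buffer_length = sizeof(int64_t);
  cols[0].length      = ts_len;
  cols[0].is_null     = no_null;
  cols[0].num         = 2;
  cols[1].buffer_type = TSDB_DATA_TYPE_INT;
  cols[1].buffer      = c1;
  cols[1].buffer_length = sizeof(int32_t);
  cols[1].length      = c1_len;
  cols[1].is_null     = no_null;
  cols[1].num         = 2;

  taos_stmt_bind_param_batch(stmt, cols);                                  /* step 5: bind VALUES column by column */
  taos_stmt_add_batch(stmt);                                               /* step 6: add bound rows to the batch */
  /* steps 3-6 may be repeated here for more tables or more rows (step 7) */
  taos_stmt_execute(stmt);                                                 /* step 8: run the prepared batch */
  taos_stmt_close(stmt);                                                   /* step 9: release all resources */
}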
packaging/deb/makedeb.sh

@@ -67,7 +67,41 @@ fi
 cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
-cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
+cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
+
+if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
+    install_user_local_path="/usr/local"
+    mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+    cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
+    if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
+        cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
+    fi
+    if [ -f ${compile_dir}/build/bin/jeprof ]; then
+        cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
+    fi
+    if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
+        cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
+    fi
+    if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
+        cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
+        ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
+    fi
+    if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
+        cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
+    fi
+    if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
+        cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
+    fi
+    if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
+        cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
+    fi
+    if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
+        cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
+    fi
+    if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
+        cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
+    fi
+fi

 cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
 chmod 755 ${pkg_dir}/DEBIAN/*
packaging/rpm/makerpm.sh  (diff collapsed in this capture)
packaging/rpm/tdengine.spec

 %define homepath /usr/local/taos
+%define userlocalpath /usr/local
 %define cfg_install_dir /etc/taos
 %define __strip /bin/true
@@ -75,9 +76,53 @@ fi
 cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
-cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
+cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
 cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
+
+if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
+  mkdir -p %{buildroot}%{userlocalpath}/bin
+  mkdir -p %{buildroot}%{userlocalpath}/lib
+  mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
+  mkdir -p %{buildroot}%{userlocalpath}/include
+  mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
+  mkdir -p %{buildroot}%{userlocalpath}/share
+  mkdir -p %{buildroot}%{userlocalpath}/share/doc
+  mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
+  mkdir -p %{buildroot}%{userlocalpath}/share/man
+  mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
+  cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
+  if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
+    cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
+  fi
+  if [ -f %{_compiledir}/build/bin/jeprof ]; then
+    cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
+  fi
+  if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
+    cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
+    cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
+    ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
+    cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
+  fi
+  if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
+    cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
+  fi
+  if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
+    cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
+  fi
+  if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
+    cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
+  fi
+  if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
+    cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
+  fi
+fi

 #Scripts executed before installation
 %pre
 csudo=""
packaging/tools/install.sh

@@ -227,6 +227,52 @@ function install_lib() {
     ${csudo} ldconfig
 }

+function install_jemalloc() {
+    jemalloc_dir=${script_dir}/jemalloc
+
+    if [ -d ${jemalloc_dir} ]; then
+        ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+        if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+            ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+        fi
+        if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+            ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+        fi
+        if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+            ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+        fi
+        if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+            ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+            ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+        fi
+        if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+            ${csudo} /usr/bin/install -c -d /usr/local/lib
+            ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+            ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+            ${csudo} /usr/bin/install -c -d /usr/local/lib
+            if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+                ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+            fi
+            if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+                ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+            fi
+            if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+                ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+                ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+            fi
+        fi
+        if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+            ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+            ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+        fi
+        if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+            ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+            ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+        fi
+    fi
+}
+
 function install_header() {
     ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
     ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
@@ -776,6 +822,7 @@ function update_TDengine() {
     install_log
     install_header
     install_lib
+    install_jemalloc
     if [ "$pagMode" != "lite" ]; then
       install_connector
     fi
packaging/tools/make_install.sh

@@ -204,7 +204,7 @@ function install_jemalloc() {
         if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
             /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
         fi
-        if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
+        if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
             /usr/bin/install -c -d /usr/local/lib/pkgconfig
             /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
         fi
packaging/tools/makepkg.sh

@@ -30,7 +30,7 @@ else
     install_dir="${release_dir}/TDengine-server-${version}"
 fi

-# Directories and files.
+# Directories and files
 if [ "$pagMode" == "lite" ]; then
   strip ${build_dir}/bin/taosd
   strip ${build_dir}/bin/taos
@@ -73,6 +73,39 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
 mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
 mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+    mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+    cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+    if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+        cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+    fi
+    if [ -f ${build_dir}/bin/jeprof ]; then
+        cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+    fi
+    if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+        cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+    fi
+    if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+        cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+        ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+    fi
+    if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+        cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+    fi
+    if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+        cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+    fi
+    if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+        cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+    fi
+    if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+        cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+    fi
+    if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+        cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+    fi
+fi
+
 if [ "$verMode" == "cluster" ]; then
   sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
   mv remove_temp.sh ${install_dir}/bin/remove.sh
src/client/inc/tsclient.h

@@ -283,6 +283,7 @@ typedef struct SSqlStream {
   int64_t  ctime;  // stream created time
   int64_t  stime;  // stream next executed time
   int64_t  etime;  // stream end query time, when time is larger then etime, the stream will be closed
+  int64_t  ltime;  // stream last row time in stream table
   SInterval interval;
   void *  pTimer;
src/client/src/tscParseInsert.c

@@ -774,6 +774,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
       index = 0;
       sToken = tStrGetToken(sql, &index, false);

+      if (sToken.type == TK_ILLEGAL) {
+        return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
+      }
+
       if (sToken.type == TK_RP) {
         break;
       }
src/client/src/tscPrepare.c

@@ -48,6 +48,7 @@ typedef struct SMultiTbStmt {
   bool      nameSet;
   bool      tagSet;
   uint64_t  currentUid;
+  char     *sqlstr;
   uint32_t  tbNum;
   SStrToken tbname;
   SStrToken stbname;
@@ -1290,6 +1291,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
     }

     pStmt->mtb.values = sToken;
   }

   return TSDB_CODE_SUCCESS;
@@ -1369,7 +1371,12 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
     break;
   }

-  free(pSql->sqlstr);
+  if (pStmt->mtb.sqlstr == NULL) {
+    pStmt->mtb.sqlstr = pSql->sqlstr;
+  } else {
+    tfree(pSql->sqlstr);
+  }
   pSql->sqlstr = str;

   return TSDB_CODE_SUCCESS;
@@ -1555,7 +1562,6 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
   }

   pStmt->mtb.nameSet = true;
-  pStmt->mtb.tagSet = true;

   tscDebug("0x%" PRIx64 " SQL: %s", pSql->self, pSql->sqlstr);
@@ -1628,6 +1634,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
       taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
       pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
       taosArrayDestroy(pStmt->mtb.tags);
+      tfree(pStmt->mtb.sqlstr);
     }
   }
src/client/src/tscSQLParser.c

@@ -396,11 +396,18 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       const char* msg2 = "name too long";

       SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
-      if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
+      if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) {
+        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+      }
+
+      char buf[TSDB_DB_NAME_LEN] = {0};
+      SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));
+
+      if (tscValidateName(&token) != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
       }

-      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
+      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token);
       if (ret != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
@@ -5138,6 +5145,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   const char* msg18 = "primary timestamp column cannot be dropped";
   const char* msg19 = "invalid new tag name";
   const char* msg20 = "table is not super table";
+  const char* msg21 = "only binary/nchar column length could be modified";
+  const char* msg22 = "new column length should be bigger than old one";
+  const char* msg23 = "only column length coulbe be modified";
+  const char* msg24 = "invalid binary/nchar column length";

   int32_t code = TSDB_CODE_SUCCESS;
@@ -5168,13 +5179,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   }

-  if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
-      pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
-    if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+  if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
+      pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+    if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
     }
   } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
-  } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
+  } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN ||
+              pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) &&
              UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
   }
@@ -5390,6 +5401,85 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
     TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
     tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+  } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
+    if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+    }
+
+    TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+
+    if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+    }
+
+    SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+    SStrToken    name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+    if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+    }
+
+    SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+    if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+    }
+
+    if (pItem->type != pColSchema->type) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+    }
+
+    if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+        (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+    }
+
+    if (pItem->bytes <= pColSchema->bytes) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+    }
+
+    TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+    tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+  } else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+    if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
+    }
+
+    TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+
+    if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+    }
+
+    SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+    SStrToken    name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+    if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
+    }
+
+    SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+    if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+    }
+
+    if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21);
+    }
+
+    if (pItem->type != pColSchema->type) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23);
+    }
+
+    if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+        (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24);
+    }
+
+    if (pItem->bytes <= pColSchema->bytes) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22);
+    }
+
+    TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+    tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
   }

   return TSDB_CODE_SUCCESS;
@@ -7189,8 +7279,9 @@ static int32_t getTableNameFromSqlNode(SSqlNode* pSqlNode, SArray* tableNameList
     }

     SName name = {0};
-    if (tscSetTableFullName(&name, t, pSql) != TSDB_CODE_SUCCESS) {
-      return invalidOperationMsg(msgBuf, msg1);
+    int32_t code = tscSetTableFullName(&name, t, pSql);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
     }

     taosArrayPush(tableNameList, &name);
src/client/src/tscServer.c

@@ -2469,7 +2469,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
   pNew->fp = fp;
   pNew->param = (void *)pSql->self;

-  tscDebug("0x%" PRIx64 " metaRid from %" PRId64 " to %" PRId64, pSql->self, pSql->metaRid, pNew->self);
+  tscDebug("0x%" PRIx64 " metaRid from 0x%" PRIx64 " to 0x%" PRIx64, pSql->self, pSql->metaRid, pNew->self);

   pSql->metaRid = pNew->self;
   int32_t code = tscBuildAndSendRequest(pNew, NULL);
src/client/src/tscStream.c

@@ -24,6 +24,7 @@
 #include "tutil.h"
 #include "tscProfile.h"
+#include "tscSubquery.h"

 static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows);
 static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows);
@@ -47,8 +48,8 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
 static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
   float retryRangeFactor = 0.3f;
-  int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
-  retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
+  int64_t retryDelta = (int64_t)(tsRetryStreamCompDelay * retryRangeFactor);
+  retryDelta = ((rand() % retryDelta) + tsRetryStreamCompDelay) * 1000L;

   if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
     // change to ms
@@ -575,6 +576,14 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
   pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime);

+  // set stime with ltime if ltime > stime
+  const char* dstTable = pStream->dstTable? pStream->dstTable: "";
+  tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime);
+  if (pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) {
+    tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime);
+    pStream->stime = pStream->ltime;
+  }
+
   int64_t starttime = tscGetLaunchTimestamp(pStream);
   pCmd->command = TSDB_SQL_SELECT;
@@ -590,7 +599,66 @@ void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {
   pStream->dstTable = dstTable;
 }

-TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+// fetchFp call back
+void fetchFpStreamLastRow(void* param, TAOS_RES* res, int num) {
+  SSqlStream* pStream = (SSqlStream*)param;
+  SSqlObj* pSql = res;
+
+  // get row data set to ltime
+  tscSetSqlOwner(pSql);
+  TAOS_ROW row = doSetResultRowData(pSql);
+  if (row && row[0]) {
+    pStream->ltime = *((int64_t*)row[0]);
+    const char* dstTable = pStream->dstTable? pStream->dstTable: "";
+    tscDebug(" CQ stream table=%s last row time=%"PRId64" .", dstTable, pStream->ltime);
+  }
+  tscClearSqlOwner(pSql);
+
+  // no condition call
+  tscCreateStream(param, pStream->pSql, TSDB_CODE_SUCCESS);
+  taos_free_result(res);
+}
+
+// fp callback
+void fpStreamLastRow(void* param, TAOS_RES* res, int code) {
+  // check result successful
+  if (code != TSDB_CODE_SUCCESS) {
+    tscCreateStream(param, res, TSDB_CODE_SUCCESS);
+    taos_free_result(res);
+    return;
+  }
+
+  // asynchronous fetch last row data
+  taos_fetch_rows_a(res, fetchFpStreamLastRow, param);
+}
+
+void cbParseSql(void* param, TAOS_RES* res, int code) {
+  // check result successful
+  SSqlStream* pStream = (SSqlStream*)param;
+  SSqlObj* pSql = pStream->pSql;
+  SSqlCmd* pCmd = &pSql->cmd;
+  if (code != TSDB_CODE_SUCCESS) {
+    pSql->res.code = code;
+    tscDebug("0x%"PRIx64" open stream parse sql failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
+    pStream->fp(pStream->param, NULL, NULL);
+    return;
+  }
+
+  // check dstTable valid
+  if (pStream->dstTable == NULL || strlen(pStream->dstTable) == 0) {
+    tscDebug(" cbParseSql dstTable is empty.");
+    tscCreateStream(param, res, code);
+    return;
+  }
+
+  // query stream last row time async
+  char sql[128] = "";
+  sprintf(sql, "select last_row(*) from %s;", pStream->dstTable);
+  taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param);
+  return;
+}
+
+TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
                               int64_t stime, void *param, void (*callback)(void *)) {
   STscObj *pObj = (STscObj *)taos;
   if (pObj == NULL || pObj->signature != pObj) return NULL;
@@ -613,11 +681,16 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
     return NULL;
   }

+  pStream->ltime = INT64_MIN;
   pStream->stime = stime;
   pStream->fp = fp;
   pStream->callback = callback;
   pStream->param = param;
   pStream->pSql = pSql;
+  pSql->pStream = pStream;
+  pSql->param = pStream;
+  pSql->maxRetry = TSDB_MAX_REPLICA;
+  tscSetStreamDestTable(pStream, dstTable);
+
   pSql->pStream = pStream;
   pSql->param = pStream;
@@ -640,10 +713,17 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);

+  pSql->fp = cbParseSql;
+  pSql->fetchFp = cbParseSql;
+  registerSqlObj(pSql);
+
   int32_t code = tsParseSql(pSql, true);
   if (code == TSDB_CODE_SUCCESS) {
-    tscCreateStream(pStream, pSql, code);
-  } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+    cbParseSql(pStream, pSql, code);
+  } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+    tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr);
+  } else {
     tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
     taosReleaseRef(tscObjRef, pSql->self);
     free(pStream);
@@ -653,6 +733,11 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   return pStream;
 }

+TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+                              int64_t stime, void *param, void (*callback)(void *)) {
+  return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback);
+}
+
 void taos_close_stream(TAOS_STREAM *handle) {
   SSqlStream *pStream = (SSqlStream *)handle;
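The new path above lets a continuous query recover its start time from the destination table: cbParseSql issues `select last_row(*)` on dstTable, fetchFpStreamLastRow stores the returned timestamp in pStream->ltime, and tscCreateStream then replaces stime with ltime when the latter is newer. The block below is a hypothetical caller-side sketch of the new taos_open_stream_withname entry point, not code from the commit: the table name, the SQL text, the meaning of stime = 0 ("start from now"), and the assumption that the function is exposed to client code are all assumptions.

/* Hypothetical usage sketch of taos_open_stream_withname (assumptions noted above). */
#include <stdio.h>
#include "taos.h"

static void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  (void)param; (void)res;
  if (row != NULL) {
    printf("stream produced a result row\n");   /* consume one aggregated row */
  }
}

void open_cq_sketch(TAOS *taos) {
  /* "avg_t" is a hypothetical destination table; passing its name lets the
   * client pick up the last written row time before the stream starts. */
  TAOS_STREAM *cq = taos_open_stream_withname(
      taos, "avg_t",
      "select avg(c1) from weather_test interval(10s)",
      stream_cb, 0 /* stime: assumed to mean "start from now" */, NULL, NULL);
  if (cq == NULL) {
    fprintf(stderr, "failed to open stream\n");
    return;
  }
  /* ... let the continuous query run ... */
  taos_close_stream(cq);
}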
src/client/src/tscSubquery.c

@@ -1469,6 +1469,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
       SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;

+      pParentSql->res.precision = pRes1->precision;
+
       if (pRes1->row > 0 && pRes1->numOfRows > 0) {
         tscDebug("0x%" PRIx64 " sub:0x%" PRIx64 " index:%d numOfRows:%d total:%" PRId64 " (not retrieve)", pParentSql->self,
             pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
src/client/src/tscUtil.c

@@ -1119,6 +1119,8 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
     SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);

+    pOutput->precision = pSqlObjList[0]->res.precision;
+
     SSchema* schema = NULL;
     if (px->numOfTables > 1) {
       SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES);
src/common/inc/tdataformat.h

@@ -319,7 +319,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
 SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
 SDataCols *tdFreeDataCols(SDataCols *pCols);
 void       tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
-int        tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
+int        tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset);

 // ----------------- K-V data row structure
 /*
src/common/inc/tglobal.h

@@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting;
 extern char tsEmail[];
 extern char tsArbitrator[];
 extern int8_t tsArbOnline;
+extern int64_t tsArbOnlineTimestamp;
 extern int32_t tsDnodeId;

 // common
@@ -75,7 +76,7 @@ extern int32_t tsMinSlidingTime;
 extern int32_t tsMinIntervalTime;
 extern int32_t tsMaxStreamComputDelay;
 extern int32_t tsStreamCompStartDelay;
-extern int32_t tsStreamCompRetryDelay;
+extern int32_t tsRetryStreamCompDelay;
 extern float tsStreamComputDelayRatio;   // the delayed computing ration of the whole time window
 extern int32_t tsProjectExecInterval;
 extern int64_t tsMaxRetentWindow;
src/common/src/tdataformat.c
...
@@ -441,30 +441,35 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols)
   pCols->numOfRows++;
 }
 
-int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) {
   ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
   ASSERT(target->numOfCols == source->numOfCols);
+  int offset = 0;
+
+  if (pOffset == NULL) {
+    pOffset = &offset;
+  }
 
   SDataCols *pTarget = NULL;
 
-  if (dataColsKeyLast(target) < dataColsKeyFirst(source)) {  // No overlap
+  if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) {  // No overlap
     ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
     for (int i = 0; i < rowsToMerge; i++) {
       for (int j = 0; j < source->numOfCols; j++) {
         if (source->cols[j].len > 0) {
-          dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows,
+          dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
                            target->maxPoints);
         }
       }
       target->numOfRows++;
     }
+    (*pOffset) += rowsToMerge;
   } else {
     pTarget = tdDupDataCols(target, true);
     if (pTarget == NULL) goto _err;
 
     int iter1 = 0;
-    int iter2 = 0;
-    tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows,
+    tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
                        pTarget->numOfRows + rowsToMerge);
   }
...
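The new pOffset argument lets a caller drain one source block into several targets without re-reading rows that were already consumed. Below is a minimal sketch of that usage, assuming only the tdMergeDataCols signature shown above; the helper name and the chunk size of 100 are made up for illustration, and passing NULL for pOffset keeps the old single-shot behaviour.

// Hypothetical helper: merge `source` into `target` in chunks, letting
// tdMergeDataCols advance the shared read offset on every call.
static int mergeInChunks(SDataCols *target, SDataCols *source) {
  int offset = 0;                              // rows of `source` already merged
  while (offset < source->numOfRows) {
    int rows = source->numOfRows - offset;
    if (rows > 100) rows = 100;                // illustrative chunk size
    if (tdMergeDataCols(target, source, rows, &offset) < 0) return -1;
  }
  return 0;
}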
src/common/src/tglobal.c
...
@@ -42,11 +42,12 @@ int32_t tsNumOfMnodes = 3;
 int8_t  tsEnableVnodeBak = 1;
 int8_t  tsEnableTelemetryReporting = 1;
 int8_t  tsArbOnline = 0;
+int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
 char    tsEmail[TSDB_FQDN_LEN] = {0};
 int32_t tsDnodeId = 0;
 
 // common
-int32_t tsRpcTimer = 1000;
+int32_t tsRpcTimer = 300;
 int32_t tsRpcMaxTime = 600;  // seconds;
 int32_t tsRpcForceTcp = 0;   //disable this, means query, show command use udp protocol as default
 int32_t tsMaxShellConns = 50000;
...
@@ -93,7 +94,7 @@ int32_t tsMaxStreamComputDelay = 20000;
 int32_t tsStreamCompStartDelay = 10000;
 
 // the stream computing delay time after executing failed, change accordingly
-int32_t tsStreamCompRetryDelay = 10;
+int32_t tsRetryStreamCompDelay = 10 * 1000;
 
 // The delayed computing ration. 10% of the whole computing time window by default.
 float tsStreamComputDelayRatio = 0.1f;
...
@@ -710,7 +711,7 @@ static void doInitGlobalConfig(void) {
   taosInitConfigOption(cfg);
 
   cfg.option = "retryStreamCompDelay";
-  cfg.ptr = &tsStreamCompRetryDelay;
+  cfg.ptr = &tsRetryStreamCompDelay;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
   cfg.minValue = 10;
...
src/connector/go @ 7a26c432 (compare 8ce6d865 ... 7a26c432)
-Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
+Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
...
@@ -243,15 +243,93 @@ public class TSDBPreparedStatementTest {
         s.setNString(1, s2, 4);
 
         random = 10 + r.nextInt(5);
-        ArrayList<String> s5 = new ArrayList<String>();
+        ArrayList<String> s3 = new ArrayList<String>();
         for (int i = 0; i < numOfRows; i++) {
             if (i % random == 0) {
-                s5.add(null);
+                s3.add(null);
             } else {
-                s5.add("test" + i % 10);
+                s3.add("test" + i % 10);
             }
         }
+        s.setString(2, s3, 10);
+
+        s.columnDataAddBatch();
+        s.columnDataExecuteBatch();
+        s.columnDataCloseBatch();
+
+        String sql = "select * from weather_test";
+        PreparedStatement statement = conn.prepareStatement(sql);
+        ResultSet rs = statement.executeQuery();
+        int rows = 0;
+        while (rs.next()) {
+            rows++;
+        }
+        Assert.assertEquals(numOfRows, rows);
+    }
+
+    @Test
+    public void bindDataWithSingleTagTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        String types[] = new String[]{"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"};
+
+        for (String type : types) {
+            stmt.execute("drop table if exists weather_test");
+            stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t " + type + ")");
+
+            int numOfRows = 1;
+
+            TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?, ?)");
+            Random r = new Random();
+            s.setTableName("w1");
+
+            switch (type) {
+                case "tinyint":
+                case "smallint":
+                case "int":
+                case "bigint":
+                    s.setTagInt(0, 1);
+                    break;
+                case "float":
+                    s.setTagFloat(0, 1.23f);
+                    break;
+                case "double":
+                    s.setTagDouble(0, 3.14159265);
+                    break;
+                case "bool":
+                    s.setTagBoolean(0, true);
+                    break;
+                case "binary(10)":
+                    s.setTagString(0, "test");
+                    break;
+                case "nchar(10)":
+                    s.setTagNString(0, "test");
+                    break;
+                default:
+                    break;
+            }
+
+            ArrayList<Long> ts = new ArrayList<Long>();
+            for (int i = 0; i < numOfRows; i++) {
+                ts.add(System.currentTimeMillis() + i);
+            }
+            s.setTimestamp(0, ts);
+
+            int random = 10 + r.nextInt(5);
+            ArrayList<String> s2 = new ArrayList<String>();
+            for (int i = 0; i < numOfRows; i++) {
+                s2.add("分支" + i % 4);
+            }
+            s.setNString(1, s2, 10);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<String> s3 = new ArrayList<String>();
+            for (int i = 0; i < numOfRows; i++) {
+                s3.add("test" + i % 4);
+            }
-        s.setString(2, s5, 10);
+            s.setString(2, s3, 10);
 
         s.columnDataAddBatch();
         s.columnDataExecuteBatch();
...
@@ -269,6 +347,47 @@ public class TSDBPreparedStatementTest {
     }
 
+    @Test
+    public void bindDataWithMultipleTagsTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        stmt.execute("drop table if exists weather_test");
+        stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
+
+        int numOfRows = 1;
+
+        TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
+        s.setTableName("w2");
+        s.setTagInt(0, 1);
+        s.setTagString(1, "test");
+
+        ArrayList<Long> ts = new ArrayList<Long>();
+        for (int i = 0; i < numOfRows; i++) {
+            ts.add(System.currentTimeMillis() + i);
+        }
+        s.setTimestamp(0, ts);
+
+        ArrayList<String> s2 = new ArrayList<String>();
+        for (int i = 0; i < numOfRows; i++) {
+            s2.add("test" + i % 4);
+        }
+        s.setString(1, s2, 10);
+
+        s.columnDataAddBatch();
+        s.columnDataExecuteBatch();
+        s.columnDataCloseBatch();
+
+        String sql = "select * from weather_test";
+        PreparedStatement statement = conn.prepareStatement(sql);
+        ResultSet rs = statement.executeQuery();
+        int rows = 0;
+        while (rs.next()) {
+            rows++;
+        }
+        Assert.assertEquals(numOfRows, rows);
+    }
+
     @Test
     public void setBoolean() throws SQLException {
...
src/connector/python/taos/__init__.py
...
@@ -2,6 +2,10 @@
 from .connection import TDengineConnection
 from .cursor import TDengineCursor
 
+# For some reason, the following is needed for VS Code (through PyLance) to
+# recognize that "error" is a valid module of the "taos" package.
+from .error import ProgrammingError
+
 # Globals
 threadsafety = 0
 paramstyle = 'pyformat'
...
src/cq/src/cqMain.c
...
@@ -437,6 +437,10 @@ static void cqProcessCreateTimer(void *param, void *tmrId) {
   taosReleaseRef(cqObjRef, (int64_t)param);
 }
 
+// inner implement in tscStream.c
+TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char *desName, const char *sqlstr,
+                                       void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime,
+                                       void *param, void (*callback)(void *));
+
 static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->pContext = pContext;
...
@@ -449,11 +453,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
   pObj->tmrId = 0;
 
   if (pObj->pStream == NULL) {
-    pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
+    pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes,
+                                              INT64_MIN, (void *)pObj->rid, NULL);
 
     // TODO the pObj->pStream may be released if error happens
     if (pObj->pStream) {
-      tscSetStreamDestTable(pObj->pStream, pObj->dstTable);
       pContext->num++;
       cDebug("vgId:%d, id:%d CQ:%s is opened", pContext->vgId, pObj->tid, pObj->sqlStr);
     } else {
...
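A sketch of how this internal entry point would be called, based only on the prototype declared in the hunk above; the connection, SQL text, and destination table name are placeholders, and error handling is omitted.

// Illustrative only: open a continuous query whose results land in table "dst_tb".
static void on_stream_row(void *param, TAOS_RES *res, TAOS_ROW row) {
  (void)param; (void)res; (void)row;           // consume one row of stream output
}

static TAOS_STREAM *open_named_stream(TAOS *conn) {
  // INT64_MIN mirrors the call in cqCreateStream(): start from the earliest data,
  // with no user parameter and no close callback.
  return taos_open_stream_withname(conn, "dst_tb",
                                   "select count(*) from demo.meters interval(10s)",
                                   on_stream_row, INT64_MIN, NULL, NULL);
}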
src/dnode/src/dnodeMain.c
...
@@ -88,13 +88,24 @@ static SStep tsDnodeSteps[] = {
 static SStep tsDnodeCompactSteps[] = {
   {"dnode-tfile",     tfInit,              tfCleanup},
+  {"dnode-globalcfg", taosCheckGlobalCfg,  NULL},
   {"dnode-storage",   dnodeInitStorage,    dnodeCleanupStorage},
+  {"dnode-cfg",       dnodeInitCfg,        dnodeCleanupCfg},
   {"dnode-eps",       dnodeInitEps,        dnodeCleanupEps},
+  {"dnode-minfos",    dnodeInitMInfos,     dnodeCleanupMInfos},
   {"dnode-wal",       walInit,             walCleanUp},
+  {"dnode-sync",      syncInit,            syncCleanUp},
+  {"dnode-vread",     dnodeInitVRead,      dnodeCleanupVRead},
+  {"dnode-vwrite",    dnodeInitVWrite,     dnodeCleanupVWrite},
+  {"dnode-vmgmt",     dnodeInitVMgmt,      dnodeCleanupVMgmt},
   {"dnode-mread",     dnodeInitMRead,      NULL},
   {"dnode-mwrite",    dnodeInitMWrite,     NULL},
   {"dnode-mpeer",     dnodeInitMPeer,      NULL},
+  {"dnode-vnodes",    dnodeInitVnodes,     dnodeCleanupVnodes},
   {"dnode-modules",   dnodeInitModules,    dnodeCleanupModules},
+  {"dnode-mread",     NULL,                dnodeCleanupMRead},
+  {"dnode-mwrite",    NULL,                dnodeCleanupMWrite},
+  {"dnode-mpeer",     NULL,                dnodeCleanupMPeer},
 };
 
 static int dnodeCreateDir(const char *dir) {
...
src/inc/taosdef.h
...
@@ -375,6 +375,8 @@ do { \
 #define TSDB_MAX_WAL_SIZE    (1024*1024*3)
 
+#define TSDB_ARB_DUMMY_TIME  4765104000000  // 2121-01-01 00:00:00.000, :P
+
 typedef enum {
   TAOS_QTYPE_RPC   = 0,
   TAOS_QTYPE_FWD   = 1,
...
src/inc/taoserror.h
...
@@ -215,11 +215,11 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_IS_FLOWCTRL              TAOS_DEF_ERROR_CODE(0, 0x050C)  //"Database memory is full for waiting commit")
 #define TSDB_CODE_VND_IS_DROPPING              TAOS_DEF_ERROR_CODE(0, 0x050D)  //"Database is dropping")
 #define TSDB_CODE_VND_IS_BALANCING             TAOS_DEF_ERROR_CODE(0, 0x050E)  //"Database is balancing")
-#define TSDB_CODE_VND_IS_CLOSING               TAOS_DEF_ERROR_CODE(0, 0x0510)  //"Database is closing")
 #define TSDB_CODE_VND_NOT_SYNCED               TAOS_DEF_ERROR_CODE(0, 0x0511)  //"Database suspended")
 #define TSDB_CODE_VND_NO_WRITE_AUTH            TAOS_DEF_ERROR_CODE(0, 0x0512)  //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING               TAOS_DEF_ERROR_CODE(0, 0x0513)  //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE       TAOS_DEF_ERROR_CODE(0, 0x0514)  //"Invalid tsdb state")
+#define TSDB_CODE_VND_IS_CLOSING               TAOS_DEF_ERROR_CODE(0, 0x0515)  //"Database is closing")
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID         TAOS_DEF_ERROR_CODE(0, 0x0600)  //"Invalid table ID")
...
src/inc/taosmsg.h
...
@@ -161,6 +161,7 @@ enum _mgmt_table {
 #define TSDB_ALTER_TABLE_ADD_COLUMN         5
 #define TSDB_ALTER_TABLE_DROP_COLUMN        6
 #define TSDB_ALTER_TABLE_CHANGE_COLUMN      7
+#define TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN  8
 
 #define TSDB_FILL_NONE      0
 #define TSDB_FILL_NULL      1
...
src/inc/tfs.h
...
@@ -31,6 +31,8 @@ typedef struct {
 #define TFS_UNDECIDED_ID -1
 #define TFS_PRIMARY_LEVEL 0
 #define TFS_PRIMARY_ID 0
+#define TFS_MIN_LEVEL 0
+#define TFS_MAX_LEVEL (TSDB_MAX_TIERS - 1)
 
 // FS APIs ====================================
 typedef struct {
...
src/inc/tsdb.h
...
@@ -409,6 +409,9 @@ void tsdbDecCommitRef(int vgId);
 int tsdbSyncSend(void *pRepo, SOCKET socketFd);
 int tsdbSyncRecv(void *pRepo, SOCKET socketFd);
 
+// For TSDB Compact
+int tsdbCompact(STsdbRepo *pRepo);
+
 #ifdef __cplusplus
 }
 #endif
...
src/inc/ttokendef.h
...
@@ -156,54 +156,62 @@
 #define TK_SYNCDB                         137
 #define TK_ADD                            138
 #define TK_COLUMN                         139
+#define TK_MODIFY                         140
 (the new MODIFY token shifts every following token id up by one:)
 TK_TAG 140→141, TK_CHANGE 141→142, TK_SET 142→143, TK_KILL 143→144, TK_CONNECTION 144→145,
 TK_STREAM 145→146, TK_COLON 146→147, TK_ABORT 147→148, TK_AFTER 148→149, TK_ATTACH 149→150,
 TK_BEFORE 150→151, TK_BEGIN 151→152, TK_CASCADE 152→153, TK_CLUSTER 153→154, TK_CONFLICT 154→155,
 TK_COPY 155→156, TK_DEFERRED 156→157, TK_DELIMITERS 157→158, TK_DETACH 158→159, TK_EACH 159→160,
 TK_END 160→161, TK_EXPLAIN 161→162, TK_FAIL 162→163, TK_FOR 163→164, TK_IGNORE 164→165,
 TK_IMMEDIATE 165→166, TK_INITIALLY 166→167, TK_INSTEAD 167→168, TK_MATCH 168→169, TK_KEY 169→170,
 TK_OF 170→171, TK_RAISE 171→172, TK_REPLACE 172→173, TK_RESTRICT 173→174, TK_ROW 174→175,
 TK_STATEMENT 175→176, TK_TRIGGER 176→177, TK_VIEW 177→178, TK_SEMI 178→179, TK_NONE 179→180,
 TK_PREV 180→181, TK_LINEAR 181→182, TK_IMPORT 182→183, TK_TBNAME 183→184, TK_JOIN 184→185,
 TK_INSERT 185→186, TK_INTO 186→187, TK_VALUES 187→188
 
 #define TK_SPACE                          300
 #define TK_COMMENT                        301
...
src/mnode/src/mnodeDnode.c
...
@@ -941,7 +941,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(int64_t *)pWrite = 0;
+    *(int64_t *)pWrite = tsArbOnlineTimestamp;
     cols++;
 
     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
...
src/mnode/src/mnodeSdb.c
...
@@ -656,8 +656,6 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
     dnodeReportStep("mnode-sdb", stepDesc, 0);
   }
 
-  if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable);
-
   pthread_mutex_lock(&tsSdbMgmt.mutex);
 
   if (pHead->version == 0) {
...
@@ -721,13 +719,11 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    //if (qtype == TAOS_QTYPE_FWD) {
-      // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-      // sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-      // return TSDB_CODE_SUCCESS;
-    //} else {
+    if (qtype == TAOS_QTYPE_FWD) {
+      // Drop database/stable may take a long time and cause a timeout, so we confirm first
+      syncConfirmForward(tsSdbMgmt.sync, pHead->version, TSDB_CODE_SUCCESS, false);
+    }
     return sdbPerformDeleteAction(pHead, pTable);
-    //}
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
...
@@ -1140,8 +1136,11 @@ static void *sdbWorkerFp(void *pWorker) {
       sdbConfirmForward(1, pRow, pRow->code);
     } else {
       if (qtype == TAOS_QTYPE_FWD) {
+        int32_t action = pRow->pHead.msgType % 10;
+        if (action != SDB_ACTION_DELETE) {
           syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false);
+        }
       }
       sdbFreeFromQueue(pRow);
     }
   }
...
@@ -1177,9 +1176,10 @@ int32_t mnodeCompactWal() {
     return -1;
   }
 
-  // close wal
-  walFsync(tsSdbMgmt.wal, true);
-  walClose(tsSdbMgmt.wal);
+  // close sdb and sync to disk
+  //walFsync(tsSdbMgmt.wal, true);
+  //walClose(tsSdbMgmt.wal);
+  sdbCleanUp();
 
   // rename old wal to wal_bak
   if (taosRename(tsMnodeDir, tsMnodeBakDir) != 0) {
...
src/mnode/src/mnodeTable.c
...
@@ -93,6 +93,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg);
 static void    mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg);
 static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName);
+static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg);
+static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg);
+static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg);
 
 static void mnodeDestroyChildTable(SCTableObj *pTable) {
   tfree(pTable->info.tableId);
...
@@ -1457,31 +1460,52 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
   return code;
 }
 
-static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
+static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
+  SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
+  char* name = pAlter->schema[0].name;
   SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
-  int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
+  int32_t col = mnodeFindSuperTableColumnIndex(pStable, name);
   if (col < 0) {
-    mError("msg:%p, app:%p stable:%s, change column, oldName:%s, newName:%s", pMsg, pMsg->rpcMsg.ahandle,
-           pStable->info.tableId, oldName, newName);
+    mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle,
+           pStable->info.tableId, name);
     return TSDB_CODE_MND_FIELD_NOT_EXIST;
   }
 
-  // int32_t rowSize = 0;
-  uint32_t len = (uint32_t)strlen(newName);
-  if (len >= TSDB_COL_NAME_LEN) {
-    return TSDB_CODE_MND_COL_NAME_TOO_LONG;
-  }
-
-  if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
-    return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
-  }
-
   // update
   SSchema *schema = (SSchema *) (pStable->schema + col);
-  tstrncpy(schema->name, newName, sizeof(schema->name));
-
-  mInfo("msg:%p, app:%p stable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
-        oldName, newName);
+  ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+  schema->bytes = pAlter->schema[0].bytes;
+  mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle,
+        pStable->info.tableId, name, schema->bytes);
+
+  SSdbRow row = {
+    .type   = SDB_OPER_GLOBAL,
+    .pTable = tsSuperTableSdb,
+    .pObj   = pStable,
+    .pMsg   = pMsg,
+    .fpRsp  = mnodeChangeSuperTableColumnCb
+  };
+  return sdbUpdateRow(&row);
+}
+
+static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) {
+  SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
+  char* name = pAlter->schema[0].name;
+  SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
+  int32_t col = mnodeFindSuperTableTagIndex(pStable, name);
+  if (col < 0) {
+    mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, name);
+    return TSDB_CODE_MND_FIELD_NOT_EXIST;
+  }
+
+  // update
+  SSchema *schema = (SSchema *) (pStable->schema + col);
+  ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+  schema->bytes = pAlter->schema[0].bytes;
+  mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle,
+        pStable->info.tableId, name, schema->bytes);
 
   SSdbRow row = {
     .type = SDB_OPER_GLOBAL,
...
@@ -2355,31 +2379,23 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
   return sdbUpdateRow(&row);
 }
 
-static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
+static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg) {
+  SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont;
+  char* name = pAlter->schema[0].name;
   SCTableObj *pTable = (SCTableObj *)pMsg->pTable;
-  int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
+  int32_t col = mnodeFindNormalTableColumnIndex(pTable, name);
   if (col < 0) {
-    mError("msg:%p, app:%p ctable:%s, change column, oldName: %s, newName: %s", pMsg, pMsg->rpcMsg.ahandle,
-           pTable->info.tableId, oldName, newName);
+    mError("msg:%p, app:%p ctable:%s, change column, name: %s", pMsg, pMsg->rpcMsg.ahandle,
+           pTable->info.tableId, name);
     return TSDB_CODE_MND_FIELD_NOT_EXIST;
   }
 
-  // int32_t rowSize = 0;
-  uint32_t len = (uint32_t)strlen(newName);
-  if (len >= TSDB_COL_NAME_LEN) {
-    return TSDB_CODE_MND_COL_NAME_TOO_LONG;
-  }
-
-  if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
-    return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
-  }
-
   // update
   SSchema *schema = (SSchema *) (pTable->schema + col);
-  tstrncpy(schema->name, newName, sizeof(schema->name));
-
-  mInfo("msg:%p, app:%p ctable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId,
-        oldName, newName);
+  ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+  schema->bytes = pAlter->schema[0].bytes;
+
+  mInfo("msg:%p, app:%p ctable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle,
+        pTable->info.tableId, name, schema->bytes);
 
   SSdbRow row = {
     .type = SDB_OPER_GLOBAL,
...
@@ -3214,7 +3230,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
     } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
       code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
     } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-      code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+      code = mnodeChangeSuperTableColumn(pMsg);
+    } else if (pAlter->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+      code = mnodeChangeSuperTableTag(pMsg);
     } else {
     }
   } else {
...
@@ -3226,7 +3244,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
     } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
       code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
     } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
-      code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
+      code = mnodeChangeNormalTableColumn(pMsg);
    } else {
    }
  }
...
src/query/inc/sql.y
...
@@ -759,6 +759,12 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
   setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }
 
+cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
+    X.n += F.n;
+    SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 //////////////////////////////////ALTER TAGS statement/////////////////////////////////////
 cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
     X.n += Y.n;
...
@@ -799,6 +805,11 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
   setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }
 
+cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
+    X.n += F.n;
+    SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 ///////////////////////////////////ALTER STABLE statement//////////////////////////////////
 cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
...
@@ -817,6 +828,12 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
   setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }
 
+cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). {
+    X.n += F.n;
+    SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 //////////////////////////////////ALTER TAGS statement/////////////////////////////////////
 cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
     X.n += Y.n;
...
@@ -846,6 +863,23 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
   setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
 }
 
+cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
+    X.n += F.n;
+    toTSDBType(Y.type);
+    SArray* A = tVariantListAppendToken(NULL, &Y, -1);
+    A = tVariantListAppend(A, &Z, -1);
+    SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
+cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). {
+    X.n += F.n;
+    SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
+    setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
+}
+
 ////////////////////////////////////////kill statement///////////////////////////////////////
 cmd ::= KILL CONNECTION INTEGER(Y).   {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
 cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y).    {X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &X);}
...
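Together with the mnode changes above, these grammar rules let clients widen BINARY/NCHAR columns and tags in place. A sketch of exercising the new statements from the C client; the database, table, and field names are hypothetical and error checks are omitted.

#include <taos.h>

// Illustrative only: MODIFY COLUMN / MODIFY TAG can only enlarge the length of
// BINARY or NCHAR fields, matching the ASSERTs added in mnodeTable.c above.
static void widen_fields(TAOS *conn) {
  taos_free_result(taos_query(conn, "ALTER TABLE demo.tb MODIFY COLUMN note BINARY(64)"));
  taos_free_result(taos_query(conn, "ALTER STABLE demo.st MODIFY COLUMN note BINARY(64)"));
  taos_free_result(taos_query(conn, "ALTER STABLE demo.st MODIFY TAG location NCHAR(32)"));
}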
src/query/src/qExecutor.c
...
@@ -1347,8 +1347,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
       pInfo->start = j;
     } else if (tsList[j] - pInfo->prevTs <= gap) {
       pInfo->curWindow.ekey = tsList[j];
-      pInfo->numOfRows += 1;
       pInfo->prevTs = tsList[j];
+      pInfo->numOfRows += 1;
       if (j == 0 && pInfo->start != 0) {
         pInfo->numOfRows = 1;
         pInfo->start = 0;
...
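The reorder above makes the row counter bump happen only after prevTs has been refreshed for the current row. For readers unfamiliar with session windows, here is a standalone sketch of the grouping rule being implemented; it is not the engine code, just plain C over an in-memory timestamp array.

#include <stdint.h>
#include <stdio.h>

// Rows whose timestamp is within `gap` of the previous row belong to the same
// session window; a larger jump closes the window and starts a new one.
static void printSessionWindows(const int64_t *ts, int n, int64_t gap) {
  int start = 0;
  for (int j = 1; j <= n; j++) {
    if (j == n || ts[j] - ts[j - 1] > gap) {
      printf("window [%lld, %lld] rows=%d\n",
             (long long)ts[start], (long long)ts[j - 1], j - start);
      start = j;
    }
  }
}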
src/query/src/qSqlParser.c
...
@@ -893,7 +893,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray
   pAlterTable->type = type;
   pAlterTable->tableType = tableType;
 
-  if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
+  if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN ||
+      type == TSDB_ALTER_TABLE_CHANGE_COLUMN || type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
     pAlterTable->pAddColumns = pCols;
     assert(pVals == NULL);
   } else {
...
src/query/src/sql.c
(This diff is collapsed in the web view; sql.c is the parser generated from sql.y.)
src/sync/src/syncMain.c
...
@@ -1150,7 +1150,12 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
     pPeer->peerFd = connFd;
     pPeer->role = TAOS_SYNC_ROLE_UNSYNCED;
     pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd);
-    if (pPeer->isArb) tsArbOnline = 1;
+    if (pPeer->isArb) {
+      tsArbOnline = 1;
+      if (tsArbOnlineTimestamp == TSDB_ARB_DUMMY_TIME) {
+        tsArbOnlineTimestamp = taosGetTimestampMs();
+      }
+    }
   } else {
     sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno));
     taosCloseSocket(connFd);
...
src/tsdb/CMakeLists.txt
...
@@ -6,6 +6,10 @@ AUX_SOURCE_DIRECTORY(src SRC)
 ADD_LIBRARY(tsdb ${SRC})
 TARGET_LINK_LIBRARIES(tsdb tfs common tutil)
 
+IF (TD_TSDB_PLUGINS)
+  TARGET_LINK_LIBRARIES(tsdb tsdbPlugins)
+ENDIF ()
+
 IF (TD_LINUX)
   # Someone has no gtest directory, so comment it
   # ADD_SUBDIRECTORY(tests)
...
src/tsdb/inc/tsdbCommit.h
...
@@ -29,10 +29,17 @@ typedef struct {
   int64_t size;
 } SKVRecord;
 
+#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5)
+
 void  tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn);
 int   tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord);
 void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord);
 void *tsdbCommitData(STsdbRepo *pRepo);
+int   tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
+int   tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx);
+int   tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
+int   tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock,
+                         bool isLast, bool isSuper, void **ppBuf, void **ppCBuf);
 int   tsdbApplyRtn(STsdbRepo *pRepo);
 
 static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) {
...
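TSDB_DEFAULT_BLOCK_ROWS is now shared between the commit path (through TSDB_COMMIT_DEFAULT_ROWS in tsdbCommit.c below) and any other writer. A quick worked example, assuming a hypothetical maxRowsPerFileBlock of 4096:

// Illustration only: 4096 * 4 / 5 == 3276, i.e. a "default" block targets
// roughly 80% of the configured per-block row limit.
int defaultRows = TSDB_DEFAULT_BLOCK_ROWS(4096);   // -> 3276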
src/tsdb/inc/tsdbCommitQueue.h
...
@@ -16,6 +16,8 @@
 #ifndef _TD_TSDB_COMMIT_QUEUE_H_
 #define _TD_TSDB_COMMIT_QUEUE_H_
 
-int tsdbScheduleCommit(STsdbRepo *pRepo);
+typedef enum { COMMIT_REQ, COMPACT_REQ } TSDB_REQ_T;
+
+int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req);
 
 #endif /* _TD_TSDB_COMMIT_QUEUE_H_ */
\ No newline at end of file
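With the request type added, one worker queue now serves both commits and compactions. A hedged sketch of a caller, assuming only the enum and signature above (the wrapper name is hypothetical; the real call sites are in tsdbMemTable.c and the compact path):

// Illustrative only: both request kinds funnel through the same queue, so a
// compaction request is serialized with ordinary commits on the worker thread.
static int scheduleTsdbWork(STsdbRepo *pRepo, bool compact) {
  return tsdbScheduleCommit(pRepo, compact ? COMPACT_REQ : COMMIT_REQ);
}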
src/tsdb/inc/tsdbCompact.h (new file, 0 → 100644)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_TSDB_COMPACT_H_
#define _TD_TSDB_COMPACT_H_

#ifdef __cplusplus
extern "C" {
#endif

void *tsdbCompactImpl(STsdbRepo *pRepo);

#ifdef __cplusplus
}
#endif

#endif /* _TD_TSDB_COMPACT_H_ */
\ No newline at end of file
src/tsdb/inc/tsdbint.h
...
@@ -64,6 +64,8 @@ extern "C" {
 #include "tsdbReadImpl.h"
 // Commit
 #include "tsdbCommit.h"
+// Compact
+#include "tsdbCompact.h"
 // Commit Queue
 #include "tsdbCommitQueue.h"
 // Main definitions
...
src/tsdb/src/tsdbCommit.c
浏览文件 @
1e3ca4c6
...
@@ -51,7 +51,7 @@ typedef struct {
...
@@ -51,7 +51,7 @@ typedef struct {
#define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST)
#define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST)
#define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh))
#define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh))
#define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh))
#define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh))
#define TSDB_COMMIT_DEFAULT_ROWS(ch)
(TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock * 4 / 5
)
#define TSDB_COMMIT_DEFAULT_ROWS(ch)
TSDB_DEFAULT_BLOCK_ROWS(TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock
)
#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))
#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))
static
int
tsdbCommitMeta
(
STsdbRepo
*
pRepo
);
static
int
tsdbCommitMeta
(
STsdbRepo
*
pRepo
);
...
@@ -72,7 +72,6 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid);
...
@@ -72,7 +72,6 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid);
static
int
tsdbSetCommitTable
(
SCommitH
*
pCommith
,
STable
*
pTable
);
static
int
tsdbSetCommitTable
(
SCommitH
*
pCommith
,
STable
*
pTable
);
static
int
tsdbComparKeyBlock
(
const
void
*
arg1
,
const
void
*
arg2
);
static
int
tsdbComparKeyBlock
(
const
void
*
arg1
,
const
void
*
arg2
);
static
int
tsdbWriteBlockInfo
(
SCommitH
*
pCommih
);
static
int
tsdbWriteBlockInfo
(
SCommitH
*
pCommih
);
static
int
tsdbWriteBlockIdx
(
SCommitH
*
pCommih
);
static
int
tsdbCommitMemData
(
SCommitH
*
pCommith
,
SCommitIter
*
pIter
,
TSKEY
keyLimit
,
bool
toData
);
static
int
tsdbCommitMemData
(
SCommitH
*
pCommith
,
SCommitIter
*
pIter
,
TSKEY
keyLimit
,
bool
toData
);
static
int
tsdbMergeMemData
(
SCommitH
*
pCommith
,
SCommitIter
*
pIter
,
int
bidx
);
static
int
tsdbMergeMemData
(
SCommitH
*
pCommith
,
SCommitIter
*
pIter
,
int
bidx
);
static
int
tsdbMoveBlock
(
SCommitH
*
pCommith
,
int
bidx
);
static
int
tsdbMoveBlock
(
SCommitH
*
pCommith
,
int
bidx
);
...
@@ -86,7 +85,6 @@ static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError);
...
@@ -86,7 +85,6 @@ static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError);
static
bool
tsdbCanAddSubBlock
(
SCommitH
*
pCommith
,
SBlock
*
pBlock
,
SMergeInfo
*
pInfo
);
static
bool
tsdbCanAddSubBlock
(
SCommitH
*
pCommith
,
SBlock
*
pBlock
,
SMergeInfo
*
pInfo
);
static
void
tsdbLoadAndMergeFromCache
(
SDataCols
*
pDataCols
,
int
*
iter
,
SCommitIter
*
pCommitIter
,
SDataCols
*
pTarget
,
static
void
tsdbLoadAndMergeFromCache
(
SDataCols
*
pDataCols
,
int
*
iter
,
SCommitIter
*
pCommitIter
,
SDataCols
*
pTarget
,
TSKEY
maxKey
,
int
maxRows
,
int8_t
update
);
TSKEY
maxKey
,
int
maxRows
,
int8_t
update
);
static
int
tsdbApplyRtnOnFSet
(
STsdbRepo
*
pRepo
,
SDFileSet
*
pSet
,
SRtn
*
pRtn
);
void
*
tsdbCommitData
(
STsdbRepo
*
pRepo
)
{
void
*
tsdbCommitData
(
STsdbRepo
*
pRepo
)
{
if
(
pRepo
->
imem
==
NULL
)
{
if
(
pRepo
->
imem
==
NULL
)
{
...
@@ -117,6 +115,151 @@ _err:
...
@@ -117,6 +115,151 @@ _err:
return
NULL
;
return
NULL
;
}
}
int
tsdbApplyRtnOnFSet
(
STsdbRepo
*
pRepo
,
SDFileSet
*
pSet
,
SRtn
*
pRtn
)
{
SDiskID
did
;
SDFileSet
nSet
;
STsdbFS
*
pfs
=
REPO_FS
(
pRepo
);
int
level
;
ASSERT
(
pSet
->
fid
>=
pRtn
->
minFid
);
level
=
tsdbGetFidLevel
(
pSet
->
fid
,
pRtn
);
tfsAllocDisk
(
level
,
&
(
did
.
level
),
&
(
did
.
id
));
if
(
did
.
level
==
TFS_UNDECIDED_LEVEL
)
{
terrno
=
TSDB_CODE_TDB_NO_AVAIL_DISK
;
return
-
1
;
}
if
(
did
.
level
>
TSDB_FSET_LEVEL
(
pSet
))
{
// Need to move the FSET to higher level
tsdbInitDFileSet
(
&
nSet
,
did
,
REPO_ID
(
pRepo
),
pSet
->
fid
,
FS_TXN_VERSION
(
pfs
));
if
(
tsdbCopyDFileSet
(
pSet
,
&
nSet
)
<
0
)
{
tsdbError
(
"vgId:%d failed to copy FSET %d from level %d to level %d since %s"
,
REPO_ID
(
pRepo
),
pSet
->
fid
,
TSDB_FSET_LEVEL
(
pSet
),
did
.
level
,
tstrerror
(
terrno
));
return
-
1
;
}
if
(
tsdbUpdateDFileSet
(
pfs
,
&
nSet
)
<
0
)
{
return
-
1
;
}
tsdbInfo
(
"vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d"
,
REPO_ID
(
pRepo
),
pSet
->
fid
,
TSDB_FSET_LEVEL
(
pSet
),
TSDB_FSET_ID
(
pSet
),
did
.
level
,
did
.
id
);
}
else
{
// On a correct level
if
(
tsdbUpdateDFileSet
(
pfs
,
pSet
)
<
0
)
{
return
-
1
;
}
}
return
0
;
}
int
tsdbWriteBlockInfoImpl
(
SDFile
*
pHeadf
,
STable
*
pTable
,
SArray
*
pSupA
,
SArray
*
pSubA
,
void
**
ppBuf
,
SBlockIdx
*
pIdx
)
{
size_t
nSupBlocks
;
size_t
nSubBlocks
;
uint32_t
tlen
;
SBlockInfo
*
pBlkInfo
;
int64_t
offset
;
SBlock
*
pBlock
;
memset
(
pIdx
,
0
,
sizeof
(
*
pIdx
));
nSupBlocks
=
taosArrayGetSize
(
pSupA
);
nSubBlocks
=
(
pSubA
==
NULL
)
?
0
:
taosArrayGetSize
(
pSubA
);
if
(
nSupBlocks
<=
0
)
{
// No data (data all deleted)
return
0
;
}
tlen
=
(
uint32_t
)(
sizeof
(
SBlockInfo
)
+
sizeof
(
SBlock
)
*
(
nSupBlocks
+
nSubBlocks
)
+
sizeof
(
TSCKSUM
));
if
(
tsdbMakeRoom
(
ppBuf
,
tlen
)
<
0
)
return
-
1
;
pBlkInfo
=
*
ppBuf
;
pBlkInfo
->
delimiter
=
TSDB_FILE_DELIMITER
;
pBlkInfo
->
tid
=
TABLE_TID
(
pTable
);
pBlkInfo
->
uid
=
TABLE_UID
(
pTable
);
memcpy
((
void
*
)(
pBlkInfo
->
blocks
),
taosArrayGet
(
pSupA
,
0
),
nSupBlocks
*
sizeof
(
SBlock
));
if
(
nSubBlocks
>
0
)
{
memcpy
((
void
*
)(
pBlkInfo
->
blocks
+
nSupBlocks
),
taosArrayGet
(
pSubA
,
0
),
nSubBlocks
*
sizeof
(
SBlock
));
for
(
int
i
=
0
;
i
<
nSupBlocks
;
i
++
)
{
pBlock
=
pBlkInfo
->
blocks
+
i
;
if
(
pBlock
->
numOfSubBlocks
>
1
)
{
pBlock
->
offset
+=
(
sizeof
(
SBlockInfo
)
+
sizeof
(
SBlock
)
*
nSupBlocks
);
}
}
}
taosCalcChecksumAppend
(
0
,
(
uint8_t
*
)
pBlkInfo
,
tlen
);
if
(
tsdbAppendDFile
(
pHeadf
,
(
void
*
)
pBlkInfo
,
tlen
,
&
offset
)
<
0
)
{
return
-
1
;
}
tsdbUpdateDFileMagic
(
pHeadf
,
POINTER_SHIFT
(
pBlkInfo
,
tlen
-
sizeof
(
TSCKSUM
)));
// Set pIdx
pBlock
=
taosArrayGetLast
(
pSupA
);
pIdx
->
tid
=
TABLE_TID
(
pTable
);
pIdx
->
uid
=
TABLE_UID
(
pTable
);
pIdx
->
hasLast
=
pBlock
->
last
?
1
:
0
;
pIdx
->
maxKey
=
pBlock
->
keyLast
;
pIdx
->
numOfBlocks
=
(
uint32_t
)
nSupBlocks
;
pIdx
->
len
=
tlen
;
pIdx
->
offset
=
(
uint32_t
)
offset
;
return
0
;
}
int
tsdbWriteBlockIdx
(
SDFile
*
pHeadf
,
SArray
*
pIdxA
,
void
**
ppBuf
)
{
SBlockIdx
*
pBlkIdx
;
size_t
nidx
=
taosArrayGetSize
(
pIdxA
);
int
tlen
=
0
,
size
;
int64_t
offset
;
if
(
nidx
<=
0
)
{
// All data are deleted
pHeadf
->
info
.
offset
=
0
;
pHeadf
->
info
.
len
=
0
;
return
0
;
}
for
(
size_t
i
=
0
;
i
<
nidx
;
i
++
)
{
pBlkIdx
=
(
SBlockIdx
*
)
taosArrayGet
(
pIdxA
,
i
);
size
=
tsdbEncodeSBlockIdx
(
NULL
,
pBlkIdx
);
if
(
tsdbMakeRoom
(
ppBuf
,
tlen
+
size
)
<
0
)
return
-
1
;
void
*
ptr
=
POINTER_SHIFT
(
*
ppBuf
,
tlen
);
tsdbEncodeSBlockIdx
(
&
ptr
,
pBlkIdx
);
tlen
+=
size
;
}
tlen
+=
sizeof
(
TSCKSUM
);
if
(
tsdbMakeRoom
(
ppBuf
,
tlen
)
<
0
)
return
-
1
;
taosCalcChecksumAppend
(
0
,
(
uint8_t
*
)(
*
ppBuf
),
tlen
);
if
(
tsdbAppendDFile
(
pHeadf
,
*
ppBuf
,
tlen
,
&
offset
)
<
tlen
)
{
return
-
1
;
}
tsdbUpdateDFileMagic
(
pHeadf
,
POINTER_SHIFT
(
*
ppBuf
,
tlen
-
sizeof
(
TSCKSUM
)));
pHeadf
->
info
.
offset
=
(
uint32_t
)
offset
;
pHeadf
->
info
.
len
=
tlen
;
return
0
;
}
// =================== Commit Meta Data
// =================== Commit Meta Data
static
int
tsdbCommitMeta
(
STsdbRepo
*
pRepo
)
{
static
int
tsdbCommitMeta
(
STsdbRepo
*
pRepo
)
{
STsdbFS
*
pfs
=
REPO_FS
(
pRepo
);
STsdbFS
*
pfs
=
REPO_FS
(
pRepo
);
...
@@ -446,7 +589,8 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
...
@@ -446,7 +589,8 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
}
}
}
}
if
(
tsdbWriteBlockIdx
(
pCommith
)
<
0
)
{
if
(
tsdbWriteBlockIdx
(
TSDB_COMMIT_HEAD_FILE
(
pCommith
),
pCommith
->
aBlkIdx
,
(
void
**
)(
&
(
TSDB_COMMIT_BUF
(
pCommith
))))
<
0
)
{
tsdbError
(
"vgId:%d failed to write SBlockIdx part to FSET %d since %s"
,
REPO_ID
(
pRepo
),
fid
,
tstrerror
(
terrno
));
tsdbError
(
"vgId:%d failed to write SBlockIdx part to FSET %d since %s"
,
REPO_ID
(
pRepo
),
fid
,
tstrerror
(
terrno
));
tsdbCloseCommitFile
(
pCommith
,
true
);
tsdbCloseCommitFile
(
pCommith
,
true
);
// revert the file change
// revert the file change
...
@@ -754,23 +898,21 @@ static int tsdbComparKeyBlock(const void *arg1, const void *arg2) {
...
@@ -754,23 +898,21 @@ static int tsdbComparKeyBlock(const void *arg1, const void *arg2) {
}
}
}
}
static
int
tsdbWriteBlock
(
SCommitH
*
pCommith
,
SDFile
*
pDFile
,
SDataCols
*
pDataCols
,
SBlock
*
pBlock
,
bool
isLast
,
int
tsdbWriteBlockImpl
(
STsdbRepo
*
pRepo
,
STable
*
pTable
,
SDFile
*
pDFile
,
SDataCols
*
pDataCols
,
SBlock
*
pBlock
,
bool
isSuper
)
{
bool
isLast
,
bool
isSuper
,
void
**
ppBuf
,
void
**
ppCBuf
)
{
STsdbRepo
*
pRepo
=
TSDB_COMMIT_REPO
(
pCommith
);
STsdbCfg
*
pCfg
=
REPO_CFG
(
pRepo
);
STsdbCfg
*
pCfg
=
REPO_CFG
(
pRepo
);
SBlockData
*
pBlockData
;
SBlockData
*
pBlockData
;
int64_t
offset
=
0
;
int64_t
offset
=
0
;
STable
*
pTable
=
TSDB_COMMIT_TABLE
(
pCommith
);
int
rowsToWrite
=
pDataCols
->
numOfRows
;
int
rowsToWrite
=
pDataCols
->
numOfRows
;
ASSERT
(
rowsToWrite
>
0
&&
rowsToWrite
<=
pCfg
->
maxRowsPerFileBlock
);
ASSERT
(
rowsToWrite
>
0
&&
rowsToWrite
<=
pCfg
->
maxRowsPerFileBlock
);
ASSERT
((
!
isLast
)
||
rowsToWrite
<
pCfg
->
minRowsPerFileBlock
);
ASSERT
((
!
isLast
)
||
rowsToWrite
<
pCfg
->
minRowsPerFileBlock
);
// Make buffer space
// Make buffer space
if
(
tsdbMakeRoom
(
(
void
**
)(
&
TSDB_COMMIT_BUF
(
pCommith
))
,
TSDB_BLOCK_STATIS_SIZE
(
pDataCols
->
numOfCols
))
<
0
)
{
if
(
tsdbMakeRoom
(
ppBuf
,
TSDB_BLOCK_STATIS_SIZE
(
pDataCols
->
numOfCols
))
<
0
)
{
return
-
1
;
return
-
1
;
}
}
pBlockData
=
(
SBlockData
*
)
TSDB_COMMIT_BUF
(
pCommith
);
pBlockData
=
(
SBlockData
*
)
(
*
ppBuf
);
// Get # of cols not all NULL(not including key column)
// Get # of cols not all NULL(not including key column)
int
nColsNotAllNull
=
0
;
int
nColsNotAllNull
=
0
;
...
@@ -816,23 +958,23 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
...
@@ -816,23 +958,23 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
void
*
tptr
;
void
*
tptr
;
// Make room
// Make room
if
(
tsdbMakeRoom
(
(
void
**
)(
&
TSDB_COMMIT_BUF
(
pCommith
))
,
lsize
+
tlen
+
COMP_OVERFLOW_BYTES
+
sizeof
(
TSCKSUM
))
<
0
)
{
if
(
tsdbMakeRoom
(
ppBuf
,
lsize
+
tlen
+
COMP_OVERFLOW_BYTES
+
sizeof
(
TSCKSUM
))
<
0
)
{
return
-
1
;
return
-
1
;
}
}
pBlockData
=
(
SBlockData
*
)
TSDB_COMMIT_BUF
(
pCommith
);
pBlockData
=
(
SBlockData
*
)
(
*
ppBuf
);
pBlockCol
=
pBlockData
->
cols
+
tcol
;
pBlockCol
=
pBlockData
->
cols
+
tcol
;
tptr
=
POINTER_SHIFT
(
pBlockData
,
lsize
);
tptr
=
POINTER_SHIFT
(
pBlockData
,
lsize
);
if
(
pCfg
->
compression
==
TWO_STAGE_COMP
&&
if
(
pCfg
->
compression
==
TWO_STAGE_COMP
&&
tsdbMakeRoom
(
(
void
**
)(
&
TSDB_COMMIT_COMP_BUF
(
pCommith
))
,
tlen
+
COMP_OVERFLOW_BYTES
)
<
0
)
{
tsdbMakeRoom
(
ppCBuf
,
tlen
+
COMP_OVERFLOW_BYTES
)
<
0
)
{
return
-
1
;
return
-
1
;
}
}
// Compress or just copy
// Compress or just copy
if
(
pCfg
->
compression
)
{
if
(
pCfg
->
compression
)
{
flen
=
(
*
(
tDataTypes
[
pDataCol
->
type
].
compFunc
))((
char
*
)
pDataCol
->
pData
,
tlen
,
rowsToWrite
,
tptr
,
flen
=
(
*
(
tDataTypes
[
pDataCol
->
type
].
compFunc
))((
char
*
)
pDataCol
->
pData
,
tlen
,
rowsToWrite
,
tptr
,
tlen
+
COMP_OVERFLOW_BYTES
,
pCfg
->
compression
,
tlen
+
COMP_OVERFLOW_BYTES
,
pCfg
->
compression
,
*
ppCBuf
,
TSDB_COMMIT_COMP_BUF
(
pCommith
),
tlen
+
COMP_OVERFLOW_BYTES
);
tlen
+
COMP_OVERFLOW_BYTES
);
}
else
{
}
else
{
flen
=
tlen
;
flen
=
tlen
;
memcpy
(
tptr
,
pDataCol
->
pData
,
flen
);
memcpy
(
tptr
,
pDataCol
->
pData
,
flen
);
...
@@ -888,117 +1030,33 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
...
@@ -888,117 +1030,33 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo
return
0
;
return
0
;
}
}
static
int
tsdbWriteBlock
(
SCommitH
*
pCommith
,
SDFile
*
pDFile
,
SDataCols
*
pDataCols
,
SBlock
*
pBlock
,
bool
isLast
,
bool
isSuper
)
{
return
tsdbWriteBlockImpl
(
TSDB_COMMIT_REPO
(
pCommith
),
TSDB_COMMIT_TABLE
(
pCommith
),
pDFile
,
pDataCols
,
pBlock
,
isLast
,
isSuper
,
(
void
**
)(
&
(
TSDB_COMMIT_BUF
(
pCommith
))),
(
void
**
)(
&
(
TSDB_COMMIT_COMP_BUF
(
pCommith
))));
}
static
int
tsdbWriteBlockInfo
(
SCommitH
*
pCommih
)
{
static
int
tsdbWriteBlockInfo
(
SCommitH
*
pCommih
)
{
SDFile
*
pHeadf
=
TSDB_COMMIT_HEAD_FILE
(
pCommih
);
SDFile
*
pHeadf
=
TSDB_COMMIT_HEAD_FILE
(
pCommih
);
SBlockIdx
blkIdx
;
SBlockIdx
blkIdx
;
STable
*
pTable
=
TSDB_COMMIT_TABLE
(
pCommih
);
STable
*
pTable
=
TSDB_COMMIT_TABLE
(
pCommih
);
SBlock
*
pBlock
;
size_t
nSupBlocks
;
size_t
nSubBlocks
;
uint32_t
tlen
;
SBlockInfo
*
pBlkInfo
;
int64_t
offset
;
nSupBlocks
=
taosArrayGetSize
(
pCommih
->
aSupBlk
);
nSubBlocks
=
taosArrayGetSize
(
pCommih
->
aSubBlk
);
if
(
nSupBlocks
<=
0
)
{
// No data (data all deleted)
return
0
;
}
tlen
=
(
uint32_t
)(
sizeof
(
SBlockInfo
)
+
sizeof
(
SBlock
)
*
(
nSupBlocks
+
nSubBlocks
)
+
sizeof
(
TSCKSUM
));
// Write SBlockInfo part
if
(
tsdbMakeRoom
((
void
**
)(
&
(
TSDB_COMMIT_BUF
(
pCommih
))),
tlen
)
<
0
)
return
-
1
;
pBlkInfo
=
TSDB_COMMIT_BUF
(
pCommih
);
pBlkInfo
->
delimiter
=
TSDB_FILE_DELIMITER
;
pBlkInfo
->
tid
=
TABLE_TID
(
pTable
);
pBlkInfo
->
uid
=
TABLE_UID
(
pTable
);
memcpy
((
void
*
)(
pBlkInfo
->
blocks
),
taosArrayGet
(
pCommih
->
aSupBlk
,
0
),
nSupBlocks
*
sizeof
(
SBlock
));
if
(
nSubBlocks
>
0
)
{
memcpy
((
void
*
)(
pBlkInfo
->
blocks
+
nSupBlocks
),
taosArrayGet
(
pCommih
->
aSubBlk
,
0
),
nSubBlocks
*
sizeof
(
SBlock
));
for
(
int
i
=
0
;
i
<
nSupBlocks
;
i
++
)
{
pBlock
=
pBlkInfo
->
blocks
+
i
;
if
(
pBlock
->
numOfSubBlocks
>
1
)
{
pBlock
->
offset
+=
(
sizeof
(
SBlockInfo
)
+
sizeof
(
SBlock
)
*
nSupBlocks
);
}
}
}
taosCalcChecksumAppend
(
0
,
(
uint8_t
*
)
pBlkInfo
,
tlen
);
if
(
tsdbWriteBlockInfoImpl
(
pHeadf
,
pTable
,
pCommih
->
aSupBlk
,
pCommih
->
aSubBlk
,
(
void
**
)(
&
(
TSDB_COMMIT_BUF
(
pCommih
))),
&
blkIdx
)
<
0
)
{
if
(
tsdbAppendDFile
(
pHeadf
,
TSDB_COMMIT_BUF
(
pCommih
),
tlen
,
&
offset
)
<
0
)
{
return
-
1
;
return
-
1
;
}
}
tsdbUpdateDFileMagic
(
pHeadf
,
POINTER_SHIFT
(
pBlkInfo
,
tlen
-
sizeof
(
TSCKSUM
)));
if
(
blkIdx
.
numOfBlocks
==
0
)
{
// Set blkIdx
pBlock
=
taosArrayGet
(
pCommih
->
aSupBlk
,
nSupBlocks
-
1
);
blkIdx
.
tid
=
TABLE_TID
(
pTable
);
blkIdx
.
uid
=
TABLE_UID
(
pTable
);
blkIdx
.
hasLast
=
pBlock
->
last
?
1
:
0
;
blkIdx
.
maxKey
=
pBlock
->
keyLast
;
blkIdx
.
numOfBlocks
=
(
uint32_t
)
nSupBlocks
;
blkIdx
.
len
=
tlen
;
blkIdx
.
offset
=
(
uint32_t
)
offset
;
ASSERT
(
blkIdx
.
numOfBlocks
>
0
);
if
(
taosArrayPush
(
pCommih
->
aBlkIdx
,
(
void
*
)(
&
blkIdx
))
==
NULL
)
{
terrno
=
TSDB_CODE_TDB_OUT_OF_MEMORY
;
return
-
1
;
}
return
0
;
}
static
int
tsdbWriteBlockIdx
(
SCommitH
*
pCommih
)
{
SBlockIdx
*
pBlkIdx
=
NULL
;
SDFile
*
pHeadf
=
TSDB_COMMIT_HEAD_FILE
(
pCommih
);
size_t
nidx
=
taosArrayGetSize
(
pCommih
->
aBlkIdx
);
int
tlen
=
0
,
size
=
0
;
int64_t
offset
=
0
;
if
(
nidx
<=
0
)
{
// All data are deleted
pHeadf
->
info
.
offset
=
0
;
pHeadf
->
info
.
len
=
0
;
return
0
;
return
0
;
}
}
for
(
size_t
i
=
0
;
i
<
nidx
;
i
++
)
{
if
(
taosArrayPush
(
pCommih
->
aBlkIdx
,
(
void
*
)(
&
blkIdx
))
==
NULL
)
{
pBlkIdx
=
(
SBlockIdx
*
)
taosArrayGet
(
pCommih
->
aBlkIdx
,
i
);
terrno
=
TSDB_CODE_TDB_OUT_OF_MEMORY
;
size
=
tsdbEncodeSBlockIdx
(
NULL
,
pBlkIdx
);
if
(
tsdbMakeRoom
((
void
**
)(
&
TSDB_COMMIT_BUF
(
pCommih
)),
tlen
+
size
)
<
0
)
return
-
1
;
void
*
ptr
=
POINTER_SHIFT
(
TSDB_COMMIT_BUF
(
pCommih
),
tlen
);
tsdbEncodeSBlockIdx
(
&
ptr
,
pBlkIdx
);
tlen
+=
size
;
}
tlen
+=
sizeof
(
TSCKSUM
);
if
(
tsdbMakeRoom
((
void
**
)(
&
TSDB_COMMIT_BUF
(
pCommih
)),
tlen
)
<
0
)
return
-
1
;
taosCalcChecksumAppend
(
0
,
(
uint8_t
*
)
TSDB_COMMIT_BUF
(
pCommih
),
tlen
);
if
(
tsdbAppendDFile
(
pHeadf
,
TSDB_COMMIT_BUF
(
pCommih
),
tlen
,
&
offset
)
<
tlen
)
{
tsdbError
(
"vgId:%d failed to write block index part to file %s since %s"
,
TSDB_COMMIT_REPO_ID
(
pCommih
),
TSDB_FILE_FULL_NAME
(
pHeadf
),
tstrerror
(
terrno
));
return
-
1
;
return
-
1
;
}
}
tsdbUpdateDFileMagic
(
pHeadf
,
POINTER_SHIFT
(
TSDB_COMMIT_BUF
(
pCommih
),
tlen
-
sizeof
(
TSCKSUM
)));
pHeadf
->
info
.
offset
=
(
uint32_t
)
offset
;
pHeadf
->
info
.
len
=
tlen
;
return
0
;
return
0
;
}
}
...
@@ -1454,45 +1512,3 @@ int tsdbApplyRtn(STsdbRepo *pRepo) {
...
@@ -1454,45 +1512,3 @@ int tsdbApplyRtn(STsdbRepo *pRepo) {
return
0
;
return
0
;
}
}
static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) {
  SDiskID   did;
  SDFileSet nSet;
  STsdbFS * pfs = REPO_FS(pRepo);
  int       level;

  ASSERT(pSet->fid >= pRtn->minFid);

  level = tsdbGetFidLevel(pSet->fid, pRtn);

  tfsAllocDisk(level, &(did.level), &(did.id));
  if (did.level == TFS_UNDECIDED_LEVEL) {
    terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
    return -1;
  }

  if (did.level > TSDB_FSET_LEVEL(pSet)) {
    // Need to move the FSET to higher level
    tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs));

    if (tsdbCopyDFileSet(pSet, &nSet) < 0) {
      tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
                TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno));
      return -1;
    }

    if (tsdbUpdateDFileSet(pfs, &nSet) < 0) {
      return -1;
    }

    tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
             TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id);
  } else {
    // On a correct level
    if (tsdbUpdateDFileSet(pfs, pSet) < 0) {
      return -1;
    }
  }

  return 0;
}
\ No newline at end of file
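tsdbApplyRtnOnFSet leans on tsdbGetFidLevel to map a file-set id onto a storage tier before asking tfsAllocDisk for space on that tier. A minimal sketch of that mapping is shown below; it assumes the SRtn retention descriptor also carries midFid and maxFid boundaries (only minFid is visible in this hunk), so treat the field names as assumptions rather than the exact helper.

/* Sketch only: maps a file-set id to a disk level using assumed
 * minFid/midFid/maxFid boundaries derived from the retention policy. */
static int tsdbGetFidLevel(int fid, SRtn *pRtn) {
  if (fid >= pRtn->maxFid) return 0;  /* newest data stays on level 0 */
  if (fid >= pRtn->midFid) return 1;  /* warm data moves to level 1 */
  if (fid >= pRtn->minFid) return 2;  /* cold data moves to level 2 */
  return -1;                          /* older than retention: to be dropped */
}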
src/tsdb/src/tsdbCommitQueue.c
...
@@ -26,8 +26,9 @@
 } SCommitQueue;

 typedef struct {
+  TSDB_REQ_T req;
   STsdbRepo *pRepo;
-} SCommitReq;
+} SReq;

 static void *tsdbLoopCommit(void *arg);
...
@@ -90,16 +91,17 @@ void tsdbDestroyCommitQueue() {
   pthread_mutex_destroy(&(pQueue->lock));
 }

-int tsdbScheduleCommit(STsdbRepo *pRepo) {
+int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req) {
   SCommitQueue *pQueue = &tsCommitQueue;

-  SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SCommitReq));
+  SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SReq));
   if (pNode == NULL) {
     terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
     return -1;
   }

-  ((SCommitReq *)pNode->data)->pRepo = pRepo;
+  ((SReq *)pNode->data)->req = req;
+  ((SReq *)pNode->data)->pRepo = pRepo;

   pthread_mutex_lock(&(pQueue->lock));
...
@@ -154,6 +156,7 @@ static void *tsdbLoopCommit(void *arg) {
   SCommitQueue *pQueue = &tsCommitQueue;
   SListNode *   pNode = NULL;
   STsdbRepo *   pRepo = NULL;
+  TSDB_REQ_T    req;

   while (true) {
     pthread_mutex_lock(&(pQueue->lock));
...
@@ -174,14 +177,22 @@ static void *tsdbLoopCommit(void *arg) {
     pthread_mutex_unlock(&(pQueue->lock));

-    pRepo = ((SCommitReq *)pNode->data)->pRepo;
+    req = ((SReq *)pNode->data)->req;
+    pRepo = ((SReq *)pNode->data)->pRepo;

     // check if need to apply new config
     if (pRepo->config_changed) {
      tsdbApplyRepoConfig(pRepo);
     }

-    tsdbCommitData(pRepo);
+    if (req == COMMIT_REQ) {
+      tsdbCommitData(pRepo);
+    } else if (req == COMPACT_REQ) {
+      tsdbCompactImpl(pRepo);
+    } else {
+      ASSERT(0);
+    }

     listNodeFree(pNode);
   }
...
src/tsdb/src/tsdbCompact.c
...
@@ -12,3 +12,11 @@
  * You should have received a copy of the GNU Affero General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#include "tsdb.h"
+
+#ifndef _TSDB_PLUGINS
+
+int   tsdbCompact(STsdbRepo *pRepo) { return 0; }
+void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; }
+
+#endif
\ No newline at end of file
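The community build carries only these stubs; the full compaction path sits behind _TSDB_PLUGINS. As a rough sketch of how a plugin build could hook into the new request queue, the wrapper below enqueues a COMPACT_REQ the same way tsdbAsyncCommit enqueues a COMMIT_REQ. The wrapper body is an assumption for illustration, not code from this commit.

/* Sketch: schedule compaction through the shared commit queue so that
 * tsdbLoopCommit dispatches it to tsdbCompactImpl on the worker thread. */
int tsdbCompact(STsdbRepo *pRepo) {
  if (tsdbLockRepo(pRepo) < 0) return -1;
  int code = tsdbScheduleCommit(pRepo, COMPACT_REQ);  /* reuse the commit queue */
  if (tsdbUnlockRepo(pRepo) < 0) return -1;
  return code;
}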
src/tsdb/src/tsdbMemTable.c
...
@@ -288,7 +288,7 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {
   if (tsdbLockRepo(pRepo) < 0) return -1;
   pRepo->imem = pRepo->mem;
   pRepo->mem = NULL;
-  tsdbScheduleCommit(pRepo);
+  tsdbScheduleCommit(pRepo, COMMIT_REQ);
   if (tsdbUnlockRepo(pRepo) < 0) return -1;

   return 0;
...
src/tsdb/src/tsdbMeta.c
...
@@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
             TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid]));
     return 0;
   } else {
-    tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
-              " exists, replace it with new table, this can be not reasonable",
-              REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
-              TABLE_UID(pMeta->tables[tid]));
+    tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64
+             " exists, replace it with new table, this can be not reasonable",
+             REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
+             TABLE_UID(pMeta->tables[tid]));
...
@@ -1055,10 +1055,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
   STable *pSTable = pTable->pSuper;
   ASSERT(pSTable != NULL);

-  STSchema *pSchema = tsdbGetTableTagSchema(pTable);
-  STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
-  char *    key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
+  char *key = getTagIndexKey(pTable);

   SArray *res = tSkipListGet(pSTable->pIndex, key);

   size_t size = taosArrayGetSize(res);
...
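The three removed lines spell out what getTagIndexKey has to return: the value of the default index tag column from the table's tag row. A sketch of such a helper, assuming it simply wraps the logic that used to be inline here, is:

/* Sketch: assumed wrapper around the lookup that tsdbRemoveTableFromIndex
 * previously carried inline (see the removed lines above). */
static char *getTagIndexKey(STable *pTable) {
  STSchema *pSchema = tsdbGetTableTagSchema(pTable);
  STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
  return (char *)tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
}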
src/tsdb/src/tsdbRead.c
...
@@ -368,20 +368,21 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
     goto out_of_memory;
   }

-  assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);
+  assert(pCond != NULL && pMemRef != NULL);

   if (ASCENDING_TRAVERSE(pCond->order)) {
     assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
   } else {
     assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
   }

+  if (pCond->numOfCols > 0) {
     // allocate buffer in order to load data blocks from file
     pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
     if (pQueryHandle->statis == NULL) {
       goto out_of_memory;
     }

     // todo: use list instead of array?
     pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));
     if (pQueryHandle->pColumns == NULL) {
       goto out_of_memory;
     }
...
@@ -398,10 +399,8 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
       pQueryHandle->statis[i].colId = colInfo.info.colId;
     }

-  if (pCond->numOfCols > 0) {
     pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
   }

   STsdbMeta *pMeta = tsdbGetMeta(tsdb);
   assert(pMeta != NULL);
...
src/tsdb/src/tsdbReadImpl.c
...
@@ -258,7 +258,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
   for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
     iBlock++;
     if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1;
-    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1;
+    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
   }

   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
...
@@ -284,7 +284,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo,
   for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
     iBlock++;
     if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1;
-    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1;
+    if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
   }

   ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
...
src/util/inc/ttoken.h
...
@@ -183,6 +183,7 @@ void taosCleanupKeywordsTable();

 SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char *newToken);
+SStrToken taosTokenDup(SStrToken *pToken, char *buf, int32_t len);

 #ifdef __cplusplus
 }
...
src/util/src/terror.c
...
@@ -224,6 +224,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is fu
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL,      "Database memory is full for waiting commit")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING,      "Database is dropping")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING,     "Database is balancing")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING,       "Database is closing")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED,       "Database suspended")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH,    "Database write operation denied")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING,       "Database is syncing")
...
src/util/src/ttokenizer.c
...
@@ -218,7 +218,8 @@ static SKeyword keywordTable[] = {
     {"DISTINCT",     TK_DISTINCT},
     {"PARTITIONS",   TK_PARTITIONS},
     {"TOPIC",        TK_TOPIC},
-    {"TOPICS",       TK_TOPICS}
+    {"TOPICS",       TK_TOPICS},
+    {"MODIFY",       TK_MODIFY}
 };

 static const char isIdChar[] = {
...
@@ -674,3 +675,15 @@ void taosCleanupKeywordsTable() {
     taosHashCleanup(m);
   }
 }
+
+SStrToken taosTokenDup(SStrToken *pToken, char *buf, int32_t len) {
+  assert(pToken != NULL && buf != NULL);
+  SStrToken token = *pToken;
+
+  token.z = buf;
+  assert(len > token.n);
+
+  strncpy(token.z, pToken->z, pToken->n);
+  token.z[token.n] = 0;
+
+  return token;
+}
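SStrToken slices point into the original SQL text and are not NUL-terminated, which is exactly what taosTokenDup addresses: it copies the token into a caller-supplied buffer and terminates it. A small usage sketch follows; the buffer size and the pToken variable are illustrative assumptions.

/* Sketch: make a parsed token usable as a plain C string. */
char      buf[128] = {0};                                /* must be larger than pToken->n */
SStrToken copy = taosTokenDup(pToken, buf, sizeof(buf)); /* pToken: a token produced by the tokenizer */
/* copy.z now points into buf and copy.z[copy.n] == 0 */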
src/vnode/src/vnodeMgmt.c
...
@@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) {
 }

 void *vnodeAcquire(int32_t vgId) {
-  SVnodeObj **ppVnode = NULL;
+  SVnodeObj *pVnode = NULL;
   if (tsVnodesHash != NULL) {
-    ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *));
+    taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
   }

-  if (ppVnode == NULL || *ppVnode == NULL) {
+  if (pVnode == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
     vDebug("vgId:%d, not exist", vgId);
     return NULL;
   }

-  return *ppVnode;
+  return pVnode;
 }

 void vnodeRelease(void *vparam) {
...
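With this change vnodeAcquire hands back the vnode object itself: taosHashGetClone fills a plain SVnodeObj pointer instead of returning a pointer-to-pointer into the hash. Callers keep the usual acquire/release pairing, roughly as in the sketch below (error handling trimmed for brevity).

/* Sketch: typical caller pattern around the refactored vnodeAcquire. */
SVnodeObj *pVnode = vnodeAcquire(vgId);  /* reference count bumped by vnodeIncRef */
if (pVnode == NULL) {
  return terrno;                         /* TSDB_CODE_VND_INVALID_VGROUP_ID */
}
/* ... use the vnode ... */
vnodeRelease(pVnode);                    /* drop the reference when done */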
src/vnode/src/vnodeWrite.c
...
@@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
 }

 int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) {
+  SVnodeObj *pVnode = vparam;
+  if (qtype == TAOS_QTYPE_RPC) {
+    if (!vnodeInReadyStatus(pVnode)) {
+      return TSDB_CODE_APP_NOT_READY;  // it may be in deleting or closing state
+    }
+
+    if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+      return TSDB_CODE_APP_NOT_READY;
+    }
+  }
+
   SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam);
   if (pWrite == NULL) {
     assert(terrno != 0);
...
tests/pytest/alter/alter_cacheLastRow.py (new file, 0 → 100644)

###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
from datetime import datetime


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        tdSql.prepare()

        tdSql.query('show databases')
        tdSql.checkData(0, 15, 0)

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # write 5M rows into db, then restart to force the data move into disk.
        # create 500 tables
        os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath)
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdSql.execute('use db')

        # prepare to query 500 tables last_row()
        tableName = []
        for i in range(500):
            tableName.append(f"stb_{i}")
        tdSql.execute('use db')
        lastRow_Off_start = datetime.now()

        slow = 0  # count time where lastRow on is slower
        for i in range(5):
            # switch lastRow to off and check
            tdSql.execute('alter database db cachelast 0')
            tdSql.query('show databases')
            tdSql.checkData(0, 15, 0)

            # run last_row(*) query 500 times
            for i in range(500):
                tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
            lastRow_Off_end = datetime.now()

            tdLog.debug(f'time used: {lastRow_Off_end - lastRow_Off_start}')

            # switch lastRow to on and check
            tdSql.execute('alter database db cachelast 1')
            tdSql.query('show databases')
            tdSql.checkData(0, 15, 1)

            # run last_row(*) query 500 times
            tdSql.execute('use db')
            lastRow_On_start = datetime.now()
            for i in range(500):
                tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}')
            lastRow_On_end = datetime.now()

            tdLog.debug(f'time used: {lastRow_On_end - lastRow_On_start}')

            # check which one used more time
            if (lastRow_Off_end - lastRow_Off_start > lastRow_On_end - lastRow_On_start):
                pass
            else:
                slow += 1
            tdLog.debug(slow)

        if slow > 1:  # tolerance for the first time
            tdLog.exit('lastRow hot alter failed')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/crash_gen/crash_gen_main.py
...
@@ -37,6 +37,7 @@ import requests
 import gc
 import taos

 from .shared.types import TdColumns, TdTags

 # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
...
@@ -160,6 +161,7 @@ class WorkerThread:
                 Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
                 break

             # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
             try:
                 if (Config.getConfig().per_thread_db_connection):  # most likely TRUE
...
@@ -1362,9 +1364,12 @@ class Task():
             Progress.emit(Progress.ACCEPTABLE_ERROR)
             self._err = err
         else: # not an acceptable error
-            errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format(
+            shortTid = threading.get_ident() % 10000
+            errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format(
                 self.__class__.__name__,
-                errno2, err, wt.getDbConn().getLastSql())
+                errno2, shortTid, err, wt.getDbConn().getLastSql())
             self.logDebug(errMsg)
             if Config.getConfig().debug:
                 # raise # so that we see full stack
...
@@ -1411,11 +1416,15 @@ class Task():
     def lockTable(self, ftName): # full table name
         # print(" <<" + ftName + '_', end="", flush=True)
-        with Task._lock:
-            if not ftName in Task._tableLocks:
+        with Task._lock: # SHORT lock! so we only protect lock creation
+            if not ftName in Task._tableLocks: # Create new lock and add to list, if needed
                 Task._tableLocks[ftName] = threading.Lock()
-            Task._tableLocks[ftName].acquire()
+
+        # No lock protection, anybody can do this any time
+        lock = Task._tableLocks[ftName]
+        # Logging.info("Acquiring lock: {}, {}".format(ftName, lock))
+        lock.acquire()
+        # Logging.info("Acquiring lock successful: {}".format(lock))

     def unlockTable(self, ftName):
         # print('_' + ftName + ">> ", end="", flush=True)
...
@@ -1425,7 +1434,13 @@ class Task():
         lock = Task._tableLocks[ftName]
         if not lock.locked():
             raise RuntimeError("Corrupte state, already unlocked")
-        lock.release()
+
+        # Important note, we want to protect unlocking under the task level
+        # locking, because we don't want the lock to be deleted (maybe in the futur)
+        # while we unlock it
+        # Logging.info("Releasing lock: {}".format(lock))
+        lock.release()
+        # Logging.info("Releasing lock successful: {}".format(lock))

 class ExecutionStats:
...
@@ -1696,6 +1711,11 @@ class TdSuperTable:
         return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0

     def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
+        '''
+        Make sure a regular table exists for this super table, creating it if necessary.
+        If there is an associated "Task" that wants to do this, "lock" this table so that
+        others don't access it while we create it.
+        '''
         dbName = self._dbName
         sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
         if dbc.query(sql) >= 1: # reg table exists already
...
@@ -1703,18 +1723,24 @@ class TdSuperTable:
         # acquire a lock first, so as to be able to *verify*. More details in TD-1471
         fullTableName = dbName + '.' + regTableName
-        if task is not None:  # TODO: what happens if we don't lock the table
-            task.lockTable(fullTableName)
+        if task is not None:  # Somethime thie operation is requested on behalf of a "task"
+            # Logging.info("Locking table for creation: {}".format(fullTableName))
+            task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access
+            # Logging.info("Table locked for creation".format(fullTableName))
         Progress.emit(Progress.CREATE_TABLE_ATTEMPT)  # ATTEMPT to create a new table
         # print("(" + fullTableName[-3:] + ")", end="", flush=True)
         try:
             sql = "CREATE TABLE {} USING {}.{} tags ({})".format(
                 fullTableName, dbName, self._stName, self._getTagStrForSql(dbc)
             )
+            # Logging.info("Creating regular with SQL: {}".format(sql))
             dbc.execute(sql)
+            # Logging.info("Regular table created: {}".format(sql))
         finally:
             if task is not None:
+                # Logging.info("Unlocking table after creation: {}".format(fullTableName))
                 task.unlockTable(fullTableName)  # no matter what
+                # Logging.info("Table unlocked after creation: {}".format(fullTableName))

     def _getTagStrForSql(self, dbc) :
         tags = self._getTags(dbc)
...
@@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask):
     def canBeginFrom(cls, state: AnyState):
         return state.canAddData()

+    def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+        if Config.getConfig().verify_data:
+            # Logging.info("Locking table: {}".format(fullTableName))
+            self.lockTable(fullTableName)
+            # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+            # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+        else:
+            # Logging.info("Skipping locking table")
+            pass
+
+    def _unlockTableIfNeeded(self, fullTableName):
+        if Config.getConfig().verify_data:
+            # Logging.info("Unlocking table: {}".format(fullTableName))
+            self.unlockTable(fullTableName)
+            # Logging.info("Table unlocked: {}".format(fullTableName))
+        else:
+            pass
+            # Logging.info("Skipping unlocking table")
+
     def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
+
         fullTableName = db.getName() + '.' + regTableName
+        self._lockTableIfNeeded(fullTableName, 'batch')

         sql = "INSERT INTO {} VALUES ".format(fullTableName)
         for j in range(numRecords):  # number of records per table
...
@@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask):
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor)
-        dbc.execute(sql)
+
+        # Logging.info("Adding data in batch: {}".format(sql))
+        try:
+            dbc.execute(sql)
+        finally:
+            # Logging.info("Data added in batch: {}".format(sql))
+            self._unlockTableIfNeeded(fullTableName)

     def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS

         for j in range(numRecords):  # number of records per table
-            nextInt = db.getNextInt()
+            intToWrite = db.getNextInt()
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             if Config.getConfig().record_ops:
                 self.prepToRecordOps()
                 if self.fAddLogReady is None:
                     raise CrashGenError("Unexpected empty fAddLogReady")
-                self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName))
                 self.fAddLogReady.flush()
                 os.fsync(self.fAddLogReady.fileno())

             # TODO: too ugly trying to lock the table reliably, refactor...
             fullTableName = db.getName() + '.' + regTableName
-            if Config.getConfig().verify_data:
-                self.lockTable(fullTableName)
-                # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+            self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock

             try:
                 sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format(  # removed: tags ('{}', {})
                     fullTableName,
                     # ds.getFixedSuperTableName(),
                     # ds.getNextBinary(), ds.getNextFloat(),
-                    nextTick, nextInt, nextColor)
+                    nextTick, intToWrite, nextColor)
+                # Logging.info("Adding data: {}".format(sql))
                 dbc.execute(sql)
+                # Logging.info("Data added: {}".format(sql))
+                intWrote = intToWrite

                 # Quick hack, attach an update statement here. TODO: create an "update" task
                 if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0:  # 1 in N chance, plus not using shaddow DB
-                    nextInt = db.getNextInt()
+                    intToUpdate = db.getNextInt() # Updated, but should not succeed
                     nextColor = db.getNextColor()
                     sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format(  # "INSERt" means "update" here
                         fullTableName,
-                        nextTick, nextInt, nextColor)
+                        nextTick, intToUpdate, nextColor)
                     # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
                     #     fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
                     dbc.execute(sql)
+                    intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.

             except: # Any exception at all
-                if Config.getConfig().verify_data:
-                    self.unlockTable(fullTableName)
+                self._unlockTableIfNeeded(fullTableName)
                 raise

             # Now read it back and verify, we might encounter an error if table is dropped
...
@@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask):
                 try:
                     readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
                         format(db.getName(), regTableName, nextTick))
-                    if readBack != nextInt :
+                    if readBack != intWrote :
                         raise taos.error.ProgrammingError(
                             "Failed to read back same data, wrote: {}, read: {}"
-                            .format(nextInt, readBack), 0x999)
+                            .format(intWrote, readBack), 0x999)
                 except taos.error.ProgrammingError as err:
                     errno = Helper.convertErrno(err.errno)
-                    if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result
+                    if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
                         raise taos.error.ProgrammingError(
-                            "Failed to read back same data for tick: {}, wrote: {}, read: {}"
-                            .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"),
+                            "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY"
+                            .format(nextTick, intWrote),
+                            errno)
+                    elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results
+                        raise taos.error.ProgrammingError(
+                            "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS"
+                            .format(nextTick, intWrote),
                             errno)
                     elif errno in [0x218, 0x362]: # table doesn't exist
                         # do nothing
-                        dummy = 0
+                        pass
                     else:
                         # Re-throw otherwise
                         raise
                 finally:
-                    self.unlockTable(fullTableName)  # Unlock the table no matter what
+                    self._unlockTableIfNeeded(fullTableName)  # Quite ugly, refactor lock/unlock
+            # Done with read-back verification, unlock the table now
+            else:
+                self._unlockTableIfNeeded(fullTableName)

             # Successfully wrote the data into the DB, let's record it somehow
-            te.recordDataMark(nextInt)
+            te.recordDataMark(intWrote)

             if Config.getConfig().record_ops:
                 if self.fAddLogDone is None:
                     raise CrashGenError("Unexpected empty fAddLogDone")
-                self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName))
                 self.fAddLogDone.flush()
                 os.fsync(self.fAddLogDone.fileno())
...
@@ -2137,15 +2201,16 @@ class TaskAddData(StateTransitionTask):
 class ThreadStacks: # stack info for all threads
     def __init__(self):
         self._allStacks = {}
-        allFrames = sys._current_frames()
-        for th in threading.enumerate():
+        allFrames = sys._current_frames() # All current stack frames
+        for th in threading.enumerate():  # For each thread
             if th.ident is None:
                 continue
-            stack = traceback.extract_stack(allFrames[th.ident])
-            self._allStacks[th.native_id] = stack
+            stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread
+            shortTid = th.ident % 10000
+            self._allStacks[shortTid] = stack # Was using th.native_id

     def print(self, filteredEndName = None, filterInternal = False):
-        for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
+        for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
             lastFrame = stack[-1]
             if filteredEndName: # we need to filter out stacks that match this name
                 if lastFrame.name == filteredEndName : # end did not match
...
@@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads
                     '__init__']: # the thread that extracted the stack
                     continue # ignore
             # Now print
-            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid))
+            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent))
             stackFrame = 0
             for frame in stack: # was using: reversed(stack)
                 # print(frame)
...
@@ -2376,7 +2441,7 @@ class MainExec:
             action='store',
             default=0,
             type=int,
-            help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)')
+            help='Number of DBs to use, set to disable dropping DB. (default: 0)')
         parser.add_argument(
             '-c',
             '--connector-type',
...
tests/pytest/crash_gen/service_manager.py
...
@@ -179,7 +179,7 @@ quorum 2
     def getServiceCmdLine(self): # to start the instance
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+            return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
         else:
             # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
             return ["exec " + self.getExecFile(), '-c', self.getCfgDir()]  # used in subproce.Popen()
...
@@ -310,7 +310,7 @@ class TdeSubProcess:
         # print("Starting TDengine with env: ", myEnv.items())
         print("Starting TDengine: {}".format(cmdLine))

-        return Popen(
+        ret = Popen(
             ' '.join(cmdLine),  # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True,  # Always use shell, since we need to pass ENV vars
             stdout=PIPE,
...
@@ -318,6 +318,10 @@ class TdeSubProcess:
             close_fds=ON_POSIX,
             env=myEnv
             )  # had text=True, which interferred with reading EOF
+        time.sleep(0.01)  # very brief wait, then let's check if sub process started successfully.
+        if ret.poll():
+            raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine))
+        return ret

     STOP_SIGNAL = signal.SIGINT  # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
     SIG_KILL_RETCODE = 137  # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm
...
@@ -614,7 +618,7 @@ class ServiceManager:
         # Find if there's already a taosd service, and then kill it
         for proc in psutil.process_iter():
-            if proc.name() == 'taosd':
+            if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind
                 Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt")
                 time.sleep(2.0)
                 proc.kill()
...
tests/pytest/crash_gen/shared/misc.py
...
@@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter):
 class MyLoggingAdapter(logging.LoggerAdapter):
     def process(self, msg, kwargs):
-        return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs
+        shortTid = threading.get_ident() % 10000
+        return "[{:04d}] {}".format(shortTid, msg), kwargs
         # return '[%s] %s' % (self.extra['connid'], msg), kwargs
...
tests/pytest/fulltest.sh
...
@@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py
 python3 ./test.py -f table/column_num.py
 python3 ./test.py -f table/db_table.py
 python3 ./test.py -f table/create_sensitive.py
-#python3 ./test.py -f table/tablename-boundary.py
+python3 ./test.py -f table/tablename-boundary.py
 python3 ./test.py -f table/max_table_length.py
 python3 ./test.py -f table/alter_column.py
 python3 ./test.py -f table/boundary.py
...
@@ -314,6 +314,8 @@ python3 ./test.py -f query/last_row_cache.py
 python3 ./test.py -f account/account_create.py
 python3 ./test.py -f alter/alter_table.py
 python3 ./test.py -f query/queryGroupbySort.py
+python3 ./test.py -f functions/function_session.py
+python3 ./test.py -f functions/function_stateWindow.py

 python3 ./test.py -f insert/unsignedInt.py
 python3 ./test.py -f insert/unsignedBigint.py
...
@@ -334,6 +336,7 @@ python3 ./test.py -f tag_lite/alter_tag.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+python3 ./test.py -f tag_lite/drop_auto_create.py
 python3 test.py -f insert/insert_before_use_db.py
+python3 test.py -f alter/alter_cacheLastRow.py

 #======================p4-end===============
tests/pytest/functions/function_session.py (new file, 0 → 100644)

###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
#import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using test tags('beijing')")
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        # operation not allowed on super table
        tdSql.error("select count(*) from test session(ts, 1s)")
        # operation not allowde on col pro
        tdSql.error("select * from test1 session(ts, 1s)")
        # operation not allowed on col except primary ts
        tdSql.error("select * from test1 session(col1, 1s)")

        tdSql.query("select count(*) from test1 session(ts, 1s)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 10)

        # append more data
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + 2000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        tdSql.query("select count(*) from test1 session(ts, 1s)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 10)
        tdSql.checkData(1, 1, 1)

        tdSql.query("select count(*) from test1 session(ts, 1m)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 11)

        tdSql.query("select first(col1) from test1 session(ts, 1s)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)

        tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(0, 2, 10)
        tdSql.checkData(1, 1, 1)
        tdSql.checkData(1, 1, 1)

        # add more function

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/functions/function_stateWindow.py (new file, 0 → 100644; diff collapsed)
tests/pytest/insert/nchar.py
...
@@ -37,6 +37,10 @@ class TDTestCase:
         tdSql.error("insert into tb values (now, 'taosdata001')")
+        tdSql.error("insert into tb(now, 😀)")
+
+        tdSql.query("select * from tb")
+        tdSql.checkRows(2)

     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
...
tests/pytest/manualTest/manual_alter_block.py (new file, 0 → 100644)

###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes

##TODO: auto test version is currently unsupported, need to come up with
# an auto test version in the future


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        tdSql.prepare()
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # alter cache block to 3, then check alter
        tdSql.execute('alter database db blocks 3')
        tdSql.query('show databases')
        tdSql.checkData(0, 9, 3)

        # run taosdemo to occupy all cache, need to manually check memory consumption
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath)
        input("please check memory usage for taosd. After checking, press enter")

        # alter cache block to 8, then check alter
        tdSql.execute('alter database db blocks 8')
        tdSql.query('show databases')
        tdSql.checkData(0, 9, 8)

        # run taosdemo to occupy all cache, need to manually check memory consumption
        os.system("%staosdemo -f tools/taosdemoAllTest/manual_block2.json" % binPath)
        input("please check memory usage for taosd. After checking, press enter")

        ##expected result the peak memory consumption should increase by around 80MB = 5 blocks of cache
        ##test results
        # 2021/06/02 before:2621700K after: 2703640K memory usage increased by 80MB = 5 block
        # confirm with the change in block. Baosheng Chang

    def stop(self):
        tdSql.close()
        tdLog.debug("%s alter block manual check finish" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/manualTest/manual_alter_comp.py (new file, 0 → 100644; diff collapsed)
tests/pytest/query/queryInsertValue.py (diff collapsed)
tests/pytest/table/tablename-boundary.py (diff collapsed)
tests/pytest/tag_lite/drop_auto_create.py (new file, 0 → 100644; diff collapsed)
tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json (new file, 0 → 100644; diff collapsed)
tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json (new file, 0 → 100644; diff collapsed)
tests/pytest/tools/taosdemoAllTest/manual_block2.json (new file, 0 → 100644; diff collapsed)
tests/pytest/util/dnodes.py (diff collapsed)
tests/script/api/stmtBatchTest.c (diff collapsed)
tests/script/general/parser/alter_column.sim (new file, 0 → 100644; diff collapsed)