Commit 8fa42af9
Authored on Jun 04, 2020 by Tao Liu

    merge from develop

Parents: 2649d517, 4913ce58

Showing 48 changed files with 2090 additions and 417 deletions (+2090 -417)
.travis.yml                                                  +1    -0
src/client/inc/tsclient.h                                    +3    -0
src/client/src/tscAsync.c                                    +8    -0
src/client/src/tscParseInsert.c                              +2    -1
src/client/src/tscPrepare.c                                  +72   -33
src/client/src/tscSQLParser.c                                +32   -7
src/client/src/tscSchemaUtil.c                               +2    -0
src/client/src/tscServer.c                                   +22   -14
src/client/src/tscSql.c                                      +8    -2
src/client/src/tscUtil.c                                     +1    -0
src/common/inc/qsqltype.h                                    +2    -1
src/common/inc/tdataformat.h                                 +14   -11
src/common/src/tdataformat.c                                 +145  -86
src/common/src/ttypes.c                                      +1    -1
src/dnode/src/dnodeShell.c                                   +2    -1
src/inc/taosdef.h                                            +1    -0
src/inc/taoserror.h                                          +2    -0
src/inc/taosmsg.h                                            +14   -1
src/inc/tsdb.h                                               +5    -3
src/inc/tsync.h                                              +11   -5
src/mnode/src/mnodeSdb.c                                     +1    -1
src/mnode/src/mnodeTable.c                                   +7    -4
src/mnode/src/mnodeVgroup.c                                  +3    -1
src/query/src/qExecutor.c                                    +62   -31
src/rpc/src/rpcMain.c                                        +5    -4
src/rpc/src/rpcTcp.c                                         +5    -1
src/rpc/src/rpcUdp.c                                         +1    -1
src/rpc/test/rclient.c                                       +2    -1
src/rpc/test/rsclient.c                                      +2    -1
src/rpc/test/rserver.c                                       +1    -0
src/tsdb/inc/tsdbMain.h                                      +17   -8
src/tsdb/src/tsdbFile.c                                      +1    -2
src/tsdb/src/tsdbMain.c                                      +156  -30
src/tsdb/src/tsdbMeta.c                                      +152  -58
src/tsdb/src/tsdbRWHelper.c                                  +1    -1
src/util/inc/tutil.h                                         +5    -0
src/util/src/tqueue.c                                        +1    -1
src/vnode/inc/vnodeInt.h                                     +1    -0
src/vnode/src/vnodeMain.c                                    +13   -4
src/vnode/src/vnodeWrite.c                                   +11   -7
src/wal/test/waltest.c                                       +2    -1
tests/examples/c/demo.c                                      +3    -2
tests/examples/c/prepare.c                                   +195  -0
tests/pytest/random-test/random-test-multi-threading-3.py    +281  -0
tests/pytest/random-test/random-test-multi-threading.py      +113  -59
tests/pytest/random-test/random-test.py                      +88   -33
tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim   +200  -0
tests/script/unique/arbitrator/dn3_mn1_replica_change.sim    +413  -0
.travis.yml

@@ -38,6 +38,7 @@ matrix:
        - make > /dev/null

      after_success:
        - travis_wait 20
        - |-
          case $TRAVIS_OS_NAME in
            linux)

src/client/inc/tsclient.h

@@ -56,6 +56,7 @@ typedef struct STableMeta {
  STableComInfo tableInfo;
  uint8_t       tableType;
  int16_t       sversion;
  int16_t       tversion;
  SCMVgroupInfo vgroupInfo;
  int32_t       sid;       // the index of one table in a virtual node
  uint64_t      uid;       // unique id of a table

@@ -316,6 +317,7 @@ typedef struct SSqlObj {
  SRpcIpSet ipList;
  char      freed : 4;
  char      listed : 4;
  uint32_t  insertType;
  tsem_t    rspSem;
  SSqlCmd   cmd;
  SSqlRes   res;

@@ -401,6 +403,7 @@ void tscCloseTscObj(STscObj *pObj);
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
                     void *param, void **taos);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, void (*fp)(), void *param, const char *sqlstr, size_t sqlLen);

src/client/src/tscAsync.c

@@ -483,6 +483,14 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
      }
    } else {
      code = tsParseSql(pSql, false);

      if ((pQueryInfo->type & TSDB_QUERY_TYPE_STMT_INSERT) == TSDB_QUERY_TYPE_STMT_INSERT) {
        STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
        code = tscGetTableMeta(pSql, pTableMetaInfo);
        assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->pTableMeta != NULL);

        (*pSql->fp)(pSql->param, NULL, code);
        return;
      }

      if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
    }

src/client/src/tscParseInsert.c

@@ -1314,6 +1314,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
  tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);

  TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
  TSDB_QUERY_SET_TYPE(pQueryInfo->type, pSql->insertType);

  sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
  if (sToken.type != TK_INTO) {

@@ -1341,7 +1342,7 @@ int tsParseSql(SSqlObj *pSql, bool initialParse) {
   * Set the fp before parse the sql string, in case of getTableMeta failed, in which
   * the error handle callback function can rightfully restore the user-defined callback function (fp).
   */
  if (initialParse) {
  if (initialParse && (pSql->insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
    pSql->fetchFp = pSql->fp;
    pSql->fp = (void (*)())tscHandleMultivnodeInsert;
  }

src/client/src/tscPrepare.c

@@ -12,6 +12,7 @@
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "os.h"
#include "taos.h"
#include "tsclient.h"

@@ -20,6 +21,7 @@
#include "taosmsg.h"
#include "tstrbuild.h"
#include "tscLog.h"
#include "tscSubquery.h"

int tsParseInsertSql(SSqlObj *pSql);
int taos_query_imp(STscObj *pObj, SSqlObj *pSql);

@@ -262,7 +264,11 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
  if (bind->is_null != NULL && *(bind->is_null)) {
    setNull(data, param->type, param->bytes);
    if (param->type == TSDB_DATA_TYPE_BINARY || param->type == TSDB_DATA_TYPE_NCHAR) {
      setVardataNull(data + param->offset, param->type);
    } else {
      setNull(data + param->offset, param->type, param->bytes);
    }
    return TSDB_CODE_SUCCESS;
  }

@@ -297,14 +303,17 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
        return TSDB_CODE_INVALID_VALUE;
      }
      size = (short)*bind->length;
      break;
      STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size);
      return TSDB_CODE_SUCCESS;

    case TSDB_DATA_TYPE_NCHAR:
      if (!taosMbsToUcs4(bind->buffer, *bind->length, data + param->offset, param->bytes, NULL)) {
    case TSDB_DATA_TYPE_NCHAR: {
      size_t output = 0;
      if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset),
                         param->bytes - VARSTR_HEADER_SIZE, &output)) {
        return TSDB_CODE_INVALID_VALUE;
      }
      }
      varDataSetLen(data + param->offset, output);
      return TSDB_CODE_SUCCESS;
    }
    default:
      assert(false);
      return TSDB_CODE_INVALID_VALUE;

@@ -383,14 +392,6 @@ static int insertStmtAddBatch(STscStmt* stmt) {
  return TSDB_CODE_SUCCESS;
}

static int insertStmtPrepare(STscStmt* stmt) {
  SSqlObj* pSql = stmt->pSql;
  pSql->cmd.numOfParams = 0;
  pSql->cmd.batchSize = 0;

  return tsParseInsertSql(pSql);
}

static int insertStmtReset(STscStmt* pStmt) {
  SSqlCmd* pCmd = &pStmt->pSql->cmd;
  if (pCmd->batchSize > 2) {

@@ -451,14 +452,16 @@ static int insertStmtExecute(STscStmt* stmt) {
  pRes->qhandle = 0;

  pSql->insertType = 0;
  pSql->fetchFp = waitForQueryRsp;
  pSql->fp = (void(*)())tscHandleMultivnodeInsert;
  tscDoQuery(pSql);

  // tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
  if (pRes->code != TSDB_CODE_SUCCESS) {
    tscPartiallyFreeSqlObj(pSql);
  }

  // wait for the callback function to post the semaphore
  tsem_wait(&pSql->rspSem);
  return pSql->res.code;
  return pRes->code;
}

////////////////////////////////////////////////////////////////////////////////

@@ -478,6 +481,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
    tscError("failed to allocate memory for statement");
    return NULL;
  }
  pStmt->taos = pObj;

  SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
  if (pSql == NULL) {

@@ -488,8 +492,10 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
  }

  tsem_init(&pSql->rspSem, 0, 0);
  pSql->signature = pSql;
  pSql->pTscObj = pObj;
  pSql->signature = pSql;
  pSql->pTscObj = pObj;
  pSql->pTscObj->pSql = pSql;
  pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
  pStmt->pSql = pSql;

  return pStmt;

@@ -497,22 +503,55 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
  STscStmt* pStmt = (STscStmt*)stmt;
  if (length == 0) {
    length = strlen(sql);
  if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
    terrno = TSDB_CODE_DISCONNECTED;
    return TSDB_CODE_DISCONNECTED;
  }

  SSqlObj* pSql = pStmt->pSql;
  size_t   sqlLen = strlen(sql);

  //doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
  SSqlCmd *pCmd = &pSql->cmd;
  SSqlRes *pRes = &pSql->res;
  pSql->param = (void*)pStmt->taos;
  pSql->fp = waitForQueryRsp;
  pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;

  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
    tscError("%p failed to malloc payload buffer", pSql);
    return TSDB_CODE_CLI_OUT_OF_MEMORY;
  }

  char* sqlstr = (char*)malloc(length + 1);
  if (sqlstr == NULL) {
    tscError("failed to malloc sql string buffer");
  pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);

  if (pSql->sqlstr == NULL) {
    tscError("%p failed to malloc sql string buffer", pSql);
    free(pCmd->payload);
    return TSDB_CODE_CLI_OUT_OF_MEMORY;
  }
  memcpy(sqlstr, sql, length);
  sqlstr[length] = 0;
  strtolower(sqlstr, sqlstr);

  pRes->qhandle = 0;
  pRes->numOfRows = 1;

  strtolower(pSql->sqlstr, sql);

  tscDump("%p SQL: %s", pSql, pSql->sqlstr);

  pStmt->pSql->sqlstr = sqlstr;
  if (tscIsInsertData(sqlstr)) {
  if (tscIsInsertData(pSql->sqlstr)) {
    pStmt->isInsert = true;
    return insertStmtPrepare(pStmt);

    pSql->cmd.numOfParams = 0;
    pSql->cmd.batchSize = 0;

    int32_t code = tsParseSql(pSql, true);
    if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
      // wait for the callback function to post the semaphore
      tsem_wait(&pSql->rspSem);
      return pSql->res.code;
    }

    return code;
  }

  pStmt->isInsert = false;

@@ -574,7 +613,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
    } else {
      tfree(pStmt->pSql->sqlstr);
      pStmt->pSql->sqlstr = sql;
      ret = taos_query_imp(pStmt->taos, pStmt->pSql);
      ret = taos_query(pStmt->taos, pStmt->pSql->sqlstr);
    }
  }
  return ret;

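The new taos_stmt_prepare() above routes parameterized inserts through tsParseSql(pSql, true) with insertType set to TSDB_QUERY_TYPE_STMT_INSERT. Below is a minimal client-side sketch of driving that path, loosely modeled on the tests/examples/c/prepare.c example this commit adds (not expanded in this view); the table name, column layout, TAOS_BIND field usage, and lack of error handling are assumptions for illustration, not part of the diff.

    #include <taos.h>

    /* Illustrative only: bind one (timestamp, int) row and execute it. */
    void demo_stmt_insert(TAOS *taos) {
      TAOS_STMT *stmt = taos_stmt_init(taos);
      taos_stmt_prepare(stmt, "insert into d1 values(?, ?)", 0);  // length is taken from strlen(sql) above

      int64_t ts  = 1591234567890;  // assumed existing table d1(ts timestamp, v int)
      int32_t val = 42;

      TAOS_BIND params[2] = {0};
      params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
      params[0].buffer        = &ts;
      params[0].buffer_length = sizeof(ts);
      params[1].buffer_type   = TSDB_DATA_TYPE_INT;
      params[1].buffer        = &val;
      params[1].buffer_length = sizeof(val);

      taos_stmt_bind_param(stmt, params);  // each column is copied by doBindParam() above
      taos_stmt_add_batch(stmt);
      taos_stmt_execute(stmt);             // insert statements go through insertStmtExecute()
      taos_stmt_close(stmt);
    }
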
src/client/src/tscSQLParser.c

@@ -4400,7 +4400,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
  } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
    // Note: update can only be applied to table not super table.
    // the following is handle display tags value for meters created according to super table
    // the following is used to handle tags value for table created according to super table
    pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL;

    tVariantList* pVarList = pAlterSQL->varList;
    tVariant*     pTagName = &pVarList->a[0].pVar;

@@ -4423,15 +4425,38 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    // validate the length of binary
    if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
        pVarList->a[1].pVar.nLen > pTagsSchema->bytes) {
        (pVarList->a[1].pVar.nLen + VARSTR_HEADER_SIZE) > pTagsSchema->bytes) {
      return invalidSqlErrMsg(pQueryInfo->msg, msg14);
    }

    char name1[128] = {0};
    strncpy(name1, pTagName->pz, pTagName->nLen);
    TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
    tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);

    int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + TSDB_EXTRA_PAYLOAD_SIZE;
    if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
      tscError("%p failed to malloc for alter table msg", pSql);
      return TSDB_CODE_CLI_OUT_OF_MEMORY;
    }

    SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
    pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
    pUpdateMsg->tid       = htonl(pTableMeta->sid);
    pUpdateMsg->uid       = htobe64(pTableMeta->uid);
    pUpdateMsg->colId     = htons(pTagsSchema->colId);
    pUpdateMsg->type      = htons(pTagsSchema->type);
    pUpdateMsg->bytes     = htons(pTagsSchema->bytes);
    pUpdateMsg->tversion  = htons(pTableMeta->tversion);

    tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data, pTagsSchema->type, true);

    int32_t len = 0;
    if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
      len = tDataTypeDesc[pTagsSchema->type].nSize;
    } else {
      len = varDataLen(pUpdateMsg->data);
    }

    pUpdateMsg->tagValLen = htonl(len);  // length may be changed after dump data

    int32_t total = sizeof(SUpdateTableTagValMsg) + len;
    pUpdateMsg->head.contLen = htonl(total);

  } else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
    tFieldList* pFieldList = pAlterSQL->pAddColumns;

src/client/src/tscSchemaUtil.c

@@ -168,6 +168,8 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
  pTableMeta->sid = pTableMetaMsg->sid;
  pTableMeta->uid = pTableMetaMsg->uid;
  pTableMeta->vgroupInfo = pTableMetaMsg->vgroup;

  pTableMeta->sversion = pTableMetaMsg->sversion;
  pTableMeta->tversion = pTableMetaMsg->tversion;

  memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);

src/client/src/tscServer.c

@@ -1251,28 +1251,24 @@ int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
}

int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SCMAlterTableMsg *pAlterTableMsg;
  char *pMsg;
  int   msgLen = 0;
  int   size = 0;
  char *pMsg;
  int   msgLen = 0;

  SSqlCmd    *pCmd = &pSql->cmd;
  SSqlCmd *pCmd = &pSql->cmd;
  SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);

  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

  size = tscEstimateAlterTableMsgLength(pCmd);
  SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;

  int size = tscEstimateAlterTableMsgLength(pCmd);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
    tscError("%p failed to malloc for alter table msg", pSql);
    return -1;
  }

  pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
  SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
  tscGetDBInfoFromMeterId(pTableMetaInfo->name, pAlterTableMsg->db);

  SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;

  strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
  pAlterTableMsg->type = htons(pAlterInfo->type);

@@ -1280,7 +1276,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SSchema *pSchema = pAlterTableMsg->schema;
  for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) {
    TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);

    pSchema->type = pField->type;
    strcpy(pSchema->name, pField->name);
    pSchema->bytes = htons(pField->bytes);

@@ -1293,6 +1289,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pMsg += pAlterInfo->tagData.dataLen;

  msgLen = pMsg - (char*)pAlterTableMsg;

  pCmd->payloadLen = msgLen;
  pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE;

@@ -1301,6 +1298,16 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  return TSDB_CODE_SUCCESS;
}

int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
  SSqlCmd* pCmd = &pSql->cmd;
  pCmd->msgType = TSDB_MSG_TYPE_UPDATE_TAG_VAL;

  SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
  pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);

  return TSDB_CODE_SUCCESS;
}

int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SSqlCmd *pCmd = &pSql->cmd;
  pCmd->payloadLen = sizeof(SCMAlterDbMsg);

@@ -1795,7 +1802,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
  pMetaMsg->sid = htonl(pMetaMsg->sid);
  pMetaMsg->sversion = htons(pMetaMsg->sversion);
  pMetaMsg->tversion = htons(pMetaMsg->tversion);
  pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId);
  pMetaMsg->uid = htobe64(pMetaMsg->uid);

@@ -2543,6 +2550,7 @@ void tscInitMsgsFp() {
  tscBuildMsg[TSDB_SQL_DROP_DNODE] = tscBuildDropDnodeMsg;
  tscBuildMsg[TSDB_SQL_CFG_DNODE] = tscBuildCfgDnodeMsg;
  tscBuildMsg[TSDB_SQL_ALTER_TABLE] = tscBuildAlterTableMsg;
  tscBuildMsg[TSDB_SQL_UPDATE_TAGS_VAL] = tscBuildUpdateTagMsg;
  tscBuildMsg[TSDB_SQL_ALTER_DB] = tscAlterDbMsg;

  tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg;

src/client/src/tscSql.c

@@ -255,8 +255,14 @@ int taos_query_imp(STscObj *pObj, SSqlObj *pSql) {
  return pRes->code;
}

static void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
  assert(tres != NULL);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
  assert(param != NULL);
  SSqlObj *pSql = ((STscObj *)param)->pSql;

  // valid error code is less than 0
  if (code < 0) {
    pSql->res.code = code;
  }

  SSqlObj *pSql = (SSqlObj *)tres;
  sem_post(&pSql->rspSem);

src/client/src/tscUtil.c

@@ -654,6 +654,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock) {
  for (int32_t i = 0; i < numOfRows; ++i) {
    SDataRow trow = (SDataRow)pDataBlock;
    dataRowSetLen(trow, TD_DATA_ROW_HEAD_SIZE + flen);
    dataRowSetVersion(trow, pTableMeta->sversion);

    int toffset = 0;
    for (int32_t j = 0; j < tinfo.numOfColumns; j++) {

src/common/inc/qsqltype.h

@@ -35,7 +35,8 @@ enum {
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SELECT, "select" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_FETCH, "fetch" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_INSERT, "insert" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_UPDATE_TAGS_VAL, "update-tag-val" )

  // the SQL below is for mgmt node
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" )

src/common/inc/tdataformat.h

@@ -119,22 +119,24 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Data row structure

/* A data row, the format is like below:
 * |<--------------------+--------------------------- len ---------------------------------->|
 * |<--     Head      -->|<---------   flen -------------->|                                 |
 * +----------+----------+---------------------------------+---------------------------------+
 * | int16_t  | int16_t  |                                 |                                 |
 * +----------+----------+---------------------------------+---------------------------------+
 * |   len    | sversion |           First part            |           Second part           |
 * +----------+----------+---------------------------------+---------------------------------+
 */
typedef void *SDataRow;

#define TD_DATA_ROW_HEAD_SIZE sizeof(int32_t)
#define TD_DATA_ROW_HEAD_SIZE sizeof(int16_t)*2

#define dataRowLen(r) (*(int32_t *)(r))
#define dataRowLen(r) (*(int16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
#define dataRowSetLen(r, l) (dataRowLen(r) = (l))
#define dataRowSetVersion(r, v) (dataRowVersion(r) = (v))
#define dataRowCpy(dst, r) memcpy((dst), (r), dataRowLen(r))
#define dataRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_DATA_ROW_HEAD_SIZE)

@@ -246,7 +248,7 @@ void tdResetDataCols(SDataCols *pCols);
void tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
void tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop); //!!!!
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCols *src2, int *iter2, int tRows);

@@ -278,9 +280,10 @@ typedef struct {
#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))

SKVRow tdKVRowDup(SKVRow row);
SKVRow tdSetKVRowDataOfCol(SKVRow row, int16_t colId, int8_t type, void *value);
int    tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
void  *tdEncodeKVRow(void *buf, SKVRow row);
void  *tdDecodeKVRow(void *buf, SKVRow *row);

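With this change the data-row head grows from a single int32_t length to an int16_t length plus an int16_t schema version, which is what dataRowVersion()/dataRowSetVersion() address. A small sketch of the accessors working together, assuming a caller-provided schema handle and a buffer that is large enough:

    void demo_data_row_head(STSchema *pSchema) {
      char     buf[1024];   // assumed large enough for the row
      SDataRow row = buf;

      dataRowSetLen(row, TD_DATA_ROW_HEAD_SIZE + schemaFLen(pSchema));  // first int16_t: len
      dataRowSetVersion(row, schemaVersion(pSchema));                   // second int16_t: sversion

      int16_t len  = dataRowLen(row);
      int16_t sver = dataRowVersion(row);
      (void)len; (void)sver;
    }
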
src/common/src/tdataformat.c

@@ -159,7 +159,10 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
/**
 * Initialize a data row
 */
void tdInitDataRow(SDataRow row, STSchema *pSchema) { dataRowSetLen(row, TD_DATA_ROW_HEAD_SIZE + schemaFLen(pSchema)); }
void tdInitDataRow(SDataRow row, STSchema *pSchema) {
  dataRowSetLen(row, TD_DATA_ROW_HEAD_SIZE + schemaFLen(pSchema));
  dataRowSetVersion(row, schemaVersion(pSchema));
}

SDataRow tdNewDataRowFromSchema(STSchema *pSchema) {
  int32_t size = dataRowMaxBytesFromSchema(pSchema);

@@ -262,25 +265,29 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
  }
}

void dataColSetNullAt(SDataCol *pCol, int index) {
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->dataOff[index] = pCol->len;
    char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
    varDataLen(ptr) = (pCol->type == TSDB_DATA_TYPE_BINARY) ? sizeof(char) : TSDB_NCHAR_SIZE;
    setNull(varDataVal(ptr), pCol->type, pCol->bytes);
    pCol->len += varDataTLen(ptr);
  } else {
    setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
    pCol->len += TYPE_BYTES[pCol->type];
  }
}

void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
  char *ptr = NULL;
  switch (pCol->type) {
    case TSDB_DATA_TYPE_BINARY:
    case TSDB_DATA_TYPE_NCHAR:
      pCol->len = 0;
      for (int i = 0; i < nEle; i++) {
        pCol->dataOff[i] = pCol->len;
        ptr = (char *)pCol->pData + pCol->len;
        varDataLen(ptr) = (pCol->type == TSDB_DATA_TYPE_BINARY) ? sizeof(char) : TSDB_NCHAR_SIZE;
        setNull(ptr + sizeof(VarDataLenT), pCol->type, pCol->bytes);
        pCol->len += varDataTLen(ptr);
      }
      break;
    default:
      setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
      pCol->len = TYPE_BYTES[pCol->type] * nEle;
      break;
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->len = 0;
    for (int i = 0; i < nEle; i++) {
      dataColSetNullAt(pCol, i);
    }
  } else {
    setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
    pCol->len = TYPE_BYTES[pCol->type] * nEle;
  }
}

@@ -377,14 +384,32 @@ void tdResetDataCols(SDataCols *pCols) {
  }
}

void tdAppendDataRowToDataCol(SDataRow row, SDataCols *pCols) {
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
  ASSERT(dataColsKeyLast(pCols) < dataRowKey(row));

  for (int i = 0; i < pCols->numOfCols; i++) {
    SDataCol *pCol = pCols->cols + i;
    void *value = tdGetRowDataOfCol(row, pCol->type, pCol->offset);
  int rcol = 0;
  int dcol = 0;

    dataColAppendVal(pCol, value, pCols->numOfRows, pCols->maxPoints);
  while (dcol < pCols->numOfCols) {
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= schemaNCols(pSchema)) {
      dataColSetNullAt(pDataCol, pCols->numOfRows);
      dcol++;
      continue;
    }

    STColumn *pRowCol = schemaColAt(pSchema, rcol);
    if (pRowCol->colId == pDataCol->colId) {
      void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
      dcol++;
      rcol++;
    } else if (pRowCol->colId < pDataCol->colId) {
      rcol++;
    } else {
      dataColSetNullAt(pDataCol, pCols->numOfRows);
      dcol++;
    }
  }

  pCols->numOfRows++;
}

@@ -477,69 +502,103 @@ SKVRow tdKVRowDup(SKVRow row) {
  return trow;
}

SKVRow tdSetKVRowDataOfCol(SKVRow row, int16_t colId, int8_t type, void *value) {
  // TODO
  return NULL;
  // SColIdx *pColIdx = NULL;
  // SKVRow rrow = row;
  // SKVRow nrow = NULL;
  // void *ptr = taosbsearch(&colId, kvDataRowColIdx(row), kvDataRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
  // if (ptr == NULL || ((SColIdx *)ptr)->colId < colId) { // need to add a column value to the row
  //   int tlen = kvDataRowLen(row) + sizeof(SColIdx) + (IS_VAR_DATA_TYPE(type) ? varDataTLen(value) :
  //   TYPE_BYTES[type]); nrow = malloc(tlen); if (nrow == NULL) return NULL;
  //   kvDataRowSetNCols(nrow, kvDataRowNCols(row)+1);
  //   kvDataRowSetLen(nrow, tlen);
  //   if (ptr == NULL) ptr = kvDataRowValues(row);
  //   // Copy the columns before the col
  //   if (POINTER_DISTANCE(ptr, kvDataRowColIdx(row)) > 0) {
  //     memcpy(kvDataRowColIdx(nrow), kvDataRowColIdx(row), POINTER_DISTANCE(ptr, kvDataRowColIdx(row)));
  //     memcpy(kvDataRowValues(nrow), kvDataRowValues(row), ((SColIdx *)ptr)->offset); // TODO: here is not correct
  //   }
  //   // Set the new col value
  //   pColIdx = (SColIdx *)POINTER_SHIFT(nrow, POINTER_DISTANCE(ptr, row));
  //   pColIdx->colId = colId;
  //   pColIdx->offset = ((SColIdx *)ptr)->offset; // TODO: here is not correct
  //   if (IS_VAR_DATA_TYPE(type)) {
  //     memcpy(POINTER_SHIFT(kvDataRowValues(nrow), pColIdx->offset), value, varDataLen(value));
  //   } else {
  //     memcpy(POINTER_SHIFT(kvDataRowValues(nrow), pColIdx->offset), value, TYPE_BYTES[type]);
  //   }
  //   // Copy the columns after the col
  //   if (POINTER_DISTANCE(kvDataRowValues(row), ptr) > 0) {
  //     // TODO: memcpy();
  //   }
  // } else {
  //   // TODO
  //   ASSERT(((SColIdx *)ptr)->colId == colId);
  //   if (IS_VAR_DATA_TYPE(type)) {
  //     void *pOldVal = kvDataRowColVal(row, (SColIdx *)ptr);
  //     if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
  //       memcpy(pOldVal, value, varDataTLen(value));
  //     } else { // enlarge the memory
  //       // rrow = realloc(rrow, kvDataRowLen(rrow) + varDataTLen(value) - varDataTLen(pOldVal));
  //       // if (rrow == NULL) return NULL;
  //       // memmove();
  //       // for () {
  //       //   ((SColIdx *)ptr)->offset += balabala;
  //       // }
  //       // kvDataRowSetLen();
  //     }
  //   } else {
  //     memcpy(kvDataRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]);
  //   }
  // }
  // return rrow;
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
  SColIdx *pColIdx = NULL;
  SKVRow   row = *orow;
  SKVRow   nrow = NULL;
  void *   ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);

  if (ptr == NULL || ((SColIdx *)ptr)->colId < colId) {  // need to add a column value to the row
    int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type];
    nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff);
    if (nrow == NULL) return -1;

    kvRowSetLen(nrow, kvRowLen(row) + sizeof(SColIdx) + diff);
    kvRowSetNCols(nrow, kvRowNCols(row) + 1);

    if (ptr == NULL) {
      memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row));
      memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));

      int colIdx = kvRowNCols(nrow) - 1;
      kvRowColIdxAt(nrow, colIdx)->colId = colId;
      kvRowColIdxAt(nrow, colIdx)->offset = POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row));
      memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
    } else {
      int16_t tlen = POINTER_DISTANCE(ptr, kvRowColIdx(row));
      if (tlen > 0) {
        memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen);
        memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
      }

      int colIdx = tlen / sizeof(SColIdx);
      kvRowColIdxAt(nrow, colIdx)->colId = colId;
      kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset;
      memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);

      for (int i = colIdx; i < kvRowNCols(row); i++) {
        kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId;
        kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff;
      }
      memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)),
             POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx))));
    }

    *orow = nrow;
    free(row);
  } else {
    ASSERT(((SColIdx *)ptr)->colId == colId);
    if (IS_VAR_DATA_TYPE(type)) {
      void *pOldVal = kvRowColVal(row, (SColIdx *)ptr);

      if (varDataTLen(value) == varDataTLen(pOldVal)) {  // just update the column value in place
        memcpy(pOldVal, value, varDataTLen(value));
      } else {  // need to reallocate the memory
        int16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
        int16_t nlen = kvRowLen(row) + diff;
        ASSERT(nlen > 0);
        nrow = malloc(nlen);
        if (nrow == NULL) return -1;

        kvRowSetLen(nrow, nlen);
        kvRowSetNCols(nrow, kvRowNCols(row));

        // Copy part ahead
        nlen = POINTER_DISTANCE(ptr, kvRowColIdx(row));
        ASSERT(nlen % sizeof(SColIdx) == 0);
        if (nlen > 0) {
          ASSERT(((SColIdx *)ptr)->offset > 0);
          memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen);
          memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
        }

        // Construct current column value
        int colIdx = nlen / sizeof(SColIdx);
        pColIdx = kvRowColIdxAt(nrow, colIdx);
        pColIdx->colId = ((SColIdx *)ptr)->colId;
        pColIdx->offset = ((SColIdx *)ptr)->offset;
        memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value));

        // Construct columns after
        if (kvRowNCols(nrow) - colIdx - 1 > 0) {
          for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) {
            kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId;
            kvRowColIdxAt(nrow, i)->offset += diff;
          }
          memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)),
                 POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1))));
        }

        *orow = nrow;
        free(row);
      }
    } else {
      memcpy(kvRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]);
    }
  }

  return 0;
}

void *tdEncodeKVRow(void *buf, SKVRow row) {

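Because tdSetKVRowDataOfCol() may reallocate the row, callers now pass the SKVRow by address and check the int return code; tsdbUpdateTagValue() later in this commit calls it exactly this way on pTable->tagVal. A hedged usage sketch, where the column id and value are placeholders rather than anything taken from the diff:

    void demo_set_tag(SKVRow *pTagVal) {
      int32_t newVal = 7;
      if (tdSetKVRowDataOfCol(pTagVal, /*colId*/ 2, TSDB_DATA_TYPE_INT, &newVal) < 0) {
        // allocation failed; *pTagVal still points at the old row
      }
    }
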
src/common/src/ttypes.c

@@ -364,7 +364,7 @@ char tTokenTypeSwitcher[13] = {
};

bool isValidDataType(int32_t type, int32_t length) {
  if (type < TSDB_DATA_TYPE_BOOL || type > TSDB_DATA_TYPE_NCHAR) {
  if (type < TSDB_DATA_TYPE_NULL || type > TSDB_DATA_TYPE_NCHAR) {
    return false;
  }

src/dnode/src/dnodeShell.c

@@ -40,7 +40,8 @@ int32_t dnodeInitShell() {
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_SUBMIT] = dnodeDispatchToVnodeWriteQueue;
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY]  = dnodeDispatchToVnodeReadQueue;
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH]  = dnodeDispatchToVnodeReadQueue;
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVnodeWriteQueue;

  // the following message shall be treated as mnode write
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = dnodeDispatchToMnodeWriteQueue;
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT]  = dnodeDispatchToMnodeWriteQueue;

src/inc/taosdef.h

@@ -331,6 +331,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY       0x400u
#define TSDB_QUERY_TYPE_INSERT                 0x100u    // insert type
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY       0x200u
#define TSDB_QUERY_TYPE_STMT_INSERT            0x800u    // stmt insert type

#define TSDB_QUERY_HAS_TYPE(x, _type)          (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type)          ((x) |= (_type))

src/inc/taoserror.h

@@ -171,6 +171,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_FILE_FORMAT, 0, 0x0500, "invalid file
// TSDB
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CONFIG,        0, 0x0580, "invalid TSDB configuration")
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_VER_OUT_OF_DATE,   0, 0x0581, "tag version is out of date")
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_SCHEMA_VERSION,  0, 0x0582, "invalid table schema version from client")

#ifdef TAOS_ERROR_C

src/inc/taosmsg.h

@@ -43,7 +43,7 @@ enum {
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SUBMIT, "submit" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_QUERY, "query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_FETCH, "fetch" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY0, "dummy0" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_UPDATE_TAG_VAL, "update-tag-val" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY1, "dummy1" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY2, "dummy2" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY3, "dummy3" )

@@ -276,6 +276,18 @@ typedef struct {
  // char     tagVal[];
} SCMAlterTableMsg;

typedef struct {
  SMsgHead head;
  int64_t  uid;
  int32_t  tid;
  int16_t  tversion;
  int16_t  colId;
  int16_t  type;
  int16_t  bytes;
  int32_t  tagValLen;
  char     data[];
} SUpdateTableTagValMsg;

typedef struct {
  char clientVersion[TSDB_VERSION_LEN];
  char msgVersion[TSDB_VERSION_LEN];

@@ -590,6 +602,7 @@ typedef struct {
} SMDVnodeDesc;

typedef struct {
  char         db[TSDB_DB_NAME_LEN + 1];
  SMDVnodeCfg  cfg;
  SMDVnodeDesc nodes[TSDB_MAX_REPLICA];
} SMDCreateVnodeMsg;

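SUpdateTableTagValMsg travels over RPC, so every multi-byte field is converted to network byte order before send and converted back on receive; setAlterTableInfo() on the client side and tsdbUpdateTagValue() on the server side in this commit do exactly that. A sketch of the sender-side fill, with the table meta, tag schema, and value length assumed as inputs:

    void demo_fill_update_tag_msg(SUpdateTableTagValMsg *pMsg, STableMeta *pMeta,
                                  SSchema *pTagSchema, int32_t tagValLen) {
      pMsg->head.vgId    = htonl(pMeta->vgroupInfo.vgId);
      pMsg->tid          = htonl(pMeta->sid);
      pMsg->uid          = htobe64(pMeta->uid);
      pMsg->colId        = htons(pTagSchema->colId);
      pMsg->type         = htons(pTagSchema->type);
      pMsg->bytes        = htons(pTagSchema->bytes);
      pMsg->tversion     = htons(pMeta->tversion);
      pMsg->tagValLen    = htonl(tagValLen);   // the value itself is written into pMsg->data
      pMsg->head.contLen = htonl((int32_t)sizeof(SUpdateTableTagValMsg) + tagValLen);
    }
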
src/inc/tsdb.h

@@ -45,6 +45,7 @@ typedef struct {
  int  (*eventCallBack)(void *);
  void *(*cqCreateFunc)(void *handle, int sid, char *sqlStr, STSchema *pSchema);
  void (*cqDropFunc)(void *handle);
  void *(*configFunc)(int32_t vgId, int32_t sid);
} STsdbAppH;

// --------- TSDB REPOSITORY CONFIGURATION DEFINITION

@@ -108,16 +109,17 @@ int tsdbTableSetSName(STableCfg *config, char *sname, bool dup);
int  tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup);
void tsdbClearTableCfg(STableCfg *config);

int32_t tsdbGetTableTagVal(TsdbRepoT *repo, STableId *id, int32_t colId, int16_t *type, int16_t *bytes, char **val);
char   *tsdbGetTableName(TsdbRepoT *repo, const STableId *id, int16_t *bytes);
void   *tsdbGetTableTagVal(TsdbRepoT *repo, const STableId *id, int32_t colId, int16_t type, int16_t bytes);
char   *tsdbGetTableName(TsdbRepoT *repo, const STableId *id);

STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);

int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg);
int tsdbDropTable(TsdbRepoT *pRepo, STableId tableId);
int tsdbAlterTable(TsdbRepoT *repo, STableCfg *pCfg);
int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg);
TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid);

uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *size);
uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, uint32_t eindex, int32_t *size);

// the TSDB repository info
typedef struct STsdbRepoInfo {

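tsdbGetTableTagVal() and tsdbGetTableName() switch from out-parameters to a plain pointer return: the caller now supplies type and bytes (typically taken from its own expression info) and must treat a NULL return as a null value. A sketch of the adapted caller, mirroring what the qExecutor.c changes later in this diff do; the output buffer is an assumed input:

    void demo_read_tag(TsdbRepoT *repo, const STableId *id, int32_t colId,
                       int16_t type, int16_t bytes, char *output) {
      char *val = tsdbGetTableTagVal(repo, id, colId, type, bytes);
      if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
        if (val == NULL) setVardataNull(output, type);
        else             memcpy(output, val, varDataTLen(val));
      } else {
        if (val == NULL) setNull(output, type, bytes);
        else             memcpy(output, val, bytes);
      }
    }
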
src/inc/tsync.h

@@ -21,6 +21,7 @@ extern "C" {
#endif

#define TAOS_SYNC_MAX_REPLICA 5
#define TAOS_SYNC_MAX_INDEX   0x7FFFFFFF

typedef enum _TAOS_SYNC_ROLE {
  TAOS_SYNC_ROLE_OFFLINE,

@@ -53,11 +54,16 @@ typedef struct {
  uint32_t nodeId[TAOS_SYNC_MAX_REPLICA];
  int      role[TAOS_SYNC_MAX_REPLICA];
} SNodesRole;

// if name is empty(name[0] is zero), get the file from index or after, used by master
// if name is provided(name[0] is not zero), get the named file at the specified index, used by unsynced node
// it returns the file magic number and size, if file not there, magic shall be 0.
typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion);
/*
  if name is empty(name[0] is zero), get the file from index or after, but not larger than eindex. If a file
  is found between index and eindex, index shall be updated, name shall be set, size shall be set to
  file size, and file magic number shall be returned.
  if name is provided(name[0] is not zero), get the named file at the specified index. If not there, return
  zero. If it is there, set the size to file size, and return file magic number. Index shall not be updated.
*/
typedef uint32_t (*FGetFileInfo)(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion);

// get the wal file from index or after
// return value, -1: error, 1:more wal files, 0:last WAL. if name[0]==0, no WAL file

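The FGetFileInfo callback gains an eindex upper bound, so a master only offers files whose id lies in [*index, eindex]; sdbGetFileInfo() in the next file and tsdbGetFileInfo() adopt the new signature. A hedged sketch of the contract described in the comment above; findFileInRange() and reportNamedFile() are hypothetical helpers, not TDengine functions:

    static uint32_t demoGetFileInfo(void *ahandle, char *name, uint32_t *index,
                                    uint32_t eindex, int32_t *size, uint64_t *fversion) {
      if (name[0] == 0) {
        // master: pick the first file whose id lies in [*index, eindex],
        // fill name/index/size/fversion, and return its magic number (0 if none)
        return findFileInRange(ahandle, index, eindex, name, size, fversion);   // hypothetical
      }
      // unsynced node: report the named file at *index, or return 0 if it is missing;
      // *index must not be updated in this branch
      return reportNamedFile(ahandle, name, *index, size, fversion);            // hypothetical
    }
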
src/mnode/src/mnodeSdb.c

@@ -200,7 +200,7 @@ void sdbUpdateMnodeRoles() {
  mnodeUpdateMnodeIpSet();
}

static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) {
static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion) {
  sdbUpdateMnodeRoles();
  return 0;
}

src/mnode/src/mnodeTable.c

@@ -1352,11 +1352,14 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO
  int32_t tagDataLen = 0;
  int32_t totalCols = 0;
  int32_t contLen = 0;
  if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
    pTagData = (STagData *)pMsg->schema;
    tagDataLen = ntohl(pTagData->dataLen);
  if (pTable->info.type == TSDB_CHILD_TABLE) {
    totalCols = pTable->superTable->numOfColumns + pTable->superTable->numOfTags;
    contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + tagDataLen + pTable->sqlLen;
    if (pMsg != NULL) {
      pTagData = (STagData *)pMsg->schema;
      tagDataLen = ntohl(pTagData->dataLen);
      contLen += tagDataLen;
    }
  } else {
    totalCols = pTable->numOfColumns;
    contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + pTable->sqlLen;

@@ -1410,7 +1413,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO
    memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData->data, tagDataLen);
  }

  if (pTable->info.type == TSDB_STREAM_TABLE && pMsg != NULL) {
  if (pTable->info.type == TSDB_STREAM_TABLE) {
    memcpy(pCreate->data + totalCols * sizeof(SSchema), pTable->sql, pTable->sqlLen);
  }

src/mnode/src/mnodeVgroup.c

@@ -539,6 +539,8 @@ SMDCreateVnodeMsg *mnodeBuildCreateVnodeMsg(SVgObj *pVgroup) {
  SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg));
  if (pVnode == NULL) return NULL;

  strcpy(pVnode->db, pVgroup->dbName);

  SMDVnodeCfg *pCfg = &pVnode->cfg;
  pCfg->vgId = htonl(pVgroup->vgId);
  pCfg->cfgVersion = htonl(pDb->cfgVersion);

@@ -594,7 +596,7 @@ SRpcIpSet mnodeGetIpSetFromIp(char *ep) {
}

void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
  mTrace("vgId:%d, send create vnode:%d msg, ahandle:%p", pVgroup->vgId, pVgroup->vgId, ahandle);
  mTrace("vgId:%d, send create vnode:%d msg, ahandle:%p db:%s", pVgroup->vgId, pVgroup->vgId, ahandle, pVgroup->dbName);
  SMDCreateVnodeMsg *pCreate = mnodeBuildCreateVnodeMsg(pVgroup);
  SRpcMsg rpcMsg = {
    .handle  = ahandle,

src/query/src/qExecutor.c

@@ -2223,24 +2223,26 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
 * set tag value in SQLFunctionCtx
 * e.g.,tag information into input buffer
 */
static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColId, tVariant *param) {
  tVariantDestroy(param);

  char * val = NULL;
  int16_t bytes = 0;
  int16_t type = 0;
static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColId, tVariant *tag, int16_t type, int16_t bytes) {
  tVariantDestroy(tag);

  if (tagColId == TSDB_TBNAME_COLUMN_INDEX) {
    val = tsdbGetTableName(tsdb, pTableId, &bytes);
    type = TSDB_DATA_TYPE_BINARY;
    tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type);
    char* val = tsdbGetTableName(tsdb, pTableId);
    assert(val != NULL);

    tVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), TSDB_DATA_TYPE_BINARY);
  } else {
    tsdbGetTableTagVal(tsdb, pTableId, tagColId, &type, &bytes, &val);
    char* val = tsdbGetTableTagVal(tsdb, pTableId, tagColId, type, bytes);
    if (val == NULL) {
      tag->nType = TSDB_DATA_TYPE_NULL;
      return;
    }

    if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
      tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type);
      tVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), type);
    } else {
      tVariantCreateFromBinary(param, val, bytes, type);
      tVariantCreateFromBinary(tag, val, bytes, type);
    }
  }
}

@@ -2248,25 +2250,29 @@ static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColI
void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, STableId* pTableId, void *tsdb) {
  SQuery *pQuery = pRuntimeEnv->pQuery;

  SSqlFuncMsg *pFuncMsg = &pQuery->pSelectExpr[0].base;
  if (pQuery->numOfOutput == 1 && pFuncMsg->functionId == TSDB_FUNC_TS_COMP) {
    assert(pFuncMsg->numOfParams == 1);
    doSetTagValueInParam(tsdb, pTableId, pFuncMsg->arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag);
  SExprInfo *pExprInfo = &pQuery->pSelectExpr[0];
  if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) {
    assert(pExprInfo->base.numOfParams == 1);
    doSetTagValueInParam(tsdb, pTableId, pExprInfo->base.arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag,
                         pExprInfo->type, pExprInfo->bytes);
  } else {
    // set tag value, by which the results are aggregated.
    for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) {
      SColIndex *pCol = &pQuery->pSelectExpr[idx].base.colInfo;
      SExprInfo *pExprInfo = &pQuery->pSelectExpr[idx];

      // ts_comp column required the tag value for join filter
      if (!TSDB_COL_IS_TAG(pCol->flag)) {
      if (!TSDB_COL_IS_TAG(pExprInfo->base.colInfo.flag)) {
        continue;
      }

      // todo use tag column index to optimize performance
      doSetTagValueInParam(tsdb, pTableId, pCol->colId, &pRuntimeEnv->pCtx[idx].tag);
      doSetTagValueInParam(tsdb, pTableId, pExprInfo->base.colInfo.colId, &pRuntimeEnv->pCtx[idx].tag,
                           pExprInfo->type, pExprInfo->bytes);
    }

    // set the join tag for first column
    SSqlFuncMsg *pFuncMsg = &pExprInfo->base;
    if (pFuncMsg->functionId == TSDB_FUNC_TS && pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
        pRuntimeEnv->pTSBuf != NULL) {
      assert(pFuncMsg->numOfParams == 1);

@@ -5996,7 +6002,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
  num = taosArrayGetSize(pa);
  assert(num == pQInfo->groupInfo.numOfTables);

  int16_t type, bytes;
//  int16_t type, bytes;

  int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
  if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id

@@ -6004,7 +6010,6 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
    SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
    int32_t rsize = pExprInfo->bytes;

    char* data = NULL;

    for(int32_t i = 0; i < num; ++i) {
      SGroupItem* item = taosArrayGet(pa, i);

@@ -6022,8 +6027,25 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
      *(int32_t*) output = pQInfo->vgId;
      output += sizeof(pQInfo->vgId);

      tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, &type, &bytes, &data);
      memcpy(output, data, bytes);
      int16_t bytes = pExprInfo->bytes;
      int16_t type = pExprInfo->type;

      char* val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes);

      // todo refactor
      if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
        if (val == NULL) {
          setVardataNull(output, type);
        } else {
          memcpy(output, val, varDataTLen(val));
        }
      } else {
        if (val == NULL) {
          setNull(output, type, bytes);
        } else {
          memcpy(output, val, bytes);
        }
      }
    }

    qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, num);

@@ -6032,23 +6054,32 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
      SExprInfo* pExprInfo = pQuery->pSelectExpr;
      SGroupItem* item = taosArrayGet(pa, i);

      char* data = NULL;
      for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
        // todo check the return value, refactor codes
        if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
          data = tsdbGetTableName(pQInfo->tsdb, &item->id, &bytes);
          char* data = tsdbGetTableName(pQInfo->tsdb, &item->id);

          char* dst = pQuery->sdata[j]->data + i * (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE);
          memcpy(dst, data, varDataTLen(data));
        } else {// todo refactor, return the true length of binary|nchar data
          tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, &type, &bytes, &data);
          assert(bytes <= pExprInfo[j].bytes && type == pExprInfo[j].type);
        } else {// todo refactor
          int16_t type = pExprInfo[j].type;
          int16_t bytes = pExprInfo[j].bytes;

          char* data = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, type, bytes);
          char* dst = pQuery->sdata[j]->data + i * pExprInfo[j].bytes;

          if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
            memcpy(dst, data, varDataTLen(data));
            if (data == NULL) {
              setVardataNull(dst, type);
            } else {
              memcpy(dst, data, varDataTLen(data));
            }
          } else {
            memcpy(dst, data, bytes);
            if (data == NULL) {
              setNull(dst, type, bytes);
            } else {
              memcpy(dst, data, pExprInfo[j].bytes);
            }
          }
        }
      }

src/rpc/src/rpcMain.c

@@ -218,9 +218,9 @@ void *rpcOpen(const SRpcInit *pInit) {
  pRpc->localPort = pInit->localPort;
  pRpc->afp = pInit->afp;
  pRpc->sessions = pInit->sessions+1;
  if (pInit->user) strcpy(pRpc->user, pInit->user);
  if (pInit->secret) strcpy(pRpc->secret, pInit->secret);
  if (pInit->ckey) strcpy(pRpc->ckey, pInit->ckey);
  if (pInit->user) tstrncpy(pRpc->user, pInit->user, sizeof(pRpc->user));
  if (pInit->secret) memcpy(pRpc->secret, pInit->secret, sizeof(pRpc->secret));
  if (pInit->ckey) tstrncpy(pRpc->ckey, pInit->ckey, sizeof(pRpc->ckey));
  pRpc->spi = pInit->spi;
  pRpc->cfp = pInit->cfp;
  pRpc->afp = pInit->afp;

@@ -434,7 +434,8 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
}

void rpcSendRedirectRsp(void *thandle, const SRpcIpSet *pIpSet) {
  SRpcMsg rpcMsg;
  SRpcMsg rpcMsg;
  memset(&rpcMsg, 0, sizeof(rpcMsg));

  rpcMsg.contLen = sizeof(SRpcIpSet);
  rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen);

src/rpc/src/rpcTcp.c

@@ -253,12 +253,14 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
  if (pthread_mutex_init(&(pThreadObj->mutex), NULL) < 0) {
    tError("%s failed to init TCP client mutex(%s)", label, strerror(errno));
    free(pThreadObj);
    return NULL;
  }

  pThreadObj->pollFd = epoll_create(10);  // size does not matter
  if (pThreadObj->pollFd < 0) {
    tError("%s failed to create TCP client epoll", label);
    free(pThreadObj);
    return NULL;
  }

@@ -269,6 +271,8 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
  int code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessTcpData, (void *)(pThreadObj));
  pthread_attr_destroy(&thattr);
  if (code != 0) {
    close(pThreadObj->pollFd);
    free(pThreadObj);
    tError("%s failed to create TCP read data thread(%s)", label, strerror(errno));
    return NULL;
  }

@@ -292,7 +296,7 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
  SThreadObj *pThreadObj = shandle;

  int fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
  if (fd <= 0) return NULL;
  if (fd < 0) return NULL;

  SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd);

src/rpc/src/rpcUdp.c

@@ -192,7 +192,7 @@ static void *taosRecvUdpData(void *param) {
    char *tmsg = malloc(dataLen + tsRpcOverhead);
    if (NULL == tmsg) {
      tError("%s failed to allocate memory, size:%d", pConn->label, dataLen);
      tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen);
      continue;
    }

src/rpc/test/rclient.c

@@ -14,6 +14,7 @@
 */

#include "os.h"
#include "tutil.h"
#include "tglobal.h"
#include "rpcLog.h"
#include "trpc.h"

@@ -105,7 +106,7 @@ int main(int argc, char *argv[]) {
    if (strcmp(argv[i], "-p")==0 && i < argc-1) {
      ipSet.port[0] = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
      strcpy(ipSet.fqdn[0], argv[++i]);
      tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn));
    } else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
      rpcInit.numOfThreads = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-m")==0 && i < argc-1) {

src/rpc/test/rsclient.c

@@ -15,6 +15,7 @@
#include "os.h"
#include "tutil.h"
#include "tglobal.h"
#include "rpcLog.h"
#include "trpc.h"

@@ -106,7 +107,7 @@ int main(int argc, char *argv[]) {
    if (strcmp(argv[i], "-p")==0 && i < argc-1) {
      ipSet.port[0] = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
      strcpy(ipSet.fqdn[0], argv[++i]);
      tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
    } else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
      rpcInit.numOfThreads = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-m")==0 && i < argc-1) {

src/rpc/test/rserver.c

@@ -69,6 +69,7 @@ void processShellMsg() {
      taosGetQitem(qall, &type, (void **)&pRpcMsg);
      rpcFreeCont(pRpcMsg->pCont);

      memset(&rpcMsg, 0, sizeof(rpcMsg));
      rpcMsg.pCont = rpcMallocCont(msgSize);
      rpcMsg.contLen = msgSize;
      rpcMsg.handle = pRpcMsg->handle;

src/tsdb/inc/tsdbMain.h

@@ -69,12 +69,13 @@ typedef struct {
} SMemTable;

// ---------- TSDB TABLE DEFINITION
#define TSDB_MAX_TABLE_SCHEMAS 16
typedef struct STable {
  int8_t         type;
  STableId       tableId;
  uint64_t       superUid;  // Super table UID
  int32_t        sversion;
  STSchema *     schema;
  int16_t        numOfSchemas;
  STSchema **    schema;
  STSchema *     tagSchema;
  SKVRow         tagVal;
  SMemTable *    mem;

@@ -122,7 +123,6 @@ typedef struct STableIndexElem {
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables, void *pRepo);
int32_t    tsdbFreeMeta(STsdbMeta *pMeta);
STSchema * tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable);
STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable);

// ---- Operation on STable

@@ -502,11 +502,20 @@ int tsdbWriteCompInfo(SRWHelper *pHelper);
int tsdbWriteCompIdx(SRWHelper *pHelper);

// --------- Other functions need to further organize
void tsdbFitRetention(STsdbRepo *pRepo);
int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
void tsdbAdjustCacheBlocks(STsdbCache *pCache);
int32_t tsdbGetMetaFileName(char *rootDir, char *fname);
int tsdbUpdateFileHeader(SFile *pFile, uint32_t version);
void      tsdbFitRetention(STsdbRepo *pRepo);
int       tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
void      tsdbAdjustCacheBlocks(STsdbCache *pCache);
int32_t   tsdbGetMetaFileName(char *rootDir, char *fname);
int       tsdbUpdateFileHeader(SFile *pFile, uint32_t version);
int       tsdbUpdateTable(STsdbMeta *pMeta, STable *pTable, STableCfg *pCfg);
int       tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
int       tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
STSchema *tsdbGetTableSchemaByVersion(STsdbMeta *pMeta, STable *pTable, int16_t version);
STSchema *tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable);

#define DEFAULT_TAG_INDEX_COLUMN 0  // skip list built based on the first column of tags

int compFGroupKey(const void *key, const void *fgroup);

#ifdef __cplusplus
}

src/tsdb/src/tsdbFile.c

@@ -35,7 +35,6 @@ const char *tsdbFileSuffix[] = {
  ".last"  // TSDB_FILE_TYPE_LAST
};

static int compFGroupKey(const void *key, const void *fgroup);
static int compFGroup(const void *arg1, const void *arg2);
static int tsdbOpenFGroup(STsdbFileH *pFileH, char *dataDir, int fid);

@@ -285,7 +284,7 @@ int tsdbCopyBlockDataInFile(SFile *pOutFile, SFile *pInFile, SCompInfo *pCompInf
  return 0;
}

static int compFGroupKey(const void *key, const void *fgroup) {
int compFGroupKey(const void *key, const void *fgroup) {
  int         fid = *(int *)key;
  SFileGroup *pFGroup = (SFileGroup *)fgroup;
  if (fid == pFGroup->fileId) {

src/tsdb/src/tsdbMain.c
浏览文件 @
8fa42af9
...
...
@@ -410,6 +410,61 @@ int tsdbAlterTable(TsdbRepoT *pRepo, STableCfg *pCfg) {
  return 0;
}

int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;
  STsdbMeta *pMeta = pRepo->tsdbMeta;

  int16_t tversion = htons(pMsg->tversion);

  STable *pTable = tsdbGetTableByUid(pMeta, htobe64(pMsg->uid));
  if (pTable == NULL) return TSDB_CODE_INVALID_TABLE_ID;
  if (pTable->tableId.tid != htonl(pMsg->tid)) return TSDB_CODE_INVALID_TABLE_ID;

  if (pTable->type != TSDB_CHILD_TABLE) {
    tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", pRepo->config.tsdbId,
              varDataVal(pTable->name), pTable->type);
    return TSDB_CODE_INVALID_TABLE_TYPE;
  }

  if (schemaVersion(tsdbGetTableTagSchema(pMeta, pTable)) < tversion) {
    tsdbTrace("vgId:%d server tag version %d is older than client tag version %d, try to config", pRepo->config.tsdbId,
              schemaVersion(tsdbGetTableTagSchema(pMeta, pTable)), tversion);
    void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, htonl(pMsg->tid));
    if (msg == NULL) {
      return terrno;
    }
    // Deal with error her
    STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
    STable *super = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
    ASSERT(super != NULL);

    int32_t code = tsdbUpdateTable(pMeta, super, pTableCfg);
    if (code != TSDB_CODE_SUCCESS) {
      return code;
    }
    tsdbClearTableCfg(pTableCfg);
    rpcFreeCont(msg);
  }

  STSchema *pTagSchema = tsdbGetTableTagSchema(pMeta, pTable);

  if (schemaVersion(pTagSchema) > tversion) {
    tsdbError(
        "vgId:%d failed to update tag value of table %s since version out of date, client tag version:%d server tag "
        "version:%d",
        pRepo->config.tsdbId, varDataVal(pTable->name), tversion, schemaVersion(pTable->tagSchema));
    return TSDB_CODE_TAG_VER_OUT_OF_DATE;
  }
  if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
    tsdbRemoveTableFromIndex(pMeta, pTable);
  }
  // TODO: remove table from index if it is the first column of tag
  tdSetKVRowDataOfCol(&pTable->tagVal, htons(pMsg->colId), htons(pMsg->type), pMsg->data);
  if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
    tsdbAddTableIntoIndex(pMeta, pTable);
  }
  return TSDB_CODE_SUCCESS;
}

TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;
...
...
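As an aside, the tag-update hunk above follows a common "remove, mutate, re-insert" pattern for values that may be the key of a secondary index. The following is only a minimal self-contained sketch of that pattern with hypothetical toy types (Row, Index); it is not the tsdb API and is not part of this commit.

#include <stdio.h>
#include <string.h>

// Hypothetical toy "index": row pointers kept sorted by one key column.
typedef struct { int id; int key; } Row;
typedef struct { Row *rows[16]; int n; } Index;

static void index_remove(Index *ix, Row *r) {
  for (int i = 0; i < ix->n; i++) {
    if (ix->rows[i] == r) {
      memmove(&ix->rows[i], &ix->rows[i + 1], (ix->n - i - 1) * sizeof(Row *));
      ix->n--;
      return;
    }
  }
}

static void index_insert(Index *ix, Row *r) {
  int i = ix->n++;
  while (i > 0 && ix->rows[i - 1]->key > r->key) { ix->rows[i] = ix->rows[i - 1]; i--; }
  ix->rows[i] = r;
}

// Update a column; re-index only when the indexed column is the one that changed.
static void update_col(Index *ix, Row *r, int col_is_indexed, int new_val) {
  if (col_is_indexed) index_remove(ix, r);   // same shape as removing the table from the index
  r->key = new_val;                          // same shape as rewriting the tag value
  if (col_is_indexed) index_insert(ix, r);   // same shape as adding the table back
}

int main(void) {
  Row a = {1, 5}, b = {2, 9};
  Index ix = {{0}, 0};
  index_insert(&ix, &a);
  index_insert(&ix, &b);
  update_col(&ix, &a, 1, 20);
  for (int i = 0; i < ix.n; i++) printf("id=%d key=%d\n", ix.rows[i]->id, ix.rows[i]->key);
  return 0;
}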
@@ -559,12 +614,15 @@ int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup) {
}

void tsdbClearTableCfg(STableCfg *config) {
  if (config->schema) tdFreeSchema(config->schema);
  if (config->tagSchema) tdFreeSchema(config->tagSchema);
  if (config->tagValues) kvRowFree(config->tagValues);
  tfree(config->name);
  tfree(config->sname);
  tfree(config->sql);
  if (config) {
    if (config->schema) tdFreeSchema(config->schema);
    if (config->tagSchema) tdFreeSchema(config->tagSchema);
    if (config->tagValues) kvRowFree(config->tagValues);
    tfree(config->name);
    tfree(config->sname);
    tfree(config->sql);
    free(config);
  }
}

int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
...
@@ -883,6 +941,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;
  STsdbMeta *pMeta = pRepo->tsdbMeta;

  STableId tableId = {.uid = pBlock->uid, .tid = pBlock->tid};
  STable *pTable = tsdbIsValidTableToInsert(pRepo->tsdbMeta, tableId);
...
@@ -892,6 +951,39 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
    return TSDB_CODE_INVALID_TABLE_ID;
  }

  // Check schema version
  int32_t tversion = pBlock->sversion;
  int16_t nversion = schemaVersion(tsdbGetTableSchema(pMeta, pTable));
  if (tversion > nversion) {
    tsdbTrace("vgId:%d table:%s tid:%d server schema version %d is older than clien version %d, try to config.",
              pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, nversion, tversion);
    void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pTable->tableId.tid);
    if (msg == NULL) {
      return terrno;
    }
    // Deal with error her
    STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
    STable *pTableUpdate = NULL;
    if (pTable->type == TSDB_CHILD_TABLE) {
      pTableUpdate = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
    } else {
      pTableUpdate = pTable;
    }

    int32_t code = tsdbUpdateTable(pMeta, pTableUpdate, pTableCfg);
    if (code != TSDB_CODE_SUCCESS) {
      return code;
    }
    tsdbClearTableCfg(pTableCfg);
    rpcFreeCont(msg);
  } else {
    if (tsdbGetTableSchemaByVersion(pMeta, pTable, tversion) == NULL) {
      tsdbError("vgId:%d table:%s tid:%d invalid schema version %d from client", pRepo->config.tsdbId,
                varDataVal(pTable->name), pTable->tableId.tid, tversion);
      return TSDB_CODE_TABLE_SCHEMA_VERSION;
    }
  }

  SSubmitBlkIter blkIter = {0};
  SDataRow row = NULL;
...
@@ -916,9 +1008,10 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
  return TSDB_CODE_SUCCESS;
}

static int tsdbReadRowsFromCache(SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols) {
static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey,
                                 int maxRowsToRead, SDataCols *pCols) {
  ASSERT(maxRowsToRead > 0);
  if (pIter == NULL) return 0;
  STSchema *pSchema = NULL;

  int numOfRows = 0;
...
@@ -931,7 +1024,15 @@ static int tsdbReadRowsFromCache(SSkipListIterator *pIter, TSKEY maxKey, int max
    SDataRow row = SL_GET_NODE_DATA(node);
    if (dataRowKey(row) > maxKey) break;

    tdAppendDataRowToDataCol(row, pCols);
    if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
      pSchema = tsdbGetTableSchemaByVersion(pMeta, pTable, dataRowVersion(row));
      if (pSchema == NULL) {
        // TODO: deal with the error here
        ASSERT(false);
      }
    }

    tdAppendDataRowToDataCol(row, pSchema, pCols);
    numOfRows++;
  } while (tSkipListIterNext(pIter));
...
...
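The loop above only re-resolves the table schema when a row's version differs from the one already cached in pSchema, so a run of rows written under the same schema costs a single lookup. Below is a minimal self-contained sketch of that caching pattern with made-up Schema/Row types; it is illustrative only and not part of the commit.

#include <stdio.h>

typedef struct { int version; const char *desc; } Schema;
typedef struct { int version; int value; } Row;

// Hypothetical lookup; assume it is as costly as a search per call.
static const Schema *get_schema_by_version(const Schema *all, int n, int version) {
  for (int i = 0; i < n; i++)
    if (all[i].version == version) return &all[i];
  return NULL;
}

int main(void) {
  const Schema schemas[] = {{1, "v1: ts,speed"}, {2, "v2: ts,speed,temp"}};
  const Row rows[] = {{1, 10}, {1, 11}, {2, 12}, {2, 13}, {1, 14}};
  const Schema *cached = NULL;
  int lookups = 0;

  for (int i = 0; i < 5; i++) {
    if (cached == NULL || cached->version != rows[i].version) {  // refresh only on a version change
      cached = get_schema_by_version(schemas, 2, rows[i].version);
      lookups++;
    }
    printf("row %d decoded with %s\n", rows[i].value, cached->desc);
  }
  printf("%d rows, %d schema lookups\n", 5, lookups);
  return 0;
}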
@@ -1081,7 +1182,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters
  int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5;
  int nLoop = 0;
  while (true) {
    int rowsRead = tsdbReadRowsFromCache(pIter, maxKey, maxRowsToRead, pDataCols);
    int rowsRead = tsdbReadRowsFromCache(pMeta, pTable, pIter, maxKey, maxRowsToRead, pDataCols);
    assert(rowsRead >= 0);
    if (pDataCols->numOfRows == 0) break;
    nLoop++;
...
@@ -1199,46 +1300,71 @@ static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
  tsdbTrace("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
}

uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, int32_t *size) {
// TODO: need to refactor this function
#define TSDB_META_FILE_INDEX 10000000
uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, uint32_t eindex, int32_t *size) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;
  // STsdbMeta *pMeta = pRepo->tsdbMeta;
  STsdbFileH *pFileH = pRepo->tsdbFileH;
  uint32_t magic = 0;
  char fname[256] = "\0";
  uint32_t magic = 0;
  char fname[256] = "\0";

  struct stat fState;

  char *spath = strdup(pRepo->rootDir);
  char *prefixDir = dirname(spath);

  if (name[0] == 0) {
    // Map index to the file name
  tsdbTrace("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *index, eindex);
  ASSERT(*index <= eindex);

  char *sdup = strdup(pRepo->rootDir);
  char *prefix = dirname(sdup);

  if (name[0] == 0) {  // get the file from index or after, but not larger than eindex
    int fid = (*index) / 3;

    if (fid >= pFileH->numOfFGroups) {
      // return meta data file
      if ((*index) % 3 > 0) {
        // it is finished
        tfree(spath);
    if (pFileH->numOfFGroups == 0 || fid > pFileH->fGroup[pFileH->numOfFGroups - 1].fileId) {
      if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) {
        tsdbGetMetaFileName(pRepo->rootDir, fname);
        *index = TSDB_META_FILE_INDEX;
      } else {
        tfree(sdup);
        return 0;
      }
    } else {
      SFileGroup *pFGroup =
          taosbsearch(&fid, pFileH->fGroup, pFileH->numOfFGroups, sizeof(SFileGroup), compFGroupKey, TD_GE);
      if (pFGroup->fileId == fid) {
        strcpy(fname, pFGroup->files[(*index) % 3].fname);
      } else {
        tsdbGetMetaFileName(pRepo->rootDir, fname);
        if (pFGroup->fileId * 3 + 2 < eindex) {
          strcpy(fname, pFGroup->files[0].fname);
          *index = pFGroup->fileId * 3;
        } else {
          tfree(sdup);
          return 0;
        }
      }
    }
    strcpy(name, fname + strlen(prefix));
    } else {
      // get the named file at the specified index. If not there, return 0
      if (*index == TSDB_META_FILE_INDEX) {
        // get meta file
        tsdbGetMetaFileName(pRepo->rootDir, fname);
      } else {
        // return data file name
        strcpy(fname, pFileH->fGroup[fid].files[(*index) % 3].fname);
    int fid = (*index) / 3;
    SFileGroup *pFGroup = tsdbSearchFGroup(pFileH, fid);
    if (pFGroup == NULL) {  // not found
      tfree(sdup);
      return 0;
    }

    SFile *pFile = &pFGroup->files[(*index) % 3];
    strcpy(fname, pFile->fname);
      }
      strcpy(name, fname + strlen(spath));
  } else {
    // Name is provided, need to get the file info
    sprintf(fname, "%s/%s", prefixDir, name);
  }

  if (stat(fname, &fState) < 0) {
    tfree(spath);
    tfree(sdup);
    return 0;
  }

  tfree(sdup);
  *size = fState.st_size;
  magic = *size;
...
...
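The reworked tsdbGetFileInfo maps a flat sync index onto a file group and a file within the group with index/3 and index%3, and reserves a large sentinel (TSDB_META_FILE_INDEX) for the meta file. The sketch below only illustrates that arithmetic; the suffix order and names are assumptions for the example, not taken from the commit.

#include <stdio.h>

#define FILES_PER_GROUP 3          /* one group holds three data-related files */
#define META_FILE_INDEX 10000000   /* sentinel well above any fid * 3 + 2 */

// Decode a flat index into a file-group id and a file slot within the group.
static void decode_index(unsigned index, int *fid, int *slot, int *is_meta) {
  *is_meta = (index == META_FILE_INDEX);
  *fid = (int)(index / FILES_PER_GROUP);
  *slot = (int)(index % FILES_PER_GROUP);
}

int main(void) {
  unsigned samples[] = {0, 1, 5, 1841 * 3 + 2, META_FILE_INDEX};
  for (int i = 0; i < 5; i++) {
    int fid, slot, is_meta;
    decode_index(samples[i], &fid, &slot, &is_meta);
    if (is_meta)
      printf("index %u -> meta file\n", samples[i]);
    else
      printf("index %u -> fileId %d, slot %d within the group\n", samples[i], fid, slot);
  }
  return 0;
}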
src/tsdb/src/tsdbMeta.c
...
...
@@ -8,13 +8,10 @@
#define TSDB_SUPER_TABLE_SL_LEVEL 5 // TODO: may change here
// #define TSDB_META_FILE_NAME "META"
const int32_t DEFAULT_TAG_INDEX_COLUMN = 0;  // skip list built based on the first column of tags

static int     tsdbFreeTable(STable *pTable);
static int32_t tsdbCheckTableCfg(STableCfg *pCfg);
static int     tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx);
static int     tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
static int     tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
static int     tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFromIdx);
/**
...
...
@@ -37,19 +34,24 @@ void tsdbEncodeTable(STable *pTable, char *buf, int *contLen) {
  ptr = (char *)ptr + sizeof(int);
  memcpy(ptr, varDataVal(pTable->name), varDataLen(pTable->name));
  ptr = (char *)ptr + varDataLen(pTable->name);

  T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, uid);
  T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, tid);
  T_APPEND_MEMBER(ptr, pTable, STable, superUid);
  T_APPEND_MEMBER(ptr, pTable, STable, sversion);

  if (pTable->type == TSDB_SUPER_TABLE) {
    ptr = tdEncodeSchema(ptr, pTable->schema);
    T_APPEND_MEMBER(ptr, pTable, STable, numOfSchemas);
    for (int i = 0; i < pTable->numOfSchemas; i++) {
      ptr = tdEncodeSchema(ptr, pTable->schema[i]);
    }
    ptr = tdEncodeSchema(ptr, pTable->tagSchema);
  } else if (pTable->type == TSDB_CHILD_TABLE) {
    ptr = tdEncodeKVRow(ptr, pTable->tagVal);
  } else {
    ptr = tdEncodeSchema(ptr, pTable->schema);
    T_APPEND_MEMBER(ptr, pTable, STable, numOfSchemas);
    for (int i = 0; i < pTable->numOfSchemas; i++) {
      ptr = tdEncodeSchema(ptr, pTable->schema[i]);
    }
  }

  if (pTable->type == TSDB_STREAM_TABLE) {
...
@@ -72,6 +74,11 @@ void tsdbEncodeTable(STable *pTable, char *buf, int *contLen) {
STable *tsdbDecodeTable(void *cont, int contLen) {
  STable *pTable = (STable *)calloc(1, sizeof(STable));
  if (pTable == NULL) return NULL;
  pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
  if (pTable->schema == NULL) {
    free(pTable);
    return NULL;
  }

  void *ptr = cont;
  T_READ_MEMBER(ptr, int8_t, pTable->type);
...
@@ -79,29 +86,34 @@ STable *tsdbDecodeTable(void *cont, int contLen) {
  ptr = (char *)ptr + sizeof(int);
  pTable->name = calloc(1, len + VARSTR_HEADER_SIZE + 1);
  if (pTable->name == NULL) return NULL;

  varDataSetLen(pTable->name, len);
  memcpy(pTable->name->data, ptr, len);

  ptr = (char *)ptr + len;
  T_READ_MEMBER(ptr, uint64_t, pTable->tableId.uid);
  T_READ_MEMBER(ptr, int32_t, pTable->tableId.tid);
  T_READ_MEMBER(ptr, uint64_t, pTable->superUid);
  T_READ_MEMBER(ptr, int32_t, pTable->sversion);

  if (pTable->type == TSDB_SUPER_TABLE) {
    pTable->schema = tdDecodeSchema(&ptr);
    T_READ_MEMBER(ptr, int16_t, pTable->numOfSchemas);
    for (int i = 0; i < pTable->numOfSchemas; i++) {
      pTable->schema[i] = tdDecodeSchema(&ptr);
    }
    pTable->tagSchema = tdDecodeSchema(&ptr);
  } else if (pTable->type == TSDB_CHILD_TABLE) {
    ptr = tdDecodeKVRow(ptr, &pTable->tagVal);
  } else {
    pTable->schema = tdDecodeSchema(&ptr);
    T_READ_MEMBER(ptr, int16_t, pTable->numOfSchemas);
    for (int i = 0; i < pTable->numOfSchemas; i++) {
      pTable->schema[i] = tdDecodeSchema(&ptr);
    }
  }

  if (pTable->type == TSDB_STREAM_TABLE) {
    ptr = taosDecodeString(ptr, &(pTable->sql));
  }

  pTable->lastKey = TSKEY_INITIAL_VAL;
  return pTable;
}
...
@@ -223,18 +235,45 @@ int32_t tsdbFreeMeta(STsdbMeta *pMeta) {
  return 0;
}

// Get the newest table schema
STSchema *tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable) {
  if (pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_STREAM_TABLE) {
    return pTable->schema;
    return pTable->schema[pTable->numOfSchemas - 1];
  } else if (pTable->type == TSDB_CHILD_TABLE) {
    STable *pSuper = tsdbGetTableByUid(pMeta, pTable->superUid);
    if (pSuper == NULL) return NULL;
    return pSuper->schema;
    return pSuper->schema[pSuper->numOfSchemas - 1];
  } else {
    return NULL;
  }
}

static int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
  if (*(int16_t *)key1 < (*(STSchema **)key2)->version) {
    return -1;
  } else if (*(int16_t *)key1 > (*(STSchema **)key2)->version) {
    return 1;
  } else {
    return 0;
  }
}

STSchema *tsdbGetTableSchemaByVersion(STsdbMeta *pMeta, STable *pTable, int16_t version) {
  STable *pSearchTable = NULL;
  if (pTable->type == TSDB_CHILD_TABLE) {
    pSearchTable = tsdbGetTableByUid(pMeta, pTable->superUid);
  } else {
    pSearchTable = pTable;
  }
  ASSERT(pSearchTable != NULL);

  void *ptr = taosbsearch(&version, pSearchTable->schema, pSearchTable->numOfSchemas, sizeof(STSchema *),
                          tsdbCompareSchemaVersion, TD_EQ);
  if (ptr == NULL) return NULL;

  return *(STSchema **)ptr;
}

STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable) {
  if (pTable->type == TSDB_SUPER_TABLE) {
    return pTable->tagSchema;
...
...
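tsdbGetTableSchemaByVersion keeps the schema history sorted by version and binary-searches an array of pointers, so the comparator has to dereference the array element once more than the search key. The same comparator shape can be shown with the standard bsearch; the code below is an illustrative sketch with made-up names, not the tsdb implementation.

#include <stdio.h>
#include <stdlib.h>

typedef struct { short version; int numOfCols; } Schema;

// key1 points at the searched version; key2 points at an array element,
// i.e. at a Schema pointer, so it is dereferenced twice.
static int cmp_version(const void *key1, const void *key2) {
  short v = *(const short *)key1;
  const Schema *s = *(const Schema *const *)key2;
  if (v < s->version) return -1;
  if (v > s->version) return 1;
  return 0;
}

int main(void) {
  Schema s1 = {1, 2}, s2 = {3, 3}, s3 = {7, 4};
  Schema *history[] = {&s1, &s2, &s3};  // sorted by version, newest last

  short want = 3;
  Schema **hit = bsearch(&want, history, 3, sizeof(Schema *), cmp_version);
  printf("version %d -> %s (%d cols)\n", want, hit ? "found" : "missing", hit ? (*hit)->numOfCols : 0);

  want = 5;
  hit = bsearch(&want, history, 3, sizeof(Schema *), cmp_version);
  printf("version %d -> %s\n", want, hit ? "found" : "missing");
  return 0;
}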
@@ -247,45 +286,33 @@ STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable) {
  }
}

int32_t tsdbGetTableTagVal(TsdbRepoT *repo, STableId *id, int32_t colId, int16_t *type, int16_t *bytes, char **val) {
void *tsdbGetTableTagVal(TsdbRepoT *repo, const STableId *id, int32_t colId, int16_t type, int16_t bytes) {
  STsdbMeta *pMeta = tsdbGetMeta(repo);
  STable *pTable = tsdbGetTableByUid(pMeta, id->uid);

  STSchema *pSchema = tsdbGetTableTagSchema(pMeta, pTable);
  STColumn *pCol = tdGetColOfID(pSchema, colId);
  if (pCol == NULL) {
    return -1;  // No matched tag volumn
    return NULL;  // No matched tag volumn
  }

  *val = tdGetKVRowValOfCol(pTable->tagVal, colId);
  *type = pCol->type;
  char *val = tdGetKVRowValOfCol(pTable->tagVal, colId);
  assert(type == pCol->type && bytes == pCol->bytes);

  if (*val != NULL) {
    if (IS_VAR_DATA_TYPE(*type)) {
      *bytes = varDataLen(*val);
    } else {
      *bytes = TYPE_BYTES[*type];
    }
  if (val != NULL && IS_VAR_DATA_TYPE(type)) {
    assert(varDataLen(val) < pCol->bytes);
  }

  return TSDB_CODE_SUCCESS;
  return val;
}

char *tsdbGetTableName(TsdbRepoT *repo, const STableId *id, int16_t *bytes) {
char *tsdbGetTableName(TsdbRepoT *repo, const STableId *id) {
  STsdbMeta *pMeta = tsdbGetMeta(repo);
  STable *pTable = tsdbGetTableByUid(pMeta, id->uid);

  if (pTable == NULL) {
    if (bytes != NULL) {
      *bytes = 0;
    }
    return NULL;
  } else {
    if (bytes != NULL) {
      *bytes = varDataLen(pTable->name);
    }
    return (char *)pTable->name;
  }
}
...
@@ -301,13 +328,16 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
  }

  pTable->type = pCfg->type;
  pTable->numOfSchemas = 0;

  if (isSuper) {
    pTable->type = TSDB_SUPER_TABLE;
    pTable->tableId.uid = pCfg->superUid;
    pTable->tableId.tid = -1;
    pTable->superUid = TSDB_INVALID_SUPER_TABLE_ID;
    pTable->schema = tdDupSchema(pCfg->schema);
    pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
    pTable->numOfSchemas = 1;
    pTable->schema[0] = tdDupSchema(pCfg->schema);
    pTable->tagSchema = tdDupSchema(pCfg->tagSchema);

    tsize = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN);
...
@@ -342,14 +372,18 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
  if (pCfg->type == TSDB_CHILD_TABLE) {
    pTable->superUid = pCfg->superUid;
    pTable->tagVal = tdKVRowDup(pCfg->tagValues);
  } else if (pCfg->type == TSDB_NORMAL_TABLE) {
    pTable->superUid = -1;
    pTable->schema = tdDupSchema(pCfg->schema);
  } else {
    ASSERT(pCfg->type == TSDB_STREAM_TABLE);
    pTable->superUid = -1;
    pTable->schema = tdDupSchema(pCfg->schema);
    pTable->sql = strdup(pCfg->sql);
    pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
    pTable->numOfSchemas = 1;
    pTable->schema[0] = tdDupSchema(pCfg->schema);

    if (pCfg->type == TSDB_NORMAL_TABLE) {
      pTable->superUid = -1;
    } else {
      ASSERT(pCfg->type == TSDB_STREAM_TABLE);
      pTable->superUid = -1;
      pTable->sql = strdup(pCfg->sql);
    }
  }
}
...
@@ -360,6 +394,56 @@ _err:
  return NULL;
}

static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema) {
  ASSERT(pTable->type == TSDB_SUPER_TABLE);
  ASSERT(schemaVersion(pTable->tagSchema) < schemaVersion(newSchema));
  STSchema *pOldSchema = pTable->tagSchema;
  STSchema *pNewSchema = tdDupSchema(newSchema);
  if (pNewSchema == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
  pTable->tagSchema = pNewSchema;
  tdFreeSchema(pOldSchema);

  return TSDB_CODE_SUCCESS;
}

int tsdbUpdateTable(STsdbMeta *pMeta, STable *pTable, STableCfg *pCfg) {
  ASSERT(pTable->type != TSDB_CHILD_TABLE);
  bool isChanged = false;

  if (pTable->type == TSDB_SUPER_TABLE) {
    if (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema)) {
      int32_t code = tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema);
      if (code != TSDB_CODE_SUCCESS) return code;
    }
    isChanged = true;
  }

  STSchema *pTSchema = tsdbGetTableSchema(pMeta, pTable);
  if (schemaVersion(pTSchema) < schemaVersion(pCfg->schema)) {
    if (pTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
      pTable->schema[pTable->numOfSchemas++] = tdDupSchema(pCfg->schema);
    } else {
      ASSERT(pTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
      STSchema *tSchema = tdDupSchema(pCfg->schema);
      tdFreeSchema(pTable->schema[0]);
      memmove(pTable->schema, pTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
      pTable->schema[pTable->numOfSchemas - 1] = tSchema;
    }

    isChanged = true;
  }

  if (isChanged) {
    char *buf = malloc(1024 * 1024);
    int bufLen = 0;
    tsdbEncodeTable(pTable, buf, &bufLen);
    tsdbInsertMetaRecord(pMeta->mfh, pTable->tableId.uid, buf, bufLen);
    free(buf);
  }

  return TSDB_CODE_SUCCESS;
}

int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;
  STsdbMeta *pMeta = pRepo->tsdbMeta;
...
...
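tsdbUpdateTable keeps a fixed-size history of schemas: a newer schema is appended while there is room, and once the array is full the oldest entry is dropped with memmove before the newest is stored at the tail. The following standalone sketch shows that bounded-history update with an assumed capacity of 4; it is an illustration, not the commit's code.

#include <stdio.h>
#include <string.h>

#define MAX_SCHEMAS 4

typedef struct { int version; } Schema;

typedef struct {
  Schema *history[MAX_SCHEMAS];
  int     n;
} Table;

// Keep at most MAX_SCHEMAS versions: evict the oldest when the array is full.
static void push_schema(Table *t, Schema *s) {
  if (t->n < MAX_SCHEMAS) {
    t->history[t->n++] = s;
  } else {
    // a heap-allocated history[0] would be freed here
    memmove(t->history, t->history + 1, sizeof(Schema *) * (MAX_SCHEMAS - 1));
    t->history[MAX_SCHEMAS - 1] = s;
  }
}

int main(void) {
  Schema v[6] = {{1}, {2}, {3}, {4}, {5}, {6}};
  Table t = {{0}, 0};
  for (int i = 0; i < 6; i++) push_schema(&t, &v[i]);
  for (int i = 0; i < t.n; i++) printf("slot %d holds version %d\n", i, t.history[i]->version);
  return 0;  // the two oldest versions were evicted; slots hold versions 3..6
}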
@@ -384,6 +468,8 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
    if (super == NULL) return -1;
  } else {
    if (super->type != TSDB_SUPER_TABLE) return -1;
    if (super->tableId.uid != pCfg->superUid) return -1;
    tsdbUpdateTable(pMeta, super, pCfg);
  }
}
...
@@ -458,23 +544,30 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
  if (tsdbTableSetName(pCfg, pMsg->tableId, true) < 0) goto _err;

  if (numOfTags > 0) {
    int   accBytes = 0;
    char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);
    SKVRowBuilder kvRowBuilder = {0};

    // Decode tag schema
    tdResetTSchemaBuilder(&schemaBuilder, htonl(pMsg->tversion));
    if (tdInitKVRowBuilder(&kvRowBuilder) < 0) goto _err;
    for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
      tdAddColToSchema(&schemaBuilder, pSchema[i].type, htons(pSchema[i].colId), htons(pSchema[i].bytes));
      tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes);
      accBytes += htons(pSchema[i].bytes);
    }

    if (tsdbTableSetTagSchema(pCfg, tdGetSchemaFromBuilder(&schemaBuilder), false) < 0) goto _err;
    if (tsdbTableSetSName(pCfg, pMsg->superTableId, true) < 0) goto _err;
    if (tsdbTableSetSuperUid(pCfg, htobe64(pMsg->superTableUid)) < 0) goto _err;

    tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
    tdDestroyKVRowBuilder(&kvRowBuilder);

    // Decode tag values
    if (pMsg->tagDataLen) {
      int   accBytes = 0;
      char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);

      SKVRowBuilder kvRowBuilder = {0};
      if (tdInitKVRowBuilder(&kvRowBuilder) < 0) goto _err;
      for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
        tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes);
        accBytes += htons(pSchema[i].bytes);
      }

      tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
      tdDestroyKVRowBuilder(&kvRowBuilder);
    }
  }

  if (pMsg->tableType == TSDB_STREAM_TABLE) {
...
@@ -535,7 +628,7 @@ static int tsdbFreeTable(STable *pTable) {
  if (pTable->type == TSDB_CHILD_TABLE) {
    kvRowFree(pTable->tagVal);
  } else {
    tdFreeSchema(pTable->schema);
    for (int i = 0; i < pTable->numOfSchemas; i++) tdFreeSchema(pTable->schema[i]);
  }

  if (pTable->type == TSDB_STREAM_TABLE) {
...
@@ -597,9 +690,10 @@ static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx) {
  }

  // Update the pMeta->maxCols and pMeta->maxRowBytes
  if (pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_NORMAL_TABLE) {
    if (schemaNCols(pTable->schema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pTable->schema);
    int bytes = dataRowMaxBytesFromSchema(pTable->schema);
  if (pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_STREAM_TABLE) {
    if (schemaNCols(pTable->schema[pTable->numOfSchemas - 1]) > pMeta->maxCols)
      pMeta->maxCols = schemaNCols(pTable->schema[pTable->numOfSchemas - 1]);
    int bytes = dataRowMaxBytesFromSchema(pTable->schema[pTable->numOfSchemas - 1]);
    if (bytes > pMeta->maxRowBytes) pMeta->maxRowBytes = bytes;
  }
...
@@ -648,7 +742,7 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFrom
  return 0;
}

static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
  assert(pTable->type == TSDB_CHILD_TABLE && pTable != NULL);
  STable *pSTable = tsdbGetTableByUid(pMeta, pTable->superUid);
  assert(pSTable != NULL);
...
@@ -673,7 +767,7 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
  return 0;
}

static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
  assert(pTable->type == TSDB_CHILD_TABLE && pTable != NULL);
  STable *pSTable = tsdbGetTableByUid(pMeta, pTable->superUid);
...
src/tsdb/src/tsdbRWHelper.c
...
...
@@ -289,8 +289,8 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
  pHelper->tableInfo.tid = pTable->tableId.tid;
  pHelper->tableInfo.uid = pTable->tableId.uid;
  pHelper->tableInfo.sversion = pTable->sversion;

  STSchema *pSchema = tsdbGetTableSchema(pRepo->tsdbMeta, pTable);
  pHelper->tableInfo.sversion = schemaVersion(pSchema);

  tdInitDataCols(pHelper->pDataCols[0], pSchema);
  tdInitDataCols(pHelper->pDataCols[1], pSchema);
...
src/util/inc/tutil.h
...
...
@@ -42,6 +42,11 @@ extern "C" {
} \
}
#define tstrncpy(dst, src, size) do { \
strncpy((dst), (src), (size)); \
(dst)[(size) - 1] = 0; \
} while (0);
#define tclose(x) taosCloseSocket(x)
// Pointer p drift right by b bytes
...
...
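The tstrncpy macro added above wraps strncpy and always writes a terminating NUL into the last byte of the destination, which plain strncpy does not guarantee when the source is longer than the buffer. A quick standalone illustration follows; the macro is copied here (without the trailing semicolon quirk) only so the sketch compiles on its own.

#include <stdio.h>
#include <string.h>

// Same shape as the macro added to tutil.h: bounded copy plus forced termination.
#define tstrncpy(dst, src, size)   \
  do {                             \
    strncpy((dst), (src), (size)); \
    (dst)[(size)-1] = 0;           \
  } while (0)

int main(void) {
  char path[8];
  tstrncpy(path, "/var/lib/taos/a-much-longer-path", sizeof(path));
  printf("truncated but NUL-terminated: '%s' (len %zu)\n", path, strlen(path));
  return 0;
}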
src/util/src/tqueue.c
...
...
@@ -145,7 +145,7 @@ int taosReadQitem(taos_queue param, int *type, void **pitem) {
    queue->numOfItems--;
    if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, 1);
    code = 1;
    uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, *type, queue->numOfItems);
    uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
  }

  pthread_mutex_unlock(&queue->mutex);
...
src/vnode/inc/vnodeInt.h
...
...
@@ -51,6 +51,7 @@ typedef struct {
  SSyncCfg  syncCfg;
  SWalCfg   walCfg;
  char      *rootDir;
  char      db[TSDB_DB_NAME_LEN + 1];
} SVnodeObj;

int  vnodeWriteToQueue(void *param, void *pHead, int type);
...
src/vnode/src/vnodeMain.c
...
...
@@ -39,7 +39,7 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode);
static int32_t  vnodeSaveVersion(SVnodeObj *pVnode);
static int32_t  vnodeReadVersion(SVnodeObj *pVnode);
static int      vnodeProcessTsdbStatus(void *arg, int status);
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion);
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion);
static int      vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void     vnodeNotifyRole(void *ahandle, int8_t role);
static void     vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
...
@@ -224,6 +224,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
  appH.cqH = pVnode->cq;
  appH.cqCreateFunc = cqCreate;
  appH.cqDropFunc = cqDrop;
  appH.configFunc = dnodeSendCfgTableToRecv;
  sprintf(temp, "%s/tsdb", rootDir);
  pVnode->tsdb = tsdbOpenRepo(temp, &appH);
  if (pVnode->tsdb == NULL) {
...
@@ -433,10 +434,10 @@ static int vnodeProcessTsdbStatus(void *arg, int status) {
  return 0;
}

static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) {
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion) {
  SVnodeObj *pVnode = ahandle;
  *fversion = pVnode->fversion;
  return tsdbGetFileInfo(pVnode->tsdb, name, index, size);
  return tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size);
}

static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index) {
...
@@ -473,6 +474,7 @@ static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
  appH.cqH = pVnode->cq;
  appH.cqCreateFunc = cqCreate;
  appH.cqDropFunc = cqDrop;
  appH.configFunc = dnodeSendCfgTableToRecv;
  pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);
}
...
@@ -496,7 +498,7 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
  }

  len += snprintf(content + len, maxLen - len, "{\n");
  len += snprintf(content + len, maxLen - len, "  \"db\": \"%s\",\n", pVnodeCfg->db);
  len += snprintf(content + len, maxLen - len, "  \"cfgVersion\": %d,\n", pVnodeCfg->cfg.cfgVersion);
  len += snprintf(content + len, maxLen - len, "  \"cacheBlockSize\": %d,\n", pVnodeCfg->cfg.cacheBlockSize);
  len += snprintf(content + len, maxLen - len, "  \"totalBlocks\": %d,\n", pVnodeCfg->cfg.totalBlocks);
...
@@ -568,6 +570,13 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
    goto PARSE_OVER;
  }

  cJSON *db = cJSON_GetObjectItem(root, "db");
  if (!db || db->type != cJSON_String || db->valuestring == NULL) {
    vError("vgId:%d, failed to read vnode cfg, db not found", pVnode->vgId);
    goto PARSE_OVER;
  }
  strcpy(pVnode->db, db->valuestring);

  cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
  if (!cfgVersion || cfgVersion->type != cJSON_Number) {
    vError("vgId:%d, failed to read vnode cfg, cfgVersion not found", pVnode->vgId);
...
src/vnode/src/vnodeWrite.c
...
...
@@ -29,11 +29,12 @@
#include "tcq.h"
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *, SRspRet *);
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet);

void vnodeInitWriteFp(void) {
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg;
...
@@ -41,6 +42,7 @@ void vnodeInitWriteFp(void) {
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE]    = vnodeProcessDropTableMsg;
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE]   = vnodeProcessAlterTableMsg;
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE]   = vnodeProcessDropStableMsg;
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL]   = vnodeProcessUpdateTagValMsg;
}

int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
...
...
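vnodeInitWriteFp registers one handler per write-message type in a function-pointer array, and vnodeProcessWrite later dispatches by indexing that array with the message type. The minimal dispatch-table sketch below uses made-up message types and handlers; it mirrors only the shape of the mechanism, not the vnode code itself.

#include <stdio.h>

enum { MSG_SUBMIT, MSG_CREATE_TABLE, MSG_UPDATE_TAG_VAL, MSG_TYPE_MAX };

typedef int (*MsgHandler)(void *cont);

static int handleSubmit(void *cont)       { (void)cont; printf("submit\n");         return 0; }
static int handleCreateTable(void *cont)  { (void)cont; printf("create table\n");   return 0; }
static int handleUpdateTagVal(void *cont) { (void)cont; printf("update tag val\n"); return 0; }

static MsgHandler handlers[MSG_TYPE_MAX];

static void initHandlers(void) {
  handlers[MSG_SUBMIT]         = handleSubmit;
  handlers[MSG_CREATE_TABLE]   = handleCreateTable;
  handlers[MSG_UPDATE_TAG_VAL] = handleUpdateTagVal;  // new entry, like the registration above
}

static int processWrite(int type, void *cont) {
  if (type < 0 || type >= MSG_TYPE_MAX || handlers[type] == NULL) return -1;  // unknown message
  return handlers[type](cont);
}

int main(void) {
  initHandlers();
  processWrite(MSG_UPDATE_TAG_VAL, NULL);
  return processWrite(MSG_SUBMIT, NULL);
}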
@@ -110,7 +112,6 @@ static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRe
  int32_t code = tsdbCreateTable(pVnode->tsdb, pCfg);
  tsdbClearTableCfg(pCfg);
  free(pCfg);
  return code;
}
...
@@ -134,7 +135,6 @@ static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
  if (pCfg == NULL) return terrno;
  int32_t code = tsdbAlterTable(pVnode->tsdb, pCfg);
  tsdbClearTableCfg(pCfg);
  free(pCfg);
  return code;
}
...
@@ -156,6 +156,10 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
  return code;
}

static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
  return tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont);
}

int vnodeWriteToQueue(void *param, void *data, int type) {
  SVnodeObj *pVnode = param;
  SWalHead *pHead = data;
...
src/wal/test/waltest.c
...
...
@@ -15,6 +15,7 @@
//#define _DEFAULT_SOURCE
#include "os.h"
#include "tutil.h"
#include "tglobal.h"
#include "tlog.h"
#include "twal.h"
...
...
@@ -45,7 +46,7 @@ int main(int argc, char *argv[]) {
  for (int i = 1; i < argc; ++i) {
    if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
      strcpy(path, argv[++i]);
      tstrncpy(path, argv[++i], sizeof(path));
    } else if (strcmp(argv[i], "-m") == 0 && i < argc - 1) {
      max = atoi(argv[++i]);
    } else if (strcmp(argv[i], "-l") == 0 && i < argc - 1) {
...
tests/examples/c/demo.c
...
...
@@ -94,7 +94,7 @@ int main(int argc, char *argv[]) {
    return 0;
  }

  taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/TDinternal/community/sim/tsim/cfg");
  taos_options(TSDB_OPTION_CONFIGDIR, "~/sec/cfg");

  // init TAOS
  taos_init();
...
@@ -107,7 +107,8 @@ int main(int argc, char *argv[]) {
  printf("success to connect to server\n");

  // multiThreadTest(1, taos);
  doQuery(taos, "insert into tb9 (ts, c1, c2) using stb (t1, t2) tags ('tag4', 4) values ( now + 4s, 'binary4', 4);");
  doQuery(taos, "use test");
  doQuery(taos, "alter table tm99 set tag a=99");
  // for(int32_t i = 0; i < 100000; ++i) {
  //   doQuery(taos, "insert into t1 values(now, 2)");
  // }
...
tests/examples/c/prepare.c
0 → 100644
// TAOS standard API example. The same syntax as MySQL, but only a subet
// to compile: gcc -o prepare prepare.c -ltaos
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
void taosMsleep(int mseconds);

int main(int argc, char *argv[])
{
  TAOS      *taos;
  TAOS_RES  *result;
  TAOS_STMT *stmt;

  // connect to server
  if (argc < 2) {
    printf("please input server ip \n");
    return 0;
  }

  // init TAOS
  taos_init();

  taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
    exit(1);
  }

  taos_query(taos, "drop database demo");
  if (taos_query(taos, "create database demo") != 0) {
    printf("failed to create database, reason:%s\n", taos_errstr(taos));
    exit(1);
  }

  taos_query(taos, "use demo");

  // create table
  const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
  if (taos_query(taos, sql) != 0) {
    printf("failed to create table, reason:%s\n", taos_errstr(taos));
    exit(1);
  }

  // sleep for one second to make sure table is created on data node
  // taosMsleep(1000);

  // insert 10 records
  struct {
    int64_t ts;
    int8_t  b;
    int8_t  v1;
    int16_t v2;
    int32_t v4;
    int64_t v8;
    float   f4;
    double  f8;
    char    bin[40];
    char    blob[80];
  } v = {0};

  stmt = taos_stmt_init(taos);
  TAOS_BIND params[10];
  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer_length = sizeof(v.ts);
  params[0].buffer = &v.ts;
  params[0].length = &params[0].buffer_length;
  params[0].is_null = NULL;

  params[1].buffer_type = TSDB_DATA_TYPE_BOOL;
  params[1].buffer_length = sizeof(v.b);
  params[1].buffer = &v.b;
  params[1].length = &params[1].buffer_length;
  params[1].is_null = NULL;

  params[2].buffer_type = TSDB_DATA_TYPE_TINYINT;
  params[2].buffer_length = sizeof(v.v1);
  params[2].buffer = &v.v1;
  params[2].length = &params[2].buffer_length;
  params[2].is_null = NULL;

  params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
  params[3].buffer_length = sizeof(v.v2);
  params[3].buffer = &v.v2;
  params[3].length = &params[3].buffer_length;
  params[3].is_null = NULL;

  params[4].buffer_type = TSDB_DATA_TYPE_INT;
  params[4].buffer_length = sizeof(v.v4);
  params[4].buffer = &v.v4;
  params[4].length = &params[4].buffer_length;
  params[4].is_null = NULL;

  params[5].buffer_type = TSDB_DATA_TYPE_BIGINT;
  params[5].buffer_length = sizeof(v.v8);
  params[5].buffer = &v.v8;
  params[5].length = &params[5].buffer_length;
  params[5].is_null = NULL;

  params[6].buffer_type = TSDB_DATA_TYPE_FLOAT;
  params[6].buffer_length = sizeof(v.f4);
  params[6].buffer = &v.f4;
  params[6].length = &params[6].buffer_length;
  params[6].is_null = NULL;

  params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE;
  params[7].buffer_length = sizeof(v.f8);
  params[7].buffer = &v.f8;
  params[7].length = &params[7].buffer_length;
  params[7].is_null = NULL;

  params[8].buffer_type = TSDB_DATA_TYPE_BINARY;
  params[8].buffer_length = sizeof(v.bin);
  params[8].buffer = v.bin;
  params[8].length = &params[8].buffer_length;
  params[8].is_null = NULL;

  strcpy(v.blob, "一二三四五六七八九十");
  params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
  params[9].buffer_length = strlen(v.blob);
  params[9].buffer = v.blob;
  params[9].length = &params[9].buffer_length;
  params[9].is_null = NULL;

  int is_null = 1;

  sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
  int code = taos_stmt_prepare(stmt, sql, 0);
  if (code != 0){
    printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
  }
  v.ts = 1591060628000;
  for (int i = 0; i < 10; ++i) {
    v.ts += 1;
    for (int j = 1; j < 10; ++j) {
      params[j].is_null = ((i == j) ? &is_null : 0);
    }
    v.b = (int8_t)i % 2;
    v.v1 = (int8_t)i;
    v.v2 = (int16_t)(i * 2);
    v.v4 = (int32_t)(i * 4);
    v.v8 = (int64_t)(i * 8);
    v.f4 = (float)(i * 40);
    v.f8 = (double)(i * 80);
    for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
      v.bin[j] = (char)(i + '0');
    }

    taos_stmt_bind_param(stmt, params);
    taos_stmt_add_batch(stmt);
  }
  if (taos_stmt_execute(stmt) != 0) {
    printf("failed to execute insert statement.\n");
    exit(1);
  }
  taos_stmt_close(stmt);
  printf("==== success inset data ====.\n");

  // query the records
  stmt = taos_stmt_init(taos);
  taos_stmt_prepare(stmt, "SELECT * FROM m1 WHERE v1 > ? AND v2 < ?", 0);
  v.v1 = 5;
  v.v2 = 15;
  taos_stmt_bind_param(stmt, params + 2);
  if (taos_stmt_execute(stmt) != 0) {
    printf("failed to execute select statement.\n");
    exit(1);
  }

  result = taos_stmt_use_result(stmt);

  TAOS_ROW row;
  int rows = 0;
  int num_fields = taos_num_fields(result);
  TAOS_FIELD *fields = taos_fetch_fields(result);
  char temp[256];

  // fetch the records row by row
  while ((row = taos_fetch_row(result))) {
    rows++;
    taos_print_row(temp, row, fields, num_fields);
    printf("%s\n", temp);
  }

  taos_free_result(result);
  taos_stmt_close(stmt);

  return getchar();
}
tests/pytest/random-test/random-test-multi-threading-3.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import random
import threading

from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *

last_tb = ""
last_stb = ""
written = 0


class Test (threading.Thread):
    def __init__(self, threadId, name):
        threading.Thread.__init__(self)
        self.threadId = threadId
        self.name = name

        self.threadLock = threading.Lock()

    def create_table(self):
        tdLog.info("create_table")
        global last_tb
        global written

        current_tb = "tb%d" % int(round(time.time() * 1000))

        if (current_tb == last_tb):
            return
        else:
            tdLog.info("will create table %s" % current_tb)

            try:
                tdSql.execute('create table %s (ts timestamp, speed int)' % current_tb)
                last_tb = current_tb
                written = 0
            except Exception as e:
                tdLog.info(repr(e))

    def insert_data(self):
        tdLog.info("insert_data")
        global last_tb
        global written

        if (last_tb == ""):
            tdLog.info("no table, create first")
            self.create_table()

        tdLog.info("will insert data to table")
        for i in range(0, 10):
            insertRows = 1000
            tdLog.info("insert %d rows to %s" % (insertRows, last_tb))

            for j in range(0, insertRows):
                ret = tdSql.execute('insert into %s values (now + %dm, %d)' % (last_tb, j, j))
                written = written + 1

    def query_data(self):
        tdLog.info("query_data")
        global last_tb
        global written

        if (written > 0):
            tdLog.info("query data from table")
            tdSql.query("select * from %s" % last_tb)
            tdSql.checkRows(written)

    def create_stable(self):
        tdLog.info("create_stable")
        global last_tb
        global last_stb
        global written

        current_stb = "stb%d" % int(round(time.time() * 1000))

        if (current_stb == last_stb):
            return
        else:
            tdLog.info("will create stable %s" % current_stb)
            tdSql.execute('create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' % current_stb)
            last_stb = current_stb

            current_tb = "tb%d" % int(round(time.time() * 1000))
            tdSql.execute("create table %s using %s tags (1, '表1')" % (current_tb, last_stb))
            last_tb = current_tb
            tdSql.execute("insert into %s values (now, 27, '我是nchar字符串')" % last_tb)
            written = written + 1

    def drop_stable(self):
        tdLog.info("drop_stable")
        global last_stb

        if (last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will drop last super table")
            tdSql.execute('drop table %s' % last_stb)
            last_stb = ""

    def restart_database(self):
        tdLog.info("restart_database")
        global last_tb
        global written

        tdDnodes.stop(1)
        tdDnodes.start(1)

    def force_restart_database(self):
        tdLog.info("force_restart_database")
        global last_tb
        global written

        tdDnodes.forcestop(1)
        tdDnodes.start(1)

    def drop_table(self):
        tdLog.info("drop_table")
        global last_tb
        global written

        for i in range(0, 10):
            if (last_tb != ""):
                tdLog.info("drop last_tb %s" % last_tb)
                tdSql.execute("drop table %s" % last_tb)
                last_tb = ""
                written = 0

    def query_data_from_stable(self):
        tdLog.info("query_data_from_stable")
        global last_stb

        if (last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will query data from super table")
            tdSql.execute('select * from %s' % last_stb)

    def reset_query_cache(self):
        tdLog.info("reset_query_cache")
        global last_tb
        global written

        tdLog.info("reset query cache")
        tdSql.execute("reset query cache")
        tdLog.sleep(1)

    def reset_database(self):
        tdLog.info("reset_database")
        global last_tb
        global last_stb
        global written

        tdDnodes.forcestop(1)
        tdDnodes.deploy(1)
        last_tb = ""
        last_stb = ""
        written = 0
        tdDnodes.start(1)
        tdSql.prepare()

    def delete_datafiles(self):
        tdLog.info("delete_data_files")
        global last_tb
        global written

        dnodesDir = tdDnodes.getDnodesRootDir()
        dataDir = dnodesDir + '/dnode1/*'
        deleteCmd = 'rm -rf %s' % dataDir
        os.system(deleteCmd)

        last_tb = ""
        written = 0
        tdDnodes.start(1)
        tdSql.prepare()

    def run(self):
        dataOp = {
            1: self.insert_data,
            2: self.query_data,
            3: self.query_data_from_stable,
        }

        dbOp = {
            1: self.create_table,
            2: self.create_stable,
            3: self.restart_database,
            4: self.force_restart_database,
            5: self.drop_table,
            6: self.reset_query_cache,
            7: self.reset_database,
            8: self.delete_datafiles,
            9: self.drop_stable,
        }

        queryOp = {
            1: self.query_data,
            2: self.query_data_from_stable,
        }

        if (self.threadId == 1):
            while True:
                self.threadLock.acquire()
                tdLog.notice("first thread")
                randDataOp = random.randint(1, 3)
                dataOp.get(randDataOp, lambda: "ERROR")()
                self.threadLock.release()

        elif (self.threadId == 2):
            while True:
                tdLog.notice("second thread")
                self.threadLock.acquire()
                randDbOp = random.randint(1, 9)
                dbOp.get(randDbOp, lambda: "ERROR")()
                self.threadLock.release()

        elif (self.threadId == 3):
            while True:
                tdLog.notice("third thread")
                self.threadLock.acquire()
                randQueryOp = random.randint(1, 9)
                queryOp.get(randQueryOp, lambda: "ERROR")()
                self.threadLock.release()


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()

        test1 = Test(1, "data operation")
        test2 = Test(2, "db operation")
        test3 = Test(3, "query operation")

        test1.start()
        test2.start()
        test3.start()
        test1.join()
        test2.join()
        test3.join()
        tdLog.info("end of test")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/random-test/random-test-multi-threading.py
...
...
@@ -20,63 +20,62 @@ from util.cases import *
from util.sql import *
from util.dnodes import *

current_tb = ""
last_tb = ""
last_stb = ""
written = 0


class Test (threading.Thread):
    def __init__(self, threadId, name, sleepTime):
    def __init__(self, threadId, name):
        threading.Thread.__init__(self)
        self.threadId = threadId
        self.name = name
        self.sleepTime = sleepTime

        self.threadLock = threading.Lock()

    def create_table(self):
        global current_tb
        tdLog.info("create_table")
        global last_tb
        global written

        tdLog.info("create a table")
        current_tb = "tb%d" % int(round(time.time() * 1000))
        tdLog.info("current table %s" % current_tb)

        if (current_tb == last_tb):
            return
        else:
            tdSql.execute('create table %s (ts timestamp, speed int)' % current_tb)
            last_tb = current_tb
            written = 0
            tdLog.info("will create table %s" % current_tb)

            try:
                tdSql.execute('create table %s (ts timestamp, speed int)' % current_tb)
                last_tb = current_tb
                written = 0
            except Exception as e:
                tdLog.info(repr(e))

    def insert_data(self):
        global current_tb
        tdLog.info("insert_data")
        global last_tb
        global written

        tdLog.info("will insert data to table")
        if (current_tb == ""):
        if (last_tb == ""):
            tdLog.info("no table, create first")
            self.create_table()

        tdLog.info("insert data to table")
        tdLog.info("will insert data to table")
        for i in range(0, 10):
            self.threadLock.acquire()
            insertRows = 1000
            tdLog.info("insert %d rows to %s" % (insertRows, current_tb))
            tdLog.info("insert %d rows to %s" % (insertRows, last_tb))

            for j in range(0, insertRows):
                ret = tdSql.execute('insert into %s values (now + %dm, %d)' % (current_tb, j, j))
                ret = tdSql.execute('insert into %s values (now + %dm, %d)' % (last_tb, j, j))
                written = written + 1

            self.threadLock.release()

    def query_data(self):
        global current_tb
        tdLog.info("query_data")
        global last_tb
        global written
...
@@ -86,53 +85,90 @@ class Test (threading.Thread):
            tdSql.checkRows(written)

    def create_stable(self):
        global current_tb
        tdLog.info("create_stable")
        global last_tb
        global last_stb
        global written

        tdLog.info("create a super table")
        current_stb = "stb%d" % int(round(time.time() * 1000))

        if (current_stb == last_stb):
            return
        else:
            tdLog.info("will create stable %s" % current_stb)
            tdSql.execute('create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' % current_stb)
            last_stb = current_stb

            current_tb = "tb%d" % int(round(time.time() * 1000))
            tdSql.execute("create table %s using %s tags (1, '表1')" % (current_tb, last_stb))
            last_tb = current_tb
            tdSql.execute("insert into %s values (now, 27, '我是nchar字符串')" % last_tb)
            written = written + 1

    def drop_stable(self):
        tdLog.info("drop_stable")
        global last_stb

        if (last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will drop last super table")
            tdSql.execute('drop table %s' % last_stb)
            last_stb = ""

    def restart_database(self):
        global current_tb
        tdLog.info("restart_database")
        global last_tb
        global written

        tdLog.info("restart databae")
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)

    def force_restart(self):
        global current_tb
    def force_restart_database(self):
        tdLog.info("force_restart_database")
        global last_tb
        global written

        tdLog.info("force restart database")
        tdDnodes.forcestop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)
        tdLog.sleep(10)

    def drop_table(self):
        global current_tb
        tdLog.info("drop_table")
        global last_tb
        global written

        for i in range(0, 10):
            self.threadLock.acquire()
            tdLog.info("current_tb %s" % current_tb)

            if (current_tb != ""):
                tdLog.info("drop current tb %s" % current_tb)
                tdSql.execute("drop table %s" % current_tb)
                current_tb = ""
            if (last_tb != ""):
                tdLog.info("drop last_tb %s" % last_tb)
                tdSql.execute("drop table %s" % last_tb)
                last_tb = ""
                written = 0
            tdLog.sleep(self.sleepTime)
            self.threadLock.release()

    def query_data_from_stable(self):
        tdLog.info("query_data_from_stable")
        global last_stb

        if (last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will query data from super table")
            tdSql.execute('select * from %s' % last_stb)

    def reset_query_cache(self):
        global current_tb
        tdLog.info("reset_query_cache")
        global last_tb
        global written
...
@@ -141,51 +177,69 @@ class Test (threading.Thread):
        tdLog.sleep(1)

    def reset_database(self):
        global current_tb
        tdLog.info("reset_database")
        global last_tb
        global last_stb
        global written

        tdLog.info("reset database")
        tdDnodes.forcestop(1)
        tdDnodes.deploy(1)
        current_tb = ""
        last_tb = ""
        last_stb = ""
        written = 0
        tdDnodes.start(1)
        tdSql.prepare()

    def delete_datafiles(self):
        global current_tb
        tdLog.info("delete_data_files")
        global last_tb
        global written

        tdLog.info("delete data files")
        dnodesDir = tdDnodes.getDnodesRootDir()
        dataDir = dnodesDir + '/dnode1/*'
        deleteCmd = 'rm -rf %s' % dataDir
        os.system(deleteCmd)

        current_tb = ""
        last_tb = ""
        written = 0
        tdDnodes.start(1)
        tdLog.sleep(10)
        tdSql.prepare()

    def run(self):
        switch = {
        dataOp = {
            1: self.insert_data,
            2: self.query_data,
            3: self.query_data_from_stable,
        }

        dbOp = {
            1: self.create_table,
            2: self.insert_data,
            3: self.query_data,
            4: self.create_stable,
            5: self.restart_database,
            6: self.force_restart,
            7: self.drop_table,
            8: self.reset_query_cache,
            9: self.reset_database,
            10: self.delete_datafiles,
            2: self.create_stable,
            3: self.restart_database,
            4: self.force_restart_database,
            5: self.drop_table,
            6: self.reset_query_cache,
            7: self.reset_database,
            8: self.delete_datafiles,
            9: self.drop_stable,
        }

        switch.get(self.threadId, lambda: "ERROR")()
        if (self.threadId == 1):
            while True:
                self.threadLock.acquire()
                tdLog.notice("first thread")
                randDataOp = random.randint(1, 3)
                dataOp.get(randDataOp, lambda: "ERROR")()
                self.threadLock.release()

        elif (self.threadId == 2):
            while True:
                tdLog.notice("second thread")
                self.threadLock.acquire()
                randDbOp = random.randint(1, 9)
                dbOp.get(randDbOp, lambda: "ERROR")()
                self.threadLock.release()


class TDTestCase:
...
@@ -196,8 +250,8 @@ class TDTestCase:
    def run(self):
        tdSql.prepare()

        test1 = Test(2, "insert_data", 1)
        test2 = Test(7, "drop_table", 2)
        test1 = Test(1, "data operation")
        test2 = Test(2, "db operation")

        test1.start()
        test2.start()
...
tests/pytest/random-test/random-test.py
...
...
@@ -21,104 +21,157 @@ from util.dnodes import *
class Test:
    def __init__(self):
        self.current_tb = ""
        self.last_tb = ""
        self.last_stb = ""
        self.written = 0

    def create_table(self):
        tdLog.info("create a table")
        self.current_tb = "tb%d" % int(round(time.time() * 1000))
        tdLog.info("current table %s" % self.current_tb)
        tdLog.info("create_table")
        current_tb = "tb%d" % int(round(time.time() * 1000))

        if (self.current_tb == self.last_tb):
        if (current_tb == self.last_tb):
            return
        else:
            tdLog.info("will create table %s" % current_tb)
            tdSql.execute('create table %s (ts timestamp, speed int)' % self.current_tb)
            self.last_tb = self.current_tb
            tdSql.execute('create table %s (ts timestamp, c1 int, c2 nchar(10))' % current_tb)
            self.last_tb = current_tb
            self.written = 0

    def insert_data(self):
        tdLog.info("will insert data to table")
        if (self.current_tb == ""):
        tdLog.info("insert_data")
        if (self.last_tb == ""):
            tdLog.info("no table, create first")
            self.create_table()

        tdLog.info("insert data to table")
        tdLog.info("will insert data to table")
        insertRows = 10
        tdLog.info("insert %d rows to %s" % (insertRows, self.last_tb))
        for i in range(0, insertRows):
            ret = tdSql.execute('insert into %s values (now + %dm, %d)' % (self.last_tb, i, i))
            ret = tdSql.execute('insert into %s values (now + %dm, %d, "%s")' % (self.last_tb, i, i, "->" + str(i)))
            self.written = self.written + 1

        tdLog.info("insert earlier data")
        tdSql.execute('insert into %s values (now - 5m , 10)' % self.last_tb)
        tdSql.execute('insert into %s values (now - 5m , 10, " - 5m")' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 6m , 10)' % self.last_tb)
        tdSql.execute('insert into %s values (now - 6m , 10, " - 6m")' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 7m , 10)' % self.last_tb)
        tdSql.execute('insert into %s values (now - 7m , 10, " - 7m")' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 8m , 10)' % self.last_tb)
        tdSql.execute('insert into %s values (now - 8m , 10, " - 8m")' % self.last_tb)
        self.written = self.written + 1

    def query_data(self):
        tdLog.info("query_data")
        if (self.written > 0):
            tdLog.info("query data from table")
            tdSql.query("select * from %s" % self.last_tb)
            tdSql.checkRows(self.written)

    def create_stable(self):
        tdLog.info("create a super table")
        tdLog.info("create_stable")
        current_stb = "stb%d" % int(round(time.time() * 1000))

        if (current_stb == self.last_stb):
            return
        else:
            tdLog.info("will create stable %s" % current_stb)
            tdSql.execute('create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' % current_stb)
            self.last_stb = current_stb

            current_tb = "tb%d" % int(round(time.time() * 1000))
            tdSql.execute("create table %s using %s tags (1, '表1')" % (current_tb, self.last_stb))
            self.last_tb = current_tb
            tdSql.execute("insert into %s values (now, 27, '我是nchar字符串')" % self.last_tb)
            self.written = self.written + 1

    def drop_stable(self):
        tdLog.info("drop_stable")
        if (self.last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will drop last super table")
            tdSql.execute('drop table %s' % self.last_stb)
            self.last_stb = ""

    def query_data_from_stable(self):
        tdLog.info("query_data_from_stable")
        if (self.last_stb == ""):
            tdLog.info("no super table")
            return
        else:
            tdLog.info("will query data from super table")
            tdSql.execute('select * from %s' % self.last_stb)

    def restart_database(self):
        tdLog.info("restart databae")
        tdLog.info("restart_databae")
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)

    def force_restart(self):
        tdLog.info("force restart database")
    def force_restart_database(self):
        tdLog.info("force_restart_database")
        tdDnodes.forcestop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)
        tdSql.prepare()

    def drop_table(self):
        if (self.current_tb != ""):
            tdLog.info("drop current tb %s" % self.current_tb)
            tdSql.execute("drop table %s" % self.current_tb)
            self.current_tb = ""
        tdLog.info("drop_table")
        if (self.last_tb != ""):
            tdLog.info("drop last tb %s" % self.last_tb)
            tdSql.execute("drop table %s" % self.last_tb)
            self.last_tb = ""
            self.written = 0

    def reset_query_cache(self):
        tdLog.info("reset query cache")
        tdLog.info("reset_query_cache")
        tdSql.execute("reset query cache")
        tdLog.sleep(1)

    def reset_database(self):
        tdLog.info("reset database")
        tdLog.info("reset_database")
        tdDnodes.forcestop(1)
        tdDnodes.deploy(1)
        self.current_tb = ""
        self.last_tb = ""
        self.written = 0
        tdDnodes.start(1)
        tdLog.sleep(5)
        tdSql.prepare()

    def delete_datafiles(self):
        tdLog.info("delete data files")
        tdLog.info("delete_data files")
        dnodesDir = tdDnodes.getDnodesRootDir()
        dataDir = dnodesDir + '/dnode1/*'
        deleteCmd = 'rm -rf %s' % dataDir
        os.system(deleteCmd)

        self.current_tb = ""
        self.last_tb = ""
        self.last_stb = ""
        self.written = 0
        tdDnodes.start(1)
        tdLog.sleep(10)
        tdSql.prepare()
...
@@ -138,15 +191,17 @@ class TDTestCase:
            3: test.query_data,
            4: test.create_stable,
            5: test.restart_database,
            6: test.force_restart,
            6: test.force_restart_database,
            7: test.drop_table,
            8: test.reset_query_cache,
            9: test.reset_database,
            10: test.delete_datafiles,
            11: test.query_data_from_stable,
            12: test.drop_stable,
        }

        for x in range(1, 100):
            r = random.randint(1, 10)
        for x in range(1, 1000):
            r = random.randint(1, 12)
            tdLog.notice("iteration %d run func %d" % (x, r))
            switch.get(r, lambda: "ERROR")()
...
tests/script/unique/arbitrator/dn2_mn1_cache_file_sync.sim
0 → 100644
# Test case description: dnode1 is an mnode only, dnode2/dnode3 are vnodes only
# step 1: start dnode1
# step 2: start dnode2 and dnode3 and add both into the cluster (suppose dnode2 holds the master vnode)
# step 3: create db and table, insert data, and flush it to disk so that only one data file is generated (e.g. file 1841)
# step 4: insert old data (now-15d) and new data (now+15d), keeping the row count small so the rows stay in cache and are not flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files, 1840 and 1842, on dnode2
# step 6: insert two more data rows: now-16d and now+16d
# step 7: restart dnode2 and wait for sync to finish
# expect: on dnode2, files 1840 and 1842 will be removed
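The specific file numbers come from how TSDB groups rows into time-ranged data files. A rough sketch of that arithmetic, assuming the file group id is simply whole days since the Unix epoch divided by daysPerFile (the formula, the 10-day default, and the helper below are assumptions for illustration, not taken from this commit; the exact ids depend on the date the test runs):

from datetime import datetime, timedelta, timezone

DAYS_PER_FILE = 10  # assumed value; daysPerFile is configurable per database

def file_group_id(ts, days_per_file=DAYS_PER_FILE):
    # Assumed mapping: whole days since the Unix epoch, grouped daysPerFile at a time.
    return int(ts.timestamp() // 86400) // days_per_file

now = datetime.now(timezone.utc)
for label, ts in (("now-15d", now - timedelta(days=15)),
                  ("now", now),
                  ("now+15d", now + timedelta(days=15))):
    # Rows 15 days before/after "now" land in file groups adjacent to the current one,
    # which is why flushing them in step 5 produces two extra data files.
    print(label, file_group_id(ts))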
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3 and add them into the cluster, then create a database with replica 2, create a table, and insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
#sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 1 cache 1
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
#$tsStart = 1420041600000
# insert more than 2M of data so that it is flushed to disk, generating one file
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
# $ts = $tsStart + $x
sql insert into $tb values ( now + 0s , $x ) ( now + 1s , $x ) ( now + 2s , $x ) ( now + 3s , $x ) ( now + 4s , $x ) ( now + 5s , $x ) ( now + 6s , $x ) ( now + 7s , $x ) ( now + 8s , $x ) ( now + 9s , $x )
$x = $x + 10
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data (now-15d) and new data (now+15d); keep the rows in cache, not flushed to disk
sql insert into $tb values ( now - 15d , -15 )
sql insert into $tb values ( now + 15d , 15 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode2, so data rows are flushed to disk, generating two new files on dnode2
system sh/exec.sh -n dnode2 -s stop
sleep $sleepTimer
wait_dnode2_offline:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: insert two data rows: now-16d and now+16d
sql insert into $tb values ( now - 16d , -16 )
sql insert into $tb values ( now + 16d , 16 )
$totalRows = $totalRows + 2
return 1
print ============== step6: restart dnode2, waiting for sync to finish
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step7: on dnode2, the files 1840 and 1842 should be removed
# how to check this from within the script is still an open question
\ No newline at end of file
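One way to answer that open question is to do the check from the Python test harness rather than from the sim script itself. A minimal sketch, assuming dnode2's TSDB files live under a <dataDir>/vnode/vnode<N>/tsdb/data tree and that the file-group id appears in each file name (both the layout and the naming, as well as the paths in the usage comment, are assumptions):

import glob
import os

def list_tsdb_files(dnode_data_dir):
    # Assumed layout: <dataDir>/vnode/vnode<N>/tsdb/data/* ; adjust if the real tree differs.
    pattern = os.path.join(dnode_data_dir, "vnode", "vnode*", "tsdb", "data", "*")
    return [os.path.basename(p) for p in glob.glob(pattern)]

def has_file_group(dnode_data_dir, group_id):
    # True if any data file name still contains the given file-group id.
    return any(str(group_id) in name for name in list_tsdb_files(dnode_data_dir))

# Usage sketch: after restarting dnode2 and waiting for sync,
# file groups 1840 and 1842 are expected to be gone.
# for gid in (1840, 1842):
#     assert not has_file_group("/path/to/sim/dnode2/data", gid)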
tests/script/unique/arbitrator/dn3_mn1_replica_change.sim
0 → 100644
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2 and add it into the cluster, then create a database with replica 1, create a table, and insert data
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000
$totalTableNum = 10000
$sleepTimer = 10000
$db = db
sql create database $db replica 1 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 100
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1420041600000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step3: start dnode3 and add it into the cluster, then alter replica from 1 to 2 and wait for sync
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 3000
sql alter database $db replica 2
sleep $sleepTimer
wait_dnode3_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode3_ready
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode3_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step4: stop dnode2 to check whether the sync succeeded
system sh/exec.sh -n dnode2 -s stop
sleep $sleepTimer
wait_dnode2_offline:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: restart dnode2
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step6: start dnode4 and add it into the cluster, then alter replica from 2 to 3 and wait for sync
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4
sleep 3000
sql alter database $db replica 3
sleep $sleepTimer
wait_dnode4_ready:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step7: alter replica from 3 to 2 and wait for sync
sql alter database $db replica 2
sleep $sleepTimer
wait_vgroups_replic_to_2:
sql show vgroups
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$thirdDnode_2 = $data8_1
$thirdDnode_3 = $data8_2
$thirdDnode_4 = $data8_3
$thirdDnode_5 = $data8_4
if $thirdDnode_2 != null then
sleep 2000
goto wait_vgroups_replic_to_2
endi
if $thirdDnode_3 != null then
sleep 2000
goto wait_vgroups_replic_to_2
endi
if $thirdDnode_4 != null then
sleep 2000
goto wait_vgroups_replic_to_2
endi
if $thirdDnode_5 != null then
sleep 2000
goto wait_vgroups_replic_to_2
endi
sleep $sleepTimer # waiting for the data of the dropped replica to be deleted
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step8: alter replica from 2 to 1 and wait for sync
sql alter database $db replica 1
sleep $sleepTimer
wait_vgroups_replic_to_1:
sql show vgroups
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$secondDnode_2 = $data5_1
$secondDnode_3 = $data5_2
$secondDnode_4 = $data5_3
$secondDnode_5 = $data5_4
if $secondDnode_2 != null then
sleep 2000
goto wait_vgroups_replic_to_1
endi
if $secondDnode_3 != null then
sleep 2000
goto wait_vgroups_replic_to_1
endi
if $secondDnode_4 != null then
sleep 2000
goto wait_vgroups_replic_to_1
endi
if $secondDnode_5 != null then
sleep 2000
goto wait_vgroups_replic_to_1
endi
all_dnodes_ready:
sql show dnodes
if $rows != 4 then
sleep 2000
goto all_dnodes_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto all_dnodes_ready
endi
if $dnode2Status != ready then
sleep 2000
goto all_dnodes_ready
endi
if $dnode3Status != ready then
sleep 2000
goto all_dnodes_ready
endi
if $dnode4Status != ready then
sleep 2000
goto all_dnodes_ready
endi
sleep $sleepTimer # waiting for the data of the dropped replica to be deleted
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step9: drop dnode2/dnode3
sql drop dnode $hostname2
sql drop dnode $hostname3
sleep $sleepTimer
wait_dnode23_dropped:
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode23_dropped
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode2Status != null then
sleep 2000
goto wait_dnode23_dropped
endi
if $dnode3Status != null then
sleep 2000
goto wait_dnode23_dropped
endi
if $dnode4Status != ready then
return -1
endi
sleep $sleepTimer # waiting for vnodes to move from dnode2/dnode3 to dnode4
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
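All of the wait_dnode*_ready, wait_dnode*_offline and wait_vgroups_replic_to_* labels above implement the same poll-and-retry pattern. If the same kind of check were driven from the Python test harness, a generic helper might look like the sketch below; run_query is a hypothetical callable standing in for whatever executes "show dnodes" and returns its rows:

import time

def wait_for(check, timeout_s=60, interval_s=2):
    # Poll check() until it returns True or the timeout expires,
    # mirroring the sleep-2000-and-goto loops in the sim script.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval_s)
    return False

# Usage sketch (run_query is hypothetical and returns "show dnodes" rows,
# with the dnode status in the last column):
# ready = wait_for(lambda: all(row[-1] == "ready" for row in run_query("show dnodes")))
# assert ready, "dnodes did not become ready in time"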