taosdata / TDengine

Commit 4331a90f
Author: 陶建辉(Jeff)
Date:   May 17, 2020

    Merge branch 'develop' into enhance/log

Parents: 95d7105e, ee1b6f14

Showing 18 changed files with 415 additions and 219 deletions (+415 -219)
.gitignore                                  +3   -0
src/client/inc/tscUtil.h                    +1   -1
src/client/inc/tsclient.h                   +1   -1
src/inc/taosdef.h                           +6   -6
src/mnode/inc/mgmtDef.h                     +1   -1
src/mnode/inc/mgmtSdb.h                     +2   -1
src/mnode/src/mgmtSdb.c                     +80  -57
src/mnode/src/mgmtTable.c                   +79  -48
src/rpc/src/rpcMain.c                       +5   -3
src/tsdb/src/tsdbMeta.c                     +1   -1
src/vnode/src/vnodeMain.c                   +1   -1
tests/pytest/insert/bool.py                 +8   -0
tests/pytest/table/boundary.py              +161 -0
tests/pytest/table/column_name.py           +19  -72
tests/pytest/table/column_num.py            +1   -1
tests/pytest/table/tablename-boundary.py    +1   -1
tests/pytest/util/cases.py                  +1   -1
tests/pytest/util/sql.py                    +44  -24
.gitignore  (+3 -0)

@@ -33,5 +33,8 @@ Target/
 *.failed
 *.sql
 sim/
 psim/
 pysim/
 *.out
 *DS_Store
src/client/inc/tscUtil.h  (+1 -1)

@@ -43,7 +43,7 @@ extern "C" {
 typedef struct SParsedColElem {
   int16_t  colIndex;
-  int16_t  offset;
+  uint16_t offset;
 } SParsedColElem;
 
 typedef struct SParsedDataColInfo {
src/client/inc/tsclient.h  (+1 -1)

@@ -49,7 +49,7 @@ typedef struct STableComInfo {
   uint8_t numOfTags;
   uint8_t precision;
   int16_t numOfColumns;
-  int16_t rowSize;
+  int32_t rowSize;
 } STableComInfo;
 
 typedef struct STableMeta {
src/inc/taosdef.h  (+6 -6)

@@ -193,20 +193,20 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_ACCT_LEN      TSDB_UNI_LEN
 #define TSDB_PASSWORD_LEN  TSDB_UNI_LEN
 
-#define TSDB_MAX_COLUMNS   256
+#define TSDB_MAX_COLUMNS   1024
 #define TSDB_MIN_COLUMNS   2       // PRIMARY COLUMN(timestamp) + other columns
 
 #define TSDB_NODE_NAME_LEN  64
 #define TSDB_TABLE_NAME_LEN 192
 #define TSDB_DB_NAME_LEN    32
 #define TSDB_COL_NAME_LEN   64
-#define TSDB_MAX_SAVED_SQL_LEN   TSDB_MAX_COLUMNS * 16
+#define TSDB_MAX_SAVED_SQL_LEN   TSDB_MAX_COLUMNS * 64
 #define TSDB_MAX_SQL_LEN         TSDB_PAYLOAD_SIZE
 #define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U)  // sql length should be less than 6mb
 
-#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16
-#define TSDB_MAX_TAGS_LEN      512
-#define TSDB_MAX_TAGS          32
+#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 64
+#define TSDB_MAX_TAGS_LEN      65536
+#define TSDB_MAX_TAGS          128
 
 #define TSDB_AUTH_LEN 16
 #define TSDB_KEY_LEN  16

@@ -236,7 +236,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_PAYLOAD_SIZE         (TSDB_DEFAULT_PKT_SIZE - 100)
 #define TSDB_DEFAULT_PAYLOAD_SIZE 1024   // default payload size
 #define TSDB_EXTRA_PAYLOAD_SIZE   128    // extra bytes for auth
-#define TSDB_SQLCMD_SIZE          1024
+#define TSDB_CQ_SQL_SIZE          1024
 #define TSDB_MAX_VNODES           256
 #define TSDB_MIN_VNODES           50
 #define TSDB_INVALID_VNODE_NUM    0
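For orientation, the raised limits compose as follows. The standalone program below only recomputes the derived sizes from the new definitions in the hunk above; it is an illustration, not part of the change (the macro values are copied from the diff, everything else is scaffolding):

#include <stdio.h>

/* Values copied from the new taosdef.h definitions above (illustration only). */
#define TSDB_MAX_COLUMNS       1024
#define TSDB_MAX_TAGS          128
#define TSDB_MAX_TAGS_LEN      65536
#define TSDB_MAX_BYTES_PER_ROW (TSDB_MAX_COLUMNS * 64)
#define TSDB_MAX_SAVED_SQL_LEN (TSDB_MAX_COLUMNS * 64)

int main(void) {
  printf("max columns per table : %d\n", TSDB_MAX_COLUMNS);        /* was 256            */
  printf("max bytes per row     : %d\n", TSDB_MAX_BYTES_PER_ROW);  /* was 256*16 = 4096  */
  printf("max tags per table    : %d\n", TSDB_MAX_TAGS);           /* was 32             */
  printf("max total tag length  : %d\n", TSDB_MAX_TAGS_LEN);       /* was 512            */
  printf("max saved sql length  : %d\n", TSDB_MAX_SAVED_SQL_LEN);  /* was 256*16 = 4096  */
  return 0;
}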
src/mnode/inc/mgmtDef.h  (+1 -1)

@@ -68,7 +68,7 @@ typedef struct SMnodeObj {
 // todo use dynamic length string
 typedef struct {
-  char    tableId[TSDB_TABLE_ID_LEN + 1];
+  char   *tableId;
   int8_t  type;
 } STableObj;
src/mnode/inc/mgmtSdb.h  (+2 -1)

@@ -35,7 +35,8 @@ typedef enum {
 typedef enum {
   SDB_KEY_STRING,
   SDB_KEY_INT,
-  SDB_KEY_AUTO
+  SDB_KEY_AUTO,
+  SDB_KEY_VAR_STRING,
 } ESdbKey;
 
 typedef enum {
src/mnode/src/mgmtSdb.c  (+80 -57)

@@ -104,6 +104,14 @@ bool sdbIsServing() {
   return tsSdbObj.status == SDB_STATUS_SERVING;
 }
 
+static void *sdbGetObjKey(SSdbTable *pTable, void *key) {
+  if (pTable->keyType == SDB_KEY_VAR_STRING) {
+    return *(char **)key;
+  }
+
+  return key;
+}
+
 static char *sdbGetActionStr(int32_t action) {
   switch (action) {
     case SDB_ACTION_INSERT:

@@ -116,20 +124,25 @@ static char *sdbGetActionStr(int32_t action) {
   return "invalid";
 }
 
-static char *sdbGetkeyStr(SSdbTable *pTable, void *row) {
+static char *sdbGetKeyStr(SSdbTable *pTable, void *key) {
   static char str[16];
   switch (pTable->keyType) {
     case SDB_KEY_STRING:
-      return (char *)row;
+    case SDB_KEY_VAR_STRING:
+      return (char *)key;
     case SDB_KEY_INT:
     case SDB_KEY_AUTO:
-      sprintf(str, "%d", *(int32_t *)row);
+      sprintf(str, "%d", *(int32_t *)key);
       return str;
     default:
       return "invalid";
   }
 }
 
+static char *sdbGetKeyStrFromObj(SSdbTable *pTable, void *key) {
+  return sdbGetKeyStr(pTable, sdbGetObjKey(pTable, key));
+}
+
 static void *sdbGetTableFromId(int32_t tableId) {
   return tsSdbObj.tableList[tableId];
 }

@@ -332,50 +345,48 @@ void sdbCleanUp() {
   pthread_mutex_destroy(&tsSdbObj.mutex);
 }
 
-void sdbIncRef(void *handle, void *pRow) {
-  if (pRow) {
+void sdbIncRef(void *handle, void *pObj) {
+  if (pObj == NULL) return;
+
   SSdbTable *pTable = handle;
-  int32_t *  pRefCount = (int32_t *)(pRow + pTable->refCountPos);
+  int32_t *  pRefCount = (int32_t *)(pObj + pTable->refCountPos);
   atomic_add_fetch_32(pRefCount, 1);
-  if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
-    sdbTrace("table:%s, add ref to record:%s:%s:%d", pTable->tableName, pTable->tableName, sdbGetkeyStr(pTable, pRow), *pRefCount);
-  }
-  }
+  sdbTrace("table:%s, add ref to record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
 }
 
-void sdbDecRef(void *handle, void *pRow) {
-  if (pRow) {
+void sdbDecRef(void *handle, void *pObj) {
+  if (pObj == NULL) return;
+
   SSdbTable *pTable = handle;
-  int32_t *  pRefCount = (int32_t *)(pRow + pTable->refCountPos);
+  int32_t *  pRefCount = (int32_t *)(pObj + pTable->refCountPos);
   int32_t    refCount = atomic_sub_fetch_32(pRefCount, 1);
-  if (0 && (pTable->tableId == SDB_TABLE_MNODE || pTable->tableId == SDB_TABLE_DNODE)) {
-    sdbTrace("table:%s, def ref of record:%s:%s:%d", pTable->tableName, pTable->tableName, sdbGetkeyStr(pTable, pRow), *pRefCount);
-  }
+  sdbTrace("table:%s, def ref of record:%s:%d", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
 
-  int8_t *updateEnd = pRow + pTable->refCountPos - 1;
+  int8_t *updateEnd = pObj + pTable->refCountPos - 1;
   if (refCount <= 0 && *updateEnd) {
-    sdbTrace("table:%s, record:%s:%s:%d is destroyed", pTable->tableName, pTable->tableName, sdbGetkeyStr(pTable, pRow), *pRefCount);
-    SSdbOper oper = {.pObj = pRow};
+    sdbTrace("table:%s, record:%s:%d is destroyed", pTable->tableName, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
+    SSdbOper oper = {.pObj = pObj};
     (*pTable->destroyFp)(&oper);
   }
 }
-}
 
-static SSdbRow *sdbGetRowMeta(void *handle, void *key) {
-  SSdbTable *pTable = (SSdbTable *)handle;
-  SSdbRow *  pMeta;
-
-  if (handle == NULL) return NULL;
+static SSdbRow *sdbGetRowMeta(SSdbTable *pTable, void *key) {
+  if (pTable == NULL) return NULL;
 
   int32_t keySize = sizeof(int32_t);
-  if (pTable->keyType == SDB_KEY_STRING) {
+  if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) {
     keySize = strlen((char *)key);
   }
 
-  pMeta = taosHashGet(pTable->iHandle, key, keySize);
-  return pMeta;
+  return taosHashGet(pTable->iHandle, key, keySize);
+}
+
+static SSdbRow *sdbGetRowMetaFromObj(SSdbTable *pTable, void *key) {
+  return sdbGetRowMeta(pTable, sdbGetObjKey(pTable, key));
 }
 
 void *sdbGetRow(void *handle, void *key) {

@@ -387,7 +398,7 @@ void *sdbGetRow(void *handle, void *key) {
   pthread_mutex_lock(&pTable->mutex);
 
   int32_t keySize = sizeof(int32_t);
-  if (pTable->keyType == SDB_KEY_STRING) {
+  if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) {
     keySize = strlen((char *)key);
   }
 
   pMeta = taosHashGet(pTable->iHandle, key, keySize);

@@ -400,6 +411,10 @@ void *sdbGetRow(void *handle, void *key) {
   return pMeta->row;
 }
 
+static void *sdbGetRowFromObj(SSdbTable *pTable, void *key) {
+  return sdbGetRow(pTable, sdbGetObjKey(pTable, key));
+}
+
 static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
   SSdbRow rowMeta;
   rowMeta.rowSize = pOper->rowSize;

@@ -407,11 +422,14 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
   pthread_mutex_lock(&pTable->mutex);
 
+  void *  key = sdbGetObjKey(pTable, pOper->pObj);
   int32_t keySize = sizeof(int32_t);
-  if (pTable->keyType == SDB_KEY_STRING) {
-    keySize = strlen((char *)pOper->pObj);
+  if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) {
+    keySize = strlen((char *)key);
   }
 
-  taosHashPut(pTable->iHandle, pOper->pObj, keySize, &rowMeta, sizeof(SSdbRow));
+  taosHashPut(pTable->iHandle, key, keySize, &rowMeta, sizeof(SSdbRow));
 
   sdbIncRef(pTable, pOper->pObj);
   pTable->numOfRows++;

@@ -425,7 +443,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
   pthread_mutex_unlock(&pTable->mutex);
 
   sdbTrace("table:%s, insert record:%s to hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
-           sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
 
   (*pTable->insertFp)(pOper);
   return TSDB_CODE_SUCCESS;

@@ -436,17 +454,20 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
   pthread_mutex_lock(&pTable->mutex);
 
+  void *  key = sdbGetObjKey(pTable, pOper->pObj);
   int32_t keySize = sizeof(int32_t);
-  if (pTable->keyType == SDB_KEY_STRING) {
-    keySize = strlen((char *)pOper->pObj);
+  if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) {
    keySize = strlen((char *)key);
   }
 
-  taosHashRemove(pTable->iHandle, pOper->pObj, keySize);
+  taosHashRemove(pTable->iHandle, key, keySize);
   pTable->numOfRows--;
   pthread_mutex_unlock(&pTable->mutex);
 
   sdbTrace("table:%s, delete record:%s from hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
-           sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
 
   int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
   *updateEnd = 1;

@@ -457,7 +478,7 @@ static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) {
   sdbTrace("table:%s, update record:%s in hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
-           sdbGetkeyStr(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
 
   (*pTable->updateFp)(pOper);
   return TSDB_CODE_SUCCESS;

@@ -488,7 +509,7 @@ static int sdbWrite(void *param, void *data, int type) {
   } else if (pHead->version != tsSdbObj.version + 1) {
     pthread_mutex_unlock(&tsSdbObj.mutex);
     sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64,
-             pTable->tableName, sdbGetActionStr(action), sdbGetkeyStr(pTable, pHead->cont), pHead->version, tsSdbObj.version);
+             pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version, tsSdbObj.version);
     return TSDB_CODE_OTHERS;
   } else {

@@ -540,8 +561,8 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
   SSdbTable *pTable = (SSdbTable *)pOper->table;
   if (pTable == NULL) return -1;
 
-  if (sdbGetRow(pTable, pOper->pObj)) {
-    sdbError("table:%s, failed to insert record:%s, already exist", pTable->tableName, sdbGetkeyStr(pTable, pOper->pObj));
+  if (sdbGetRowFromObj(pTable, pOper->pObj)) {
+    sdbError("table:%s, failed to insert record:%s, already exist", pTable->tableName, sdbGetKeyStrFromObj(pTable, pOper->pObj));
     sdbDecRef(pTable, pOper->pObj);
     return TSDB_CODE_ALREADY_THERE;
   }

@@ -580,7 +601,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
   SSdbTable *pTable = (SSdbTable *)pOper->table;
   if (pTable == NULL) return -1;
 
-  SSdbRow *pMeta = sdbGetRowMeta(pTable, pOper->pObj);
+  SSdbRow *pMeta = sdbGetRowMetaFromObj(pTable, pOper->pObj);
   if (pMeta == NULL) {
     sdbTrace("table:%s, record is not there, delete failed", pTable->tableName);
     return -1;

@@ -590,25 +611,27 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
   assert(pMetaRow != NULL);
 
   if (pOper->type == SDB_OPER_GLOBAL) {
-    int32_t rowSize = 0;
+    void *  key = sdbGetObjKey(pTable, pOper->pObj);
+    int32_t keySize = 0;
     switch (pTable->keyType) {
       case SDB_KEY_STRING:
-        rowSize = strlen((char *)pOper->pObj) + 1;
+      case SDB_KEY_VAR_STRING:
+        keySize = strlen((char *)key) + 1;
         break;
       case SDB_KEY_INT:
       case SDB_KEY_AUTO:
-        rowSize = sizeof(uint64_t);
+        keySize = sizeof(uint32_t);
         break;
       default:
        return -1;
     }
 
-    int32_t size = sizeof(SWalHead) + rowSize;
+    int32_t   size = sizeof(SWalHead) + keySize;
     SWalHead *pHead = taosAllocateQitem(size);
     pHead->version = 0;
-    pHead->len = rowSize;
+    pHead->len = keySize;
     pHead->msgType = pTable->tableId * 10 + SDB_ACTION_DELETE;
-    memcpy(pHead->cont, pOper->pObj, rowSize);
+    memcpy(pHead->cont, key, keySize);
 
     int32_t code = sdbWrite(pOper, pHead, pHead->msgType);
     taosFreeQitem(pHead);

@@ -622,7 +645,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) {
   SSdbTable *pTable = (SSdbTable *)pOper->table;
   if (pTable == NULL) return -1;
 
-  SSdbRow *pMeta = sdbGetRowMeta(pTable, pOper->pObj);
+  SSdbRow *pMeta = sdbGetRowMetaFromObj(pTable, pOper->pObj);
   if (pMeta == NULL) {
     sdbTrace("table:%s, record is not there, delete failed", pTable->tableName);
     return -1;
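Taken together, the mgmtDef.h, mgmtSdb.h and mgmtSdb.c changes above introduce variable-length string keys: a table object now begins with a char * to its id, and the new SDB_KEY_VAR_STRING key type tells the sdb layer to dereference that pointer before hashing. The following is a minimal self-contained sketch of that lookup pattern; the types are simplified stand-ins for the real SSdbTable and hash structures, and the parameter is renamed pObj here for clarity, so treat it as an illustration rather than the actual mnode code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef enum { SDB_KEY_STRING, SDB_KEY_INT, SDB_KEY_AUTO, SDB_KEY_VAR_STRING } ESdbKey;

/* Simplified stand-in for SSdbTable: only the key type matters here. */
typedef struct { ESdbKey keyType; } SSdbTable;

/* Example object whose first field is the pointer to its variable-length key,
 * mirroring the new STableObj layout (char *tableId; int8_t type;). */
typedef struct { char *tableId; int8_t type; } STableObj;

/* For SDB_KEY_VAR_STRING the first field of the object is a char * to the key,
 * so the key is obtained by dereferencing the object pointer; for the other key
 * types the object pointer itself is where the key lives. */
static void *sdbGetObjKey(SSdbTable *pTable, void *pObj) {
  if (pTable->keyType == SDB_KEY_VAR_STRING) {
    return *(char **)pObj;
  }
  return pObj;
}

/* Both string key types are hashed by their string length; integer and
 * auto-increment keys by sizeof(int32_t), as in sdbGetRowMeta above. */
static int32_t sdbKeySize(SSdbTable *pTable, void *key) {
  if (pTable->keyType == SDB_KEY_STRING || pTable->keyType == SDB_KEY_VAR_STRING) {
    return (int32_t)strlen((char *)key);
  }
  return (int32_t)sizeof(int32_t);
}

int main(void) {
  SSdbTable table = { SDB_KEY_VAR_STRING };
  STableObj obj   = { "db.tb1", 0 };
  void *key = sdbGetObjKey(&table, &obj);
  printf("key=%s size=%d\n", (char *)key, sdbKeySize(&table, key));
  return 0;
}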
src/mnode/src/mgmtTable.c  (+79 -48)

@@ -84,6 +84,7 @@ static void mgmtProcessAlterTableRsp(SRpcMsg *rpcMsg);
 static int32_t mgmtFindSuperTableColumnIndex(SSuperTableObj *pStable, char *colName);
 
 static void mgmtDestroyChildTable(SChildTableObj *pTable) {
+  tfree(pTable->info.tableId);
   tfree(pTable->schema);
   tfree(pTable->sql);
   tfree(pTable);

@@ -180,6 +181,7 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) {
   SChildTableObj *pNew = pOper->pObj;
   SChildTableObj *pTable = mgmtGetChildTable(pNew->info.tableId);
   if (pTable != pNew) {
+    void *oldTableId = pTable->info.tableId;
     void *oldSql = pTable->sql;
     void *oldSchema = pTable->schema;
     memcpy(pTable, pNew, pOper->rowSize);

@@ -188,6 +190,7 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) {
     free(pNew);
     free(oldSql);
     free(oldSchema);
+    free(oldTableId);
   }
   mgmtDecTableRef(pTable);

@@ -195,51 +198,66 @@ static int32_t mgmtChildTableActionUpdate(SSdbOper *pOper) {
 }
 
 static int32_t mgmtChildTableActionEncode(SSdbOper *pOper) {
-  const int32_t maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16);
   SChildTableObj *pTable = pOper->pObj;
   assert(pTable != NULL && pOper->rowData != NULL);
 
-  if (pTable->info.type == TSDB_CHILD_TABLE) {
-    memcpy(pOper->rowData, pTable, tsChildTableUpdateSize);
-    pOper->rowSize = tsChildTableUpdateSize;
-  } else {
+  int32_t len = strlen(pTable->info.tableId);
+  if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID;
+
+  memcpy(pOper->rowData, pTable->info.tableId, len);
+  memset(pOper->rowData + len, 0, 1);
+  len++;
+
+  memcpy(pOper->rowData + len, (char *)pTable + sizeof(char *), tsChildTableUpdateSize);
+  len += tsChildTableUpdateSize;
+
+  if (pTable->info.type != TSDB_CHILD_TABLE) {
     int32_t schemaSize = pTable->numOfColumns * sizeof(SSchema);
-    if (maxRowSize < tsChildTableUpdateSize + schemaSize) {
-      return TSDB_CODE_INVALID_MSG_LEN;
+    memcpy(pOper->rowData + len, pTable->schema, schemaSize);
+    len += schemaSize;
+
+    if (pTable->sqlLen != 0) {
+      memcpy(pOper->rowData + len, pTable->sql, pTable->sqlLen);
+      len += pTable->sqlLen;
     }
-    memcpy(pOper->rowData, pTable, tsChildTableUpdateSize);
-    memcpy(pOper->rowData + tsChildTableUpdateSize, pTable->schema, schemaSize);
-    memcpy(pOper->rowData + tsChildTableUpdateSize + schemaSize, pTable->sql, pTable->sqlLen);
-    pOper->rowSize = tsChildTableUpdateSize + schemaSize + pTable->sqlLen;
   }
 
+  pOper->rowSize = len;
   return TSDB_CODE_SUCCESS;
 }
 
 static int32_t mgmtChildTableActionDecode(SSdbOper *pOper) {
   assert(pOper->rowData != NULL);
   SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj));
-  if (pTable == NULL) {
-    return TSDB_CODE_SERV_OUT_OF_MEMORY;
-  }
+  if (pTable == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
+
+  int32_t len = strlen(pOper->rowData);
+  if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID;
+  pTable->info.tableId = strdup(pOper->rowData);
+  len++;
 
-  memcpy(pTable, pOper->rowData, tsChildTableUpdateSize);
+  memcpy((char *)pTable + sizeof(char *), pOper->rowData + len, tsChildTableUpdateSize);
+  len += tsChildTableUpdateSize;
 
   if (pTable->info.type != TSDB_CHILD_TABLE) {
     int32_t schemaSize = pTable->numOfColumns * sizeof(SSchema);
     pTable->schema = (SSchema *)malloc(schemaSize);
     if (pTable->schema == NULL) {
       mgmtDestroyChildTable(pTable);
-      return TSDB_CODE_SERV_OUT_OF_MEMORY;
+      return TSDB_CODE_INVALID_TABLE_TYPE;
     }
-    memcpy(pTable->schema, pOper->rowData + tsChildTableUpdateSize, schemaSize);
+    memcpy(pTable->schema, pOper->rowData + len, schemaSize);
+    len += schemaSize;
 
-    pTable->sql = (char *)malloc(pTable->sqlLen);
-    if (pTable->sql == NULL) {
-      mgmtDestroyChildTable(pTable);
-      return TSDB_CODE_SERV_OUT_OF_MEMORY;
+    if (pTable->sqlLen != 0) {
+      pTable->sql = malloc(pTable->sqlLen);
+      if (pTable->sql == NULL) {
+        mgmtDestroyChildTable(pTable);
+        return TSDB_CODE_SERV_OUT_OF_MEMORY;
+      }
+      memcpy(pTable->sql, pOper->rowData + len, pTable->sqlLen);
     }
-    memcpy(pTable->sql, pOper->rowData + tsChildTableUpdateSize + schemaSize, pTable->sqlLen);
   }
 
   pOper->pObj = pTable;

@@ -311,15 +329,15 @@ static int32_t mgmtChildTableActionRestored() {
 static int32_t mgmtInitChildTables() {
   SChildTableObj tObj;
-  tsChildTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
+  tsChildTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type;
 
   SSdbTableDesc tableDesc = {
     .tableId      = SDB_TABLE_CTABLE,
     .tableName    = "ctables",
     .hashSessions = tsMaxTables,
-    .maxRowSize   = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16),
+    .maxRowSize   = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE,
     .refCountPos  = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
-    .keyType      = SDB_KEY_STRING,
+    .keyType      = SDB_KEY_VAR_STRING,
     .insertFp     = mgmtChildTableActionInsert,
     .deleteFp     = mgmtChildTableActionDelete,
     .updateFp     = mgmtChildTableActionUpdate,

@@ -372,6 +390,7 @@ static void mgmtDestroySuperTable(SSuperTableObj *pStable) {
     taosHashCleanup(pStable->vgHash);
     pStable->vgHash = NULL;
   }
+  tfree(pStable->info.tableId);
   tfree(pStable->schema);
   tfree(pStable);
 }

@@ -408,11 +427,13 @@ static int32_t mgmtSuperTableActionUpdate(SSdbOper *pOper) {
   SSuperTableObj *pNew = pOper->pObj;
   SSuperTableObj *pTable = mgmtGetSuperTable(pNew->info.tableId);
   if (pTable != pNew) {
+    void *oldTableId = pTable->info.tableId;
     void *oldSchema = pTable->schema;
     memcpy(pTable, pNew, pOper->rowSize);
     pTable->schema = pNew->schema;
     free(pNew->vgHash);
     free(pNew);
+    free(oldTableId);
     free(oldSchema);
   }
   mgmtDecTableRef(pTable);

@@ -420,40 +441,50 @@ static int32_t mgmtSuperTableActionUpdate(SSdbOper *pOper) {
 }
 
 static int32_t mgmtSuperTableActionEncode(SSdbOper *pOper) {
-  const int32_t maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16);
   SSuperTableObj *pStable = pOper->pObj;
   assert(pOper->pObj != NULL && pOper->rowData != NULL);
 
-  int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags);
+  int32_t len = strlen(pStable->info.tableId);
+  if (len > TSDB_TABLE_ID_LEN) len = TSDB_CODE_INVALID_TABLE_ID;
 
-  if (maxRowSize < tsSuperTableUpdateSize + schemaSize) {
-    return TSDB_CODE_INVALID_MSG_LEN;
-  }
+  memcpy(pOper->rowData, pStable->info.tableId, len);
+  memset(pOper->rowData + len, 0, 1);
+  len++;
+
+  memcpy(pOper->rowData + len, (char *)pStable + sizeof(char *), tsSuperTableUpdateSize);
+  len += tsSuperTableUpdateSize;
 
-  memcpy(pOper->rowData, pStable, tsSuperTableUpdateSize);
-  memcpy(pOper->rowData + tsSuperTableUpdateSize, pStable->schema, schemaSize);
-  pOper->rowSize = tsSuperTableUpdateSize + schemaSize;
+  int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags);
+  memcpy(pOper->rowData + len, pStable->schema, schemaSize);
+  len += schemaSize;
 
+  pOper->rowSize = len;
   return TSDB_CODE_SUCCESS;
 }
 
 static int32_t mgmtSuperTableActionDecode(SSdbOper *pOper) {
   assert(pOper->rowData != NULL);
   SSuperTableObj *pStable = (SSuperTableObj *)calloc(1, sizeof(SSuperTableObj));
   if (pStable == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
 
-  memcpy(pStable, pOper->rowData, tsSuperTableUpdateSize);
+  int32_t len = strlen(pOper->rowData);
+  if (len > TSDB_TABLE_ID_LEN) return TSDB_CODE_INVALID_TABLE_ID;
+  pStable->info.tableId = strdup(pOper->rowData);
+  len++;
+
+  memcpy((char *)pStable + sizeof(char *), pOper->rowData + len, tsSuperTableUpdateSize);
+  len += tsSuperTableUpdateSize;
 
   int32_t schemaSize = sizeof(SSchema) * (pStable->numOfColumns + pStable->numOfTags);
   pStable->schema = malloc(schemaSize);
   if (pStable->schema == NULL) {
     mgmtDestroySuperTable(pStable);
-    return -1;
+    return TSDB_CODE_NOT_SUPER_TABLE;
   }
 
-  memcpy(pStable->schema, pOper->rowData + tsSuperTableUpdateSize, schemaSize);
+  memcpy(pStable->schema, pOper->rowData + len, schemaSize);
 
   pOper->pObj = pStable;
   return TSDB_CODE_SUCCESS;

@@ -465,15 +496,15 @@ static int32_t mgmtSuperTableActionRestored() {
 static int32_t mgmtInitSuperTables() {
   SSuperTableObj tObj;
-  tsSuperTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
+  tsSuperTableUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type;
 
   SSdbTableDesc tableDesc = {
     .tableId      = SDB_TABLE_STABLE,
     .tableName    = "stables",
     .hashSessions = TSDB_MAX_SUPER_TABLES,
-    .maxRowSize   = tsSuperTableUpdateSize + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16),
+    .maxRowSize   = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN,
     .refCountPos  = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
-    .keyType      = SDB_KEY_STRING,
+    .keyType      = SDB_KEY_VAR_STRING,
     .insertFp     = mgmtSuperTableActionInsert,
     .deleteFp     = mgmtSuperTableActionDelete,
     .updateFp     = mgmtSuperTableActionUpdate,

@@ -720,14 +751,14 @@ static void mgmtProcessTableMetaMsg(SQueuedMsg *pMsg) {
 static void mgmtProcessCreateSuperTableMsg(SQueuedMsg *pMsg) {
   SCMCreateTableMsg *pCreate = pMsg->pCont;
-  SSuperTableObj *pStable = (SSuperTableObj *)calloc(1, sizeof(SSuperTableObj));
+  SSuperTableObj *pStable = calloc(1, sizeof(SSuperTableObj));
   if (pStable == NULL) {
     mError("table:%s, failed to create, no enough memory", pCreate->tableId);
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_SERV_OUT_OF_MEMORY);
     return;
   }
 
-  strcpy(pStable->info.tableId, pCreate->tableId);
+  pStable->info.tableId = strdup(pCreate->tableId);
   pStable->info.type = TSDB_SUPER_TABLE;
   pStable->createdTime = taosGetTimestampMs();
   pStable->uid = (((uint64_t)pStable->createdTime) << 16) + (sdbGetVersion() & ((1ul << 16) - 1ul));

@@ -1358,7 +1389,7 @@ static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableOb
 }
 
 static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj *pVgroup, int32_t tid) {
-  SChildTableObj *pTable = (SChildTableObj *)calloc(1, sizeof(SChildTableObj));
+  SChildTableObj *pTable = calloc(1, sizeof(SChildTableObj));
   if (pTable == NULL) {
     mError("table:%s, failed to alloc memory", pCreate->tableId);
     terrno = TSDB_CODE_SERV_OUT_OF_MEMORY;

@@ -1371,7 +1402,7 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj
     pTable->info.type = TSDB_NORMAL_TABLE;
   }
 
-  strcpy(pTable->info.tableId, pCreate->tableId);
+  pTable->info.tableId = strdup(pCreate->tableId);
   pTable->createdTime = taosGetTimestampMs();
   pTable->sid = tid;
   pTable->vgId = pVgroup->vgId;
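The encode/decode rework above switches the serialized row layout for super and child table objects from a fixed-size struct copy to a variable-length record: the NUL-terminated tableId string first, then the fixed fields that follow the tableId pointer, then the schema array, then (for normal tables) the SQL text. A rough sketch of that size arithmetic with simplified stand-in types follows; it is illustrative only and not the actual mnode structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in: the real SChildTableObj/SSuperTableObj begin with a
 * char *tableId, followed by the fixed fields that get serialized. */
typedef struct {
  char   *tableId;      /* not copied as a pointer; its string goes first */
  int8_t  type;
  int16_t numOfColumns;
  int32_t sqlLen;
  /* ... remaining fixed fields up to updateEnd ... */
} SDemoTableObj;

enum { DEMO_SCHEMA_COL_SIZE = 20 };  /* stand-in for sizeof(SSchema) */

/* Serialized size using the same layout as the new encode path:
 * tableId string + '\0', fixed part, schema, optional sql. */
static int32_t demoEncodedSize(const SDemoTableObj *pTable, int32_t fixedSize) {
  int32_t len = (int32_t)strlen(pTable->tableId) + 1;   /* id + terminator */
  len += fixedSize;                                      /* fixed fields    */
  len += pTable->numOfColumns * DEMO_SCHEMA_COL_SIZE;    /* schema array    */
  len += pTable->sqlLen;                                 /* sql, may be 0   */
  return len;
}

int main(void) {
  SDemoTableObj t = { "db.meters", 0, 4, 0 };
  /* Fixed part = everything after the tableId pointer, mirroring the diff:
   * (int8_t *)tObj.updateEnd - (int8_t *)&tObj.info.type */
  int32_t fixedSize = (int32_t)(sizeof(SDemoTableObj) - sizeof(char *));
  printf("encoded row size: %d bytes\n", demoEncodedSize(&t, fixedSize));
  return 0;
}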
src/rpc/src/rpcMain.c  (+5 -3)

@@ -362,9 +362,10 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg)
   // connection type is application specific.
   // for TDengine, all the query, show commands shall have TCP connection
   char type = pMsg->msgType;
-  if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH ||
-      type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_SHOW)
+  if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH ||
+      type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META ||
+      type == TSDB_MSG_TYPE_CM_SHOW)
     pContext->connType = RPC_CONN_TCPC;
 
   rpcSendReqToServer(pRpc, pContext);

@@ -1115,6 +1116,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
            htonl(pHead->code), msgLen, pHead->sourceId, pHead->destId, pHead->tranId);
   }
 
+  tTrace("connection type is: %d", pConn->connType);
   writtenLen = (*taosSendData[pConn->connType])(pConn->peerIp, pConn->peerPort, pHead, msgLen, pConn->chandle);
 
   if (writtenLen != msgLen) {
src/tsdb/src/tsdbMeta.c  (+1 -1)

@@ -80,7 +80,7 @@ STable *tsdbDecodeTable(void *cont, int contLen) {
   T_READ_MEMBER(ptr, int8_t, pTable->type);
   int len = *(int *)ptr;
   ptr = (char *)ptr + sizeof(int);
-  pTable->name = calloc(1, len + VARSTR_HEADER_SIZE);
+  pTable->name = calloc(1, len + VARSTR_HEADER_SIZE + 1);
   if (pTable->name == NULL) return NULL;
 
   varDataSetLen(pTable->name, len);
src/vnode/src/vnodeMain.c  (+1 -1)

@@ -429,7 +429,7 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
 static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
   SVnodeObj *pVnode = ahandle;
-  vTrace("vgId:%d, data file is synced", pVnode->vgId);
+  vTrace("vgId:%d, data file is synced, fversion:%" PRId64 "", pVnode->vgId, fversion);
 
   pVnode->fversion = fversion;
   pVnode->version = fversion;
tests/pytest/insert/bool.py  (+8 -0)

@@ -58,6 +58,14 @@ class TDTestCase:
         tdSql.query('select * from tb order by ts desc')
         tdLog.info('tdSql.checkRow(6)')
         tdSql.checkRows(6)
 
+        tdLog.info('=============== step7')
+        tdLog.info("insert into tb values (now+6m, true)")
+        tdSql.execute("insert into tb values (now+5m, true)")
+        tdLog.info('select * from tb order by ts desc')
+        tdSql.query('select * from tb order by ts desc')
+        tdLog.info('tdSql.checkRow(7)')
+        tdSql.checkRows(7)
+
         # convert end
 
     def stop(self):
tests/pytest/table/boundary.py  (new file, mode 100644, +161 -0)

# -*- coding: utf-8 -*-
import random
import string
import subprocess
import sys

from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def getLimitFromSourceCode(self, name):
        cmd = "grep -w '#define %s' ../../src/inc/taosdef.h|awk '{print $3}'" % name
        return int(subprocess.check_output(cmd, shell=True))

    def generateString(self, length):
        chars = string.ascii_uppercase + string.ascii_lowercase
        v = ""
        for i in range(length):
            v += random.choice(chars)
        return v

    def checkTagBoundaries(self):
        tdLog.debug("checking tag boundaries")
        tdSql.prepare()

        maxTags = self.getLimitFromSourceCode('TSDB_MAX_TAGS')
        totalTagsLen = self.getLimitFromSourceCode('TSDB_MAX_TAGS_LEN')
        tdLog.notice("max tags is %d" % maxTags)
        tdLog.notice("max total tag length is %d" % totalTagsLen)

        # for binary tags, 2 bytes are used for length
        tagLen = (totalTagsLen - maxTags * 2) // maxTags
        firstTagLen = totalTagsLen - 2 * maxTags - tagLen * (maxTags - 1)

        sql = "create table cars(ts timestamp, f int) tags(t0 binary(%d)" % firstTagLen
        for i in range(1, maxTags):
            sql += ", t%d binary(%d)" % (i, tagLen)
        sql += ");"

        tdLog.debug("creating super table: " + sql)
        tdSql.execute(sql)
        tdSql.query('show stables')
        tdSql.checkRows(1)

        for i in range(10):
            sql = "create table car%d using cars tags('%d'" % (i, i)
            sql += ", '0'" * (maxTags - 1) + ");"
            tdLog.debug("creating table: " + sql)
            tdSql.execute(sql)

            sql = "insert into car%d values(now, 0);" % i
            tdLog.debug("inserting data: " + sql)
            tdSql.execute(sql)

        tdSql.query('show tables')
        tdLog.info('tdSql.checkRow(10)')
        tdSql.checkRows(10)

        tdSql.query('select * from cars;')
        tdSql.checkRows(10)

    def checkColumnBoundaries(self):
        tdLog.debug("checking column boundaries")
        tdSql.prepare()

        # one column is for timestamp
        maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1

        sql = "create table cars (ts timestamp"
        for i in range(maxCols):
            sql += ", c%d int" % i
        sql += ");"
        tdSql.execute(sql)

        tdSql.query('show tables')
        tdSql.checkRows(1)

        sql = "insert into cars values (now"
        for i in range(maxCols):
            sql += ", %d" % i
        sql += ");"
        tdSql.execute(sql)

        tdSql.query('select * from cars')
        tdSql.checkRows(1)

    def checkTableNameBoundaries(self):
        tdLog.debug("checking table name boundaries")
        tdSql.prepare()

        maxTableNameLen = self.getLimitFromSourceCode('TSDB_TABLE_NAME_LEN')
        tdLog.notice("table name max length is %d" % maxTableNameLen)

        name = self.generateString(maxTableNameLen - 1)
        tdLog.info("table name is '%s'" % name)

        tdSql.execute("create table %s (ts timestamp, value int)" % name)
        tdSql.execute("insert into %s values(now, 0)" % name)

        tdSql.query('show tables')
        tdSql.checkRows(1)

        tdSql.query('select * from %s' % name)
        tdSql.checkRows(1)

    def checkRowBoundaries(self):
        tdLog.debug("checking row boundaries")
        tdSql.prepare()

        # 8 bytes for timestamp
        maxRowSize = 65536 - 8
        maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1

        # for binary cols, 2 bytes are used for length
        colLen = (maxRowSize - maxCols * 2) // maxCols
        firstColLen = maxRowSize - 2 * maxCols - colLen * (maxCols - 1)

        sql = "create table cars (ts timestamp, c0 binary(%d)" % firstColLen
        for i in range(1, maxCols):
            sql += ", c%d binary(%d)" % (i, colLen)
        sql += ");"
        tdSql.execute(sql)

        tdSql.query('show tables')
        tdSql.checkRows(1)

        col = self.generateString(firstColLen)
        sql = "insert into cars values (now, '%s'" % col
        col = self.generateString(colLen)
        for i in range(1, maxCols):
            sql += ", '%s'" % col
        sql += ");"
        tdLog.info(sql)
        tdSql.execute(sql)

        tdSql.query("select * from cars")
        tdSql.checkRows(1)

    def run(self):
        self.checkTagBoundaries()
        self.checkColumnBoundaries()
        self.checkTableNameBoundaries()
        self.checkRowBoundaries()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/table/column_name.py  (+19 -72)

 # -*- coding: utf-8 -*-
 import sys
 import string
 import random
 import subprocess
 
 from util.log import *
 from util.cases import *
 from util.sql import *

@@ -14,34 +17,9 @@ class TDTestCase:
     def run(self):
         tdSql.prepare()
 
-        # TSIM: system sh/stop_dnodes.sh
-        # TSIM:
-        # TSIM: system sh/ip.sh -i 1 -s up
-        # TSIM: system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
-        # TSIM: system sh/cfg.sh -n dnode1 -c walLevel -v 0
-        # TSIM: system sh/exec.sh -n dnode1 -s start
-        # TSIM:
-        # TSIM: sleep 3000
-        # TSIM: sql connect
-        # TSIM:
-        # TSIM: $i = 0
-        # TSIM: $dbPrefix = lm_cm_db
-        # TSIM: $tbPrefix = lm_cm_tb
-        # TSIM: $db = $dbPrefix . $i
-        # TSIM: $tb = $tbPrefix . $i
-        # TSIM:
         # TSIM: print =============== step1
         tdLog.info('=============== step1')
-        # TSIM: sql create database $db
-        # TSIM: sql use $db
-        # TSIM:
         # TSIM: sql drop table dd -x step0
         tdLog.info('drop table dd -x step0')
         tdSql.error('drop table dd')
         # TSIM: return -1
         # TSIM: step0:
         # TSIM:
         # TSIM: sql create table $tb(ts timestamp, int) -x step1
         tdLog.info('create table tb(ts timestamp, int) -x step1')
         tdSql.error('create table tb(ts timestamp, int)')
         # TSIM: return -1

@@ -112,37 +90,24 @@ class TDTestCase:
         tdLog.info('=============== step4')
         # TSIM: sql create table $tb (ts timestamp,
         # a0123456789012345678901234567890123456789 int)
         getMaxColNum = "grep -w '#define TSDB_COL_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'"
         boundary = int(subprocess.check_output(getMaxColNum, shell=True))
         tdLog.info("get max column name length is %d" % boundary)
 
         chars = string.ascii_uppercase + string.ascii_lowercase
         # col_name = ''.join(random.choices(chars, k=boundary+1))
         # tdLog.info(
         #     'create table tb (ts timestamp, %s int), col_name length is %d' % (col_name, len(col_name)))
         # tdSql.error(
         #     'create table tb (ts timestamp, %s int)' % col_name)
 
         col_name = ''.join(random.choices(chars, k=boundary))
         tdLog.info(
-            'create table tb (ts timestamp, a0123456789012345678901234567890123456789 int)')
-        tdSql.execute(
-            'create table tb (ts timestamp, a0123456789012345678901234567890123456789 int)')
-        # TSIM: sql drop table $tb
-        tdLog.info('drop table tb')
-        tdSql.execute('drop table tb')
-        # TSIM: sql show tables
-        tdLog.info('show tables')
-        tdSql.query('show tables')
-        # TSIM: if $rows != 0 then
-        tdLog.info('tdSql.checkRow(0)')
-        tdSql.checkRows(0)
-        # TSIM: return -1
-        # TSIM: endi
-        # TSIM: print =============== step5
-        tdLog.info('=============== step5')
-        # TSIM: sql create table $tb (ts timestamp, a0123456789 int)
-        tdLog.info('create table tb (ts timestamp, a0123456789 int)')
-        tdSql.execute('create table tb (ts timestamp, a0123456789 int)')
-        # TSIM: sql show tables
-        tdLog.info('show tables')
-        tdSql.query('show tables')
-        # TSIM: if $rows != 1 then
-        tdLog.info('tdSql.checkRow(1)')
-        tdSql.checkRows(1)
-        # TSIM: return -1
-        # TSIM: endi
+            'create table tb (ts timestamp, %s int), col_name length is %d' % (col_name, len(col_name)))
+        tdSql.execute(
+            'create table tb (ts timestamp, %s int)' % col_name)
         # TSIM: sql insert into $tb values (now , 1)
         tdLog.info("insert into tb values (now , 1)")
         tdSql.execute("insert into tb values (now , 1)")

@@ -152,24 +117,6 @@ class TDTestCase:
         # TSIM: if $rows != 1 then
         tdLog.info('tdSql.checkRow(1)')
         tdSql.checkRows(1)
-        # TSIM: return -1
-        # TSIM: endi
-        # TSIM:
-        # TSIM: sql drop database $db
-        tdLog.info('drop database db')
-        tdSql.execute('drop database db')
-        # TSIM: sql show databases
-        tdLog.info('show databases')
-        tdSql.query('show databases')
-        # TSIM: if $rows != 0 then
-        tdLog.info('tdSql.checkRow(0)')
-        tdSql.checkRows(0)
-        # TSIM: return -1
-        # TSIM: endi
-        # TSIM:
-        # TSIM:
-        # TSIM:
-        # TSIM:
         # convert end
 
     def stop(self):
tests/pytest/table/column_num.py  (+1 -1)

@@ -76,7 +76,7 @@ class TDTestCase:
         tdSql.checkRows(2)
 
         data = "now"
-        for x in range(0, boundary - 1):
+        for x in range(0, boundary - 1):
             data = data + ", %d" % x
 
         tdLog.info("insert into tb1 values (%s)" % data)
         tdSql.execute("insert into tb1 values (%s)" % data)

(the changed line differs only in whitespace, which is hidden in this view)
tests/pytest/table/tablename-boundary.py  (+1 -1)

@@ -21,7 +21,7 @@ class TDTestCase:
         tableNameMaxLen = int(subprocess.check_output(getTableNameLen, shell=True))
-        tdLog.notice("table name max length is %d" % tableNameMaxLen)
+        tdLog.info("table name max length is %d" % tableNameMaxLen)
         chars = string.ascii_uppercase + string.ascii_lowercase
         tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
         tdLog.info('tb_name length %d' % len(tb_name))
tests/pytest/util/cases.py  (+1 -1)

@@ -71,7 +71,7 @@ class TDCases:
                 case.run()
             except Exception as e:
                 tdLog.notice(repr(e))
-                tdLog.exit("%s failed: %s" % (__file__, fileName))
+                tdLog.exit("%s failed" % (fileName))
             case.stop()
             runNum += 1
             continue
tests/pytest/util/sql.py  (+44 -24)

@@ -15,6 +15,7 @@ import sys
 import os
 import time
 import datetime
+import inspect
 from util.log import *

@@ -44,7 +45,12 @@ class TDSql:
         except BaseException:
             expectErrNotOccured = False
         if expectErrNotOccured:
-            tdLog.exit("failed: sql:%.40s, expect error not occured" % (sql))
+            frame = inspect.stack()[1]
+            callerModule = inspect.getmodule(frame[0])
+            callerFilename = callerModule.__file__
+            tdLog.exit(
+                "%s failed: sql:%.40s, expect error not occured" % (callerFilename, sql))
         else:
             tdLog.info("sql:%.40s, expect error occured" % (sql))

@@ -62,33 +68,39 @@ class TDSql:
     def checkRows(self, expectRows):
         if self.queryRows != expectRows:
+            frame = inspect.stack()[1]
+            callerModule = inspect.getmodule(frame[0])
+            callerFilename = callerModule.__file__
             tdLog.exit(
-                "failed: sql:%.40s, queryRows:%d != expect:%d" % (self.sql, self.queryRows, expectRows))
+                "%s failed: sql:%.40s, queryRows:%d != expect:%d" % (callerFilename, self.sql, self.queryRows, expectRows))
         tdLog.info("sql:%.40s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
 
     def checkData(self, row, col, data):
+        frame = inspect.stack()[1]
+        callerModule = inspect.getmodule(frame[0])
+        callerFilename = callerModule.__file__
         if row < 0:
             tdLog.exit(
-                "failed: sql:%.40s, row:%d is smaller than zero" % (self.sql, row))
+                "%s failed: sql:%.40s, row:%d is smaller than zero" % (callerFilename, self.sql, row))
         if col < 0:
             tdLog.exit(
-                "failed: sql:%.40s, col:%d is smaller than zero" % (self.sql, col))
+                "%s failed: sql:%.40s, col:%d is smaller than zero" % (callerFilename, self.sql, col))
         if row >= self.queryRows:
             tdLog.exit(
-                "failed: sql:%.40s, row:%d is larger than queryRows:%d" % (self.sql, row, self.queryRows))
+                "%s failed: sql:%.40s, row:%d is larger than queryRows:%d" % (callerFilename, self.sql, row, self.queryRows))
         if col >= self.queryCols:
             tdLog.exit(
-                "failed: sql:%.40s, col:%d is larger than queryRows:%d" % (self.sql, col, self.queryCols))
+                "%s failed: sql:%.40s, col:%d is larger than queryRows:%d" % (callerFilename, self.sql, col, self.queryCols))
 
         if self.queryResult[row][col] != data:
-            tdLog.exit("failed: sql:%.40s row:%d col:%d data:%s != expect:%s" %
-                       (self.sql, row, col, self.queryResult[row][col], data))
+            tdLog.exit("%s failed: sql:%.40s row:%d col:%d data:%s != expect:%s" %
+                       (callerFilename, self.sql, row, col, self.queryResult[row][col], data))
 
         if data is None:
             tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" %

@@ -104,22 +116,26 @@ class TDSql:
                        (self.sql, row, col, self.queryResult[row][col], data))
 
     def getData(self, row, col):
+        frame = inspect.stack()[1]
+        callerModule = inspect.getmodule(frame[0])
+        callerFilename = callerModule.__file__
         if row < 0:
             tdLog.exit(
-                "failed: sql:%.40s, row:%d is smaller than zero" % (self.sql, row))
+                "%s failed: sql:%.40s, row:%d is smaller than zero" % (callerFilename, self.sql, row))
         if col < 0:
             tdLog.exit(
-                "failed: sql:%.40s, col:%d is smaller than zero" % (self.sql, col))
+                "%s failed: sql:%.40s, col:%d is smaller than zero" % (callerFilename, self.sql, col))
         if row >= self.queryRows:
             tdLog.exit(
-                "failed: sql:%.40s, row:%d is larger than queryRows:%d" % (self.sql, row, self.queryRows))
+                "%s failed: sql:%.40s, row:%d is larger than queryRows:%d" % (callerFilename, self.sql, row, self.queryRows))
         if col >= self.queryCols:
             tdLog.exit(
-                "failed: sql:%.40s, col:%d is larger than queryRows:%d" % (self.sql, col, self.queryCols))
+                "%s failed: sql:%.40s, col:%d is larger than queryRows:%d" % (callerFilename, self.sql, col, self.queryCols))
 
         return self.queryResult[row][col]
 
     def executeTimes(self, sql, times):

@@ -137,8 +153,12 @@ class TDSql:
     def checkAffectedRows(self, expectAffectedRows):
         if self.affectedRows != expectAffectedRows:
-            tdLog.exit("failed: sql:%.40s, affectedRows:%d != expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))
+            frame = inspect.stack()[1]
+            callerModule = inspect.getmodule(frame[0])
+            callerFilename = callerModule.__file__
+            tdLog.exit("%s failed: sql:%.40s, affectedRows:%d != expect:%d" % (callerFilename, self.sql, self.affectedRows, expectAffectedRows))
 
         tdLog.info("sql:%.40s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))