taosdata / TDengine
Commit f9e22c16 (unverified)

Authored on Aug 27, 2021 by dapan1121; committed via GitHub on Aug 27, 2021.

Merge pull request #7512 from taosdata/feature/szhou/schemaless

[schemaless][RunCITest]

Parents: d8bcb007, a2a845fa
Changes: 13 changed files with 1223 additions and 434 deletions (+1223, -434).

src/client/inc/tsclient.h                          +1    -0
src/client/src/tscAsync.c                          +0    -9
src/client/src/tscParseLineProtocol.c              +292  -262
src/client/src/tscPrepare.c                        +2    -2
src/client/src/tscSQLParser.c                      +26   -41
src/client/src/tscUtil.c                           +25   -0
src/inc/taoserror.h                                +6    -0
src/mnode/src/mnodeTable.c                         +15   -2
src/util/src/terror.c                              +6    -0
tests/examples/c/schemaless.c                      +2    -116
tests/pytest/query/queryDiffColsOr.py              +545  -0
tests/pytest/tools/schemalessInsertPerformance.py  +301  -0
tests/pytest/util/common.py                        +2    -2
src/client/inc/tsclient.h

@@ -492,6 +492,7 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
 void tscSetBoundColumnInfo(SParsedDataColInfo* pColInfo, SSchema* pSchema, int32_t numOfCols);

 char* tscGetErrorMsgPayload(SSqlCmd* pCmd);
+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql);

 int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char* sql);
 int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
src/client/src/tscAsync.c

@@ -363,15 +363,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
   }

   if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) {  // stmt insert
-    STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-    code = tscGetTableMeta(pSql, pTableMetaInfo);
-    if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
-      taosReleaseRef(tscObjRef, pSql->self);
-      return;
-    } else {
-      assert(code == TSDB_CODE_SUCCESS);
-    }
-    (*pSql->fp)(pSql->param, pSql, code);
   } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {  // file insert
     tscImportDataFromFile(pSql);
src/client/src/tscParseLineProtocol.c

(Diff collapsed in the original view; contents not shown.)
src/client/src/tscPrepare.c

@@ -1540,6 +1540,8 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
   pRes->qId = 0;
   pRes->numOfRows = 1;

+  registerSqlObj(pSql);
+
   strtolower(pSql->sqlstr, sql);
   tscDebugL("0x%" PRIx64 " SQL: %s", pSql->self, pSql->sqlstr);

@@ -1549,8 +1551,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
     pSql->cmd.insertParam.numOfParams = 0;
     pSql->cmd.batchSize = 0;

-    registerSqlObj(pSql);
-
     int32_t ret = stmtParseInsertTbTags(pSql, pStmt);
     if (ret != TSDB_CODE_SUCCESS) {
       STMT_RET(ret);
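The two hunks above move registerSqlObj(pSql) forward so the SQL object is registered before stmtParseInsertTbTags() can fail and return early. Below is a minimal sketch of the client-side prepare path that runs through this code; the connection parameters and the INSERT statement are placeholders, not part of this commit.

// Sketch of the taos_stmt_prepare() path touched above; DSN values and SQL are placeholders.
#include <stdio.h>
#include <taos.h>
#include <taoserror.h>

int prepare_demo(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) return -1;

  TAOS_STMT *stmt = taos_stmt_init(taos);
  // The moved registerSqlObj() call now runs inside taos_stmt_prepare(),
  // before the table/tag section of the statement is parsed.
  int code = taos_stmt_prepare(stmt, "insert into ? using st tags(?) values(?, ?)", 0);
  if (code != 0) {
    printf("prepare failed: %s\n", tstrerror(code));
  }

  taos_stmt_close(stmt);
  taos_close(taos);
  return code;
}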
src/client/src/tscSQLParser.c

@@ -117,7 +117,7 @@ static int32_t validateColumnName(char* name);
 static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
 static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);

-static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
+static int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
 static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
 static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);

@@ -1538,9 +1538,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
 /*
  * tags name /column name is truncated in sql.y
  */
-bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
-  //const char* msg1 = "timestamp not allowed in tags";
-  const char* msg2 = "duplicated column names";
+int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
   const char* msg3 = "tag length too long";
   const char* msg4 = "invalid tag name";
   const char* msg5 = "invalid binary/nchar tag length";

@@ -1555,8 +1553,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
   // no more max columns
   if (numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
   }

   // no more than 6 tags

@@ -1564,8 +1561,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
     char msg[128] = {0};
     sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS);
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
   }

   // no timestamp allowable

@@ -1575,8 +1571,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
   //}

   if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
   }

   SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);

@@ -1588,20 +1583,17 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
   // length less than TSDB_MAX_TASG_LEN
   if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
   }

   // tags name can not be a keyword
   if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
   }

   // binary(val), val can not be equalled to or less than 0
   if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
   }

   // field name must be unique

@@ -1609,17 +1601,15 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
   for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
     if (strncasecmp(pTagField->name, pSchema[i].name, sizeof(pTagField->name) - 1) == 0) {
-      invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
-      return false;
+      return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pTagField->name, NULL);
     }
   }

-  return true;
+  return TSDB_CODE_SUCCESS;
 }

-bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
+int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
   const char* msg1 = "too many columns";
   const char* msg2 = "duplicated column names";
   const char* msg3 = "column length too long";
   const char* msg4 = "invalid data type";
   const char* msg5 = "invalid column name";

@@ -1634,18 +1624,15 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
   // no more max columns
   if (numOfCols >= TSDB_MAX_COLUMNS || numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }

   if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_UBIGINT) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
   }

   if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
   }

   SSchema* pSchema = tscGetTableSchema(pTableMeta);

@@ -1656,25 +1643,22 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
   }

   if (pColField->bytes <= 0) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
   }

   // length less than TSDB_MAX_BYTES_PER_ROW
   if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) {
-    invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
-    return false;
+    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
   }

   // field name must be unique
   for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
     if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
-      invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
-      return false;
+      return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL);
     }
   }

-  return true;
+  return TSDB_CODE_SUCCESS;
 }

 /* is contained in pFieldList or not */

@@ -6068,7 +6052,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
   const char* msg19 = "invalid new tag name";
   const char* msg20 = "table is not super table";
   const char* msg21 = "only binary/nchar column length could be modified";
-  const char* msg22 = "new column length should be bigger than old one";
   const char* msg23 = "only column length coulbe be modified";
   const char* msg24 = "invalid binary/nchar column length";

@@ -6120,8 +6103,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     }

     TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
-    if (!validateOneTags(pCmd, p)) {
-      return TSDB_CODE_TSC_INVALID_OPERATION;
+    int32_t ret = validateOneTag(pCmd, p);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return ret;
     }

     tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);

@@ -6298,8 +6282,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     }

     TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
-    if (!validateOneColumn(pCmd, p)) {
-      return TSDB_CODE_TSC_INVALID_OPERATION;
+    int32_t ret = validateOneColumn(pCmd, p);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return ret;
     }

     tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);

@@ -6362,7 +6347,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     }

     if (pItem->bytes <= pColSchema->bytes) {
-      return invalidOperationMsg(pMsg, msg22);
+      return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, pMsg, pItem->name, NULL);
     }

     SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;

@@ -6413,7 +6398,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     }

     if (pItem->bytes <= pColSchema->bytes) {
-      return invalidOperationMsg(pMsg, msg22);
+      return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_TAG_LENGTH, pMsg, pItem->name, NULL);
     }

     SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
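The recurring change in this file is a return-convention switch: validateOneTags()/validateOneColumn() used to return a bool and leave only a generic "invalid operation" message in the command payload, while the renamed validateOneTag()/validateOneColumn() now return an int32_t so a specific code such as TSDB_CODE_TSC_DUP_COL_NAMES reaches the caller in setAlterTableInfo(). A standalone sketch of that pattern follows; the names in it are illustrative and not part of the TDengine source.

// Standalone sketch of the bool -> int32_t error-code convention adopted above.
#include <stdint.h>
#include <stdio.h>

#define DEMO_SUCCESS       0
#define DEMO_DUP_COL_NAMES 0x021D  /* mirrors TSDB_CODE_TSC_DUP_COL_NAMES */

// Old style: return a bool and let the caller map it to a generic code.
// New style (as in validateOneTag/validateOneColumn): return the specific code.
static int32_t validate_name(const char *name) {
  if (name[0] == '\0') {
    return DEMO_DUP_COL_NAMES;  // the specific code travels back to the caller
  }
  return DEMO_SUCCESS;
}

int main(void) {
  int32_t ret = validate_name("");
  if (ret != DEMO_SUCCESS) {
    printf("validation failed with code 0x%x\n", (unsigned)ret);  // caller just propagates it
    return 1;
  }
  return 0;
}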
src/client/src/tscUtil.c

@@ -4168,6 +4168,31 @@ int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char
   return TSDB_CODE_TSC_INVALID_OPERATION;
 }

+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql) {
+  const char* msgFormat1 = "%s:%s";
+  const char* msgFormat2 = "%s:\'%s\' (%s)";
+  const char* msgFormat3 = "%s:\'%s\'";
+
+  const int32_t BACKWARD_CHAR_STEP = 0;
+
+  if (sql == NULL) {
+    assert(errMsg != NULL);
+    sprintf(dstBuffer, msgFormat1, tstrerror(code), errMsg);
+    return code;
+  }
+
+  char buf[64] = {0};  // only extract part of sql string
+  strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);
+
+  if (errMsg != NULL) {
+    sprintf(dstBuffer, msgFormat2, tstrerror(code), buf, errMsg);
+  } else {
+    sprintf(dstBuffer, msgFormat3, tstrerror(code), buf);  // no additional information for invalid sql error
+  }
+
+  return code;
+}
+
 bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
   assert(pQueryInfo != NULL && pQueryInfo->clauseLimit != 0);
   return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
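The new tscErrorMsgWithCode() fills the caller's message buffer in one of three shapes: code plus extra message when no SQL fragment is available, code plus a truncated SQL fragment plus the extra message, or code plus the fragment alone. The standalone sketch below mirrors that logic; demo_strerror() stands in for tstrerror() so it compiles outside the client library.

// Standalone illustration of the three message formats built by tscErrorMsgWithCode().
#include <stdio.h>
#include <string.h>

static const char *demo_strerror(int code) {
  (void)code;
  return "Invalid tag length";  // stand-in for tstrerror()
}

static void format_error(char *dst, int code, const char *errMsg, const char *sql) {
  char buf[64] = {0};
  if (sql == NULL) {
    sprintf(dst, "%s:%s", demo_strerror(code), errMsg);                 // format 1: code + extra message
    return;
  }
  strncpy(buf, sql, sizeof(buf) - 1);                                   // only a prefix of the SQL is kept
  if (errMsg != NULL) {
    sprintf(dst, "%s:'%s' (%s)", demo_strerror(code), buf, errMsg);     // format 2: code + fragment + message
  } else {
    sprintf(dst, "%s:'%s'", demo_strerror(code), buf);                  // format 3: code + fragment only
  }
}

int main(void) {
  char msg[256] = {0};
  format_error(msg, 0x021E, NULL, "alter table st modify tag t1 binary(4)");
  printf("%s\n", msg);  // e.g. Invalid tag length:'alter table st modify tag t1 binary(4)'
  return 0;
}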
src/inc/taoserror.h

@@ -103,6 +103,9 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_FILE_EMPTY                TAOS_DEF_ERROR_CODE(0, 0x021A)  //"File is empty")
 #define TSDB_CODE_TSC_LINE_SYNTAX_ERROR         TAOS_DEF_ERROR_CODE(0, 0x021B)  //"Syntax error in Line")
 #define TSDB_CODE_TSC_NO_META_CACHED            TAOS_DEF_ERROR_CODE(0, 0x021C)  //"No table meta cached")
+#define TSDB_CODE_TSC_DUP_COL_NAMES             TAOS_DEF_ERROR_CODE(0, 0x021D)  //"duplicated column names")
+#define TSDB_CODE_TSC_INVALID_TAG_LENGTH        TAOS_DEF_ERROR_CODE(0, 0x021E)  //"Invalid tag length")
+#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH     TAOS_DEF_ERROR_CODE(0, 0x021F)  //"Invalid column length")

 // mnode
 #define TSDB_CODE_MND_MSG_NOT_PROCESSED         TAOS_DEF_ERROR_CODE(0, 0x0300)  //"Message not processed")

@@ -185,6 +188,9 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_MND_INVALID_FUNC              TAOS_DEF_ERROR_CODE(0, 0x0374)  //"Invalid func")
 #define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE      TAOS_DEF_ERROR_CODE(0, 0x0375)  //"Invalid func bufSize")
+#define TSDB_CODE_MND_INVALID_TAG_LENGTH        TAOS_DEF_ERROR_CODE(0, 0x0376)  //"invalid tag length")
+#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH     TAOS_DEF_ERROR_CODE(0, 0x0377)  //"invalid column length")

 #define TSDB_CODE_MND_DB_NOT_SELECTED           TAOS_DEF_ERROR_CODE(0, 0x0380)  //"Database not specified or available")
 #define TSDB_CODE_MND_DB_ALREADY_EXIST          TAOS_DEF_ERROR_CODE(0, 0x0381)  //"Database already exists")
 #define TSDB_CODE_MND_INVALID_DB_OPTION         TAOS_DEF_ERROR_CODE(0, 0x0382)  //"Invalid database options")
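The three new client-side codes (0x021D-0x021F) and the two new mnode codes (0x0376-0x0377) let callers distinguish duplicate-name and length problems from the generic invalid-operation error. A hedged sketch of checking one of them from application code follows; the line payload is illustrative, and whether the raw return value of taos_insert_lines() compares equal to the constant (rather than only being non-zero) is an assumption here.

// Checking a new error code after a schemaless insert; payload and exact comparison are assumptions.
#include <stdio.h>
#include <taos.h>
#include <taoserror.h>

int check_dup_tag(TAOS *taos) {
  char *lines[] = {
      "std,id=\"dup_demo\",tag1=4i64,TAG1=\"dup!\" c1=true 1626006834s"  // tag name repeated with different case
  };
  int code = taos_insert_lines(taos, lines, sizeof(lines) / sizeof(char *));
  if (code == TSDB_CODE_TSC_DUP_COL_NAMES) {
    printf("rejected as expected: %s\n", tstrerror(code));
  }
  return code;
}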
src/mnode/src/mnodeTable.c

@@ -1246,13 +1246,13 @@ static int32_t mnodeAddSuperTableTag(SMnodeMsg *pMsg, SSchema schema[], int32_t
     if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) {
       mError("msg:%p, app:%p stable:%s, add tag, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle,
              pStable->info.tableId, schema[i].name);
-      return TSDB_CODE_MND_TAG_ALREAY_EXIST;
+      return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
     }

     if (mnodeFindSuperTableTagIndex(pStable, schema[i].name) > 0) {
       mError("msg:%p, app:%p stable:%s, add tag, tag:%s already exist", pMsg, pMsg->rpcMsg.ahandle,
              pStable->info.tableId, schema[i].name);
-      return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
+      return TSDB_CODE_MND_TAG_ALREAY_EXIST;
     }
   }

@@ -1518,6 +1518,13 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
   // update
   SSchema *schema = (SSchema *) (pStable->schema + col);
   ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+  if (pAlter->schema[0].bytes <= schema->bytes) {
+    mError("msg:%p, app:%p stable:%s, modify column len. column:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
+           pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
+    return TSDB_CODE_MND_INVALID_COLUMN_LENGTH;
+  }
+
   schema->bytes = pAlter->schema[0].bytes;
   pStable->sversion++;
   mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,

@@ -1548,6 +1555,12 @@ static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) {
   // update
   SSchema *schema = (SSchema *) (pStable->schema + col + pStable->numOfColumns);
   ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+  if (pAlter->schema[0].bytes <= schema->bytes) {
+    mError("msg:%p, app:%p stable:%s, modify tag len. tag:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
+           pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
+    return TSDB_CODE_MND_INVALID_TAG_LENGTH;
+  }
+
   schema->bytes = pAlter->schema[0].bytes;
   pStable->tversion++;
   mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
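With the two new checks, the mnode refuses an ALTER that would keep or shrink the length of a BINARY/NCHAR column or tag and reports the new length codes instead of falling through to the update. A hedged C sketch of statements that would trip these checks, issued through taos_query(); the database, table, and the exact MODIFY COLUMN/TAG syntax are assumptions for illustration and are not taken from this diff.

// Illustrative ALTERs that the new mnode length checks reject when the length does not grow.
// Database/table names and the MODIFY syntax are assumptions, not part of this commit.
#include <stdio.h>
#include <taos.h>

void try_shrink(TAOS *taos) {
  // Suppose db.st was created with column c1 BINARY(16) and tag t1 NCHAR(16).
  TAOS_RES *res = taos_query(taos, "ALTER TABLE db.st MODIFY COLUMN c1 BINARY(8)");
  printf("modify column: %s\n", taos_errstr(res));  // expected: "invalid column length"
  taos_free_result(res);

  res = taos_query(taos, "ALTER TABLE db.st MODIFY TAG t1 NCHAR(8)");
  printf("modify tag: %s\n", taos_errstr(res));     // expected: "invalid tag length"
  taos_free_result(res);
}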
src/util/src/terror.c

@@ -112,6 +112,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too lon
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY,             "File is empty")
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR,      "Syntax error in Line")
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED,         "No table meta cached")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES,          "duplicated column names")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH,     "Invalid tag length")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH,  "Invalid column length")

 // mnode
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED,      "Message not processed")

@@ -194,6 +197,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC,           "Invalid func")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_BUFSIZE,   "Invalid func bufSize")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TAG_LENGTH,     "invalid tag length")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_COLUMN_LENGTH,  "invalid column length")

 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED,        "Database not specified or available")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST,       "Database already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION,      "Invalid database options")
tests/examples/c/schemaless.c

@@ -61,7 +61,7 @@ int main(int argc, char* argv[]) {
   time_t  ct = time(0);
   int64_t ts = ct * 1000;

-  char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";
+  char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";

   char** lines = calloc(numSuperTables * numChildTables * numRowsPerChildTable, sizeof(char*));
   int l = 0;

@@ -75,7 +75,7 @@ int main(int argc, char* argv[]) {
       }
     }
   }
-  shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);
+  //shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);

   printf("%s\n", "begin taos_insert_lines");
   int64_t begin = getTimeInUs();

@@ -83,119 +83,5 @@ int main(int argc, char* argv[]) {
   int64_t end = getTimeInUs();
   printf("code: %d, %s. time used: %" PRId64 "\n", code, tstrerror(code), end - begin);

-  char* lines_000_0[] = {
-      "sta1,id=sta1_1,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us"
-  };
-
-  code = taos_insert_lines(taos, lines_000_0, sizeof(lines_000_0)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_000_0 should return error\n");
-    return -1;
-  }
-
-  char* lines_000_1[] = {
-      "sta2,id=\"sta2_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
-  };
-
-  code = taos_insert_lines(taos, lines_000_1, sizeof(lines_000_1)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_000_1 should return error\n");
-    return -1;
-  }
-
-  char* lines_000_2[] = {
-      "sta3,id=\"sta3_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
-  };
-
-  code = taos_insert_lines(taos, lines_000_2, sizeof(lines_000_2)/sizeof(char*));
-  if (0 != code) {
-    printf("taos_insert_lines() lines_000_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_001_0[] = {
-      "sta4,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us",
-  };
-
-  code = taos_insert_lines(taos, lines_001_0, sizeof(lines_001_0)/sizeof(char*));
-  if (0 != code) {
-    printf("taos_insert_lines() lines_001_0 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_001_1[] = {
-      "sta5,id=\"sta5_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
-  };
-
-  code = taos_insert_lines(taos, lines_001_1, sizeof(lines_001_1)/sizeof(char*));
-  if (0 != code) {
-    printf("taos_insert_lines() lines_001_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_001_2[] = {
-      "sta6,id=\"sta6_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
-  };
-
-  code = taos_insert_lines(taos, lines_001_2, sizeof(lines_001_2)/sizeof(char*));
-  if (0 != code) {
-    printf("taos_insert_lines() lines_001_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_002[] = {
-      "stb,id=\"stb_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000000ns",
-      "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639019us",
-      "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833640ms",
-      "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006834s"
-  };
-
-  code = taos_insert_lines(taos, lines_002, sizeof(lines_002)/sizeof(char*));
-  if (0 != code) {
-    printf("taos_insert_lines() lines_002 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  //Duplicate key check;
-  char* lines_003_1[] = {
-      "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
-  };
-
-  code = taos_insert_lines(taos, lines_003_1, sizeof(lines_003_1)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_003_2[] = {
-      "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
-  };
-
-  code = taos_insert_lines(taos, lines_003_2, sizeof(lines_003_2)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_003_3[] = {
-      "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
-  };
-
-  code = taos_insert_lines(taos, lines_003_3, sizeof(lines_003_3)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
-  char* lines_003_4[] = {
-      "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
-  };
-
-  code = taos_insert_lines(taos, lines_003_4, sizeof(lines_003_4)/sizeof(char*));
-  if (0 == code) {
-    printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
-    return -1;
-  }
-
   return 0;
 }
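After this change the C example keeps only the generated bulk-insert benchmark, and the hand-written positive and negative cases move to the Python suites added below. For reference, here is a minimal sketch of the taos_insert_lines() call the example benchmarks; the connection parameters and the single line are placeholders, and the target database is assumed to exist already.

// Minimal taos_insert_lines() call in the style of schemaless.c; placeholders throughout.
#include <stdio.h>
#include <taos.h>
#include <taoserror.h>

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "sml_db", 0);  // database assumed to exist
  if (taos == NULL) return -1;

  char *lines[] = {
      "sta1,t0=true,t1=127i8 c0=true,c1=127i8 1626006833639000us"
  };
  int code = taos_insert_lines(taos, lines, sizeof(lines) / sizeof(char *));
  printf("code: %d, %s\n", code, tstrerror(code));

  taos_close(taos);
  return (code == 0) ? 0 : -1;
}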
tests/pytest/query/queryDiffColsOr.py (new file, mode 100644)

(Diff collapsed in the original view; contents not shown.)
tests/pytest/tools/schemalessInsertPerformance.py (new file, mode 100644)
###################################################################
# Copyright (c) 2021 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import traceback
import random
import string
from taos.error import LinesError
import datetime
import time
from copy import deepcopy
import numpy as np
from util.log import *
from util.cases import *
from util.sql import *
from util.common import tdCom
import threading


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self._conn = conn

    def genRandomTs(self):
        year = random.randint(2000, 2021)
        month = random.randint(10, 12)
        day = random.randint(10, 29)
        hour = random.randint(10, 24)
        minute = random.randint(10, 59)
        second = random.randint(10, 59)
        m_second = random.randint(101, 199)
        date_time = f'{year}-{month}-{day} {hour}:{minute}:{second}'
        print(date_time)
        timeArray = time.strptime(date_time, "%Y-%m-%d %H:%M:%S")
        ts = int(time.mktime(timeArray))
        print("------", ts)
        # timestamp = time.mktime(datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S.%f").timetuple())
        return f'{ts}s'

    def genMultiColStr(self, int_count=4, double_count=0, binary_count=0):
        """
            genType must be tag/col
        """
        col_str = ""
        if double_count == 0 and binary_count == 0:
            for i in range(0, int_count):
                if i < (int_count - 1):
                    col_str += f'c{i}={random.randint(0, 255)}i32,'
                else:
                    col_str += f'c{i}={random.randint(0, 255)}i32 '
        elif double_count > 0 and binary_count == 0:
            for i in range(0, int_count):
                col_str += f'c{i}={random.randint(0, 255)}i32,'
            for i in range(0, double_count):
                if i < (double_count - 1):
                    col_str += f'c{i+int_count}={random.randint(1, 255)}.{i}f64,'
                else:
                    col_str += f'c{i+int_count}={random.randint(1, 255)}.{i}f64 '
        elif double_count == 0 and binary_count > 0:
            for i in range(0, int_count):
                col_str += f'c{i}={random.randint(0, 255)}i32,'
            for i in range(0, binary_count):
                if i < (binary_count - 1):
                    col_str += f'c{i+int_count}=\"{tdCom.getLongName(5, "letters")}\",'
                else:
                    col_str += f'c{i+int_count}=\"{tdCom.getLongName(5, "letters")}\" '
        elif double_count > 0 and binary_count > 0:
            for i in range(0, int_count):
                col_str += f'c{i}={random.randint(0, 255)}i32,'
            for i in range(0, double_count):
                col_str += f'c{i+int_count}={random.randint(1, 255)}.{i}f64,'
            for i in range(0, binary_count):
                if i < (binary_count - 1):
                    col_str += f'c{i+int_count+double_count}=\"{tdCom.getLongName(5, "letters")}\",'
                else:
                    col_str += f'c{i+int_count+double_count}=\"{tdCom.getLongName(5, "letters")}\" '
        return col_str

    def genLongSql(self, int_count=4, double_count=0, binary_count=0, init=False):
        if init:
            tag_str = f'id="init",t0={random.randint(0, 65535)}i32,t1=\"{tdCom.getLongName(10, "letters")}\"'
        else:
            tag_str = f'id="sub_{tdCom.getLongName(5, "letters")}_{tdCom.getLongName(5, "letters")}",t0={random.randint(0, 65535)}i32,t1=\"{tdCom.getLongName(10, "letters")}\"'
        col_str = self.genMultiColStr(int_count, double_count, binary_count)
        long_sql = 'stb' + ',' + tag_str + ' ' + col_str + '0'
        return long_sql

    def getPerfSql(self, count=4, init=False):
        if count == 4:
            input_sql = self.genLongSql(init=init)
        elif count == 1000:
            input_sql = self.genLongSql(400, 400, 200, init=init)
        elif count == 4000:
            input_sql = self.genLongSql(1900, 1900, 200, init=init)
        return input_sql

    def tableGenerator(self, count=4, table_count=1000):
        for i in range(table_count):
            yield self.getPerfSql(count)

    def genTableList(self, count=4, table_count=10000):
        table_list = list()
        for i in range(1, table_count + 1):
            table_list.append(self.getPerfSql(count))
        return table_list

    def splitTableList(self, count=4, thread_count=10, table_count=1000):
        per_list_len = int(table_count / thread_count)
        table_list = self.genTableList(count=count)
        # ts = int(time.time())
        list_of_group = zip(*(iter(table_list),) * per_list_len)
        end_list = [list(i) for i in list_of_group]  # i is a tuple
        count = len(table_list) % per_list_len
        end_list.append(table_list[-count:]) if count != 0 else end_list
        return table_list, end_list

    def rowsGenerator(self, end_list):
        ts = int(time.time())
        input_sql_list = list()
        for elm_list in end_list:
            for elm in elm_list:
                for i in range(1, 10000):
                    ts -= 1
                    elm_new = self.replaceLastStr(elm, str(ts)) + 's'
                    input_sql_list.append(elm_new)
                yield input_sql_list

    # def insertRows(self, count=4, thread_count=10):
    #     table_list = self.splitTableList(count=count, thread_count=thread_count)[0]
    #     for

    def replaceLastStr(self, str, new):
        list_ori = list(str)
        list_ori[-1] = new
        return ''.join(list_ori)

    def genDataList(self, table_list, row_count=10):
        data_list = list()
        ts = int(time.time())
        for table_str in table_list:
            for i in range(1, row_count + 1):
                ts -= 1
                table_str_new = self.replaceLastStr(table_str, f'{str(ts)}s')
                data_list.append(table_str_new)
        print(data_list)
        return data_list

    def insertRows(self, count=4, table_count=1000):
        table_generator = self.tableGenerator(count=count, table_count=table_count)
        for table_name in table_generator:
            pass

    def perfTableInsert(self):
        table_generator = self.tableGenerator()
        for input_sql in table_generator:
            self._conn.insert_lines([input_sql])
            # for i in range(10):
            #     self._conn.insert_lines([input_sql])

    def perfDataInsert(self, count=4):
        table_generator = self.tableGenerator(count=count)
        ts = int(time.time())
        for input_sql in table_generator:
            print("input_sql-----------", input_sql)
            self._conn.insert_lines([input_sql])
            for i in range(100000):
                ts -= 1
                input_sql_new = self.replaceLastStr(input_sql, str(ts)) + 's'
                print("input_sql_new---------", input_sql_new)
                self._conn.insert_lines([input_sql_new])

    def batchInsertTable(self, batch_list):
        for insert_list in batch_list:
            print(threading.current_thread().name, "length=", len(insert_list))
            print(threading.current_thread().name, 'firstline', insert_list[0])
            print(threading.current_thread().name, 'lastline:', insert_list[-1])
            self._conn.insert_lines(insert_list)
            print(threading.current_thread().name, 'end')

    def genTableThread(self, thread_count=10):
        threads = list()
        for i in range(thread_count):
            t = threading.Thread(target=self.perfTableInsert)
            threads.append(t)
        return threads

    def genMultiThread(self, count, thread_count=10):
        threads = list()
        for i in range(thread_count):
            t = threading.Thread(target=self.perfDataInsert, args=(count,))
            threads.append(t)
        return threads

    def multiThreadRun(self, threads):
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def createStb(self, count=4):
        input_sql = self.getPerfSql(count=count, init=True)
        self._conn.insert_lines([input_sql])

    def threadInsertTable(self, end_list, thread_count=10):
        threads = list()
        for i in range(thread_count):
            t = threading.Thread(target=self.batchInsertTable, args=(end_list,))
            threads.append(t)
        return threads

    def finalRun(self):
        self.createStb()
        table_list, end_list = self.splitTableList()
        batchInsertTableThread = self.threadInsertTable(end_list=end_list)
        self.multiThreadRun(batchInsertTableThread)
        # print(end_list)

    # def createTb(self, count=4):
    #     input_sql = self.getPerfSql(count=count)
    #     for i in range(10000):
    #         self._conn.insert_lines([input_sql])

    # def createTb1(self, count=4):
    #     start_time = time.time()
    #     self.multiThreadRun(self.genMultiThread(input_sql))
    #     end_time = time.time()
    #     return end_time - start_time

    # def calInsertTableTime(self):
    #     start_time = time.time()
    #     self.createStb()
    #     self.multiThreadRun(self.genMultiThread())
    #     end_time = time.time()
    #     return end_time - start_time

    def calRunTime(self, count=4):
        start_time = time.time()
        self.createStb()
        self.multiThreadRun(self.genMultiThread(count=count))
        end_time = time.time()
        return end_time - start_time

    def calRunTime1(self, count=4):
        start_time = time.time()
        self.createStb()
        self.multiThreadRun(self.perfTableInsert())
        # self.perfTableInsert()

    # def schemalessInsertPerfTest(self, count=4):
    #     input_sql = self.getPerfSql(count)
    #     self.calRunTime(input_sql)

    # def test(self):
    #     sql1 = 'stb,id="init",t0=14865i32,t1="tvnqbjuqck" c0=37i32,c1=217i32,c2=3i32,c3=88i32 1626006833640ms'
    #     sql2 = 'stb,id="init",t0=14865i32,t1="tvnqbjuqck" c0=38i32,c1=217i32,c2=3i32,c3=88i32 1626006833641ms'
    #     self._conn.insert_lines([sql1])
    #     self._conn.insert_lines([sql2])

    def run(self):
        print("running {}".format(__file__))
        tdSql.prepare()
        self.finalRun()
        # print(self.calRunTime1(count=4))
        # print(self.calRunTime(count=4))
        # print(self.genRandomTs())
        # self.calInsertTableTime()
        # self.test()
        # table_list = self.splitTableList()[0]
        # data_list = self.genDataList(table_list)
        # print(len(data_list))
        # end_list = [['stb,id="sub_vzvfx_dbuxp",t0=9961i32,t1="zjjfayhfep" c0=83i32,c1=169i32,c2=177i32,c3=4i32 0','stb,id="sub_vzvfx_dbuxp",t0=9961i32,t1="zjjfayhfep" c0=83i32,c1=169i32,c2=177i32,c3=4i32 0'], ['stb,id="sub_vzvfx_dbuxp",t0=9961i32,t1="zjjfayhfep" c0=83i32,c1=169i32,c2=177i32,c3=4i32 0','stb,id="sub_vzvfx_dbuxp",t0=9961i32,t1="zjjfayhfep" c0=83i32,c1=169i32,c2=177i32,c3=4i32 0']]
        # rowsGenerator = self.rowsGenerator(end_list)
        # for i in rowsGenerator:
        #     print(i)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/util/common.py

@@ -50,4 +50,4 @@ class TDCom:
     def close(self):
         self.cursor.close()

-tdCom = TDCom()
\ No newline at end of file
+tdCom = TDCom()