Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
8f46f81f
T
TDengine
项目概览
taosdata
/
TDengine
大约 1 年 前同步成功
通知
1185
Star
22015
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
8f46f81f
编写于
11月 16, 2020
作者:
dengyihao
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'develop' into test1
上级
961757ff
8d9a0d7b
变更
26
隐藏空白更改
内联
并排
Showing
26 changed file
with
1845 addition
and
234 deletion
+1845
-234
src/client/src/tscLocal.c
src/client/src/tscLocal.c
+15
-3
src/client/src/tscParseInsert.c
src/client/src/tscParseInsert.c
+4
-0
src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
.../jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
+2
-1
src/kit/taosdump/taosdump.c
src/kit/taosdump/taosdump.c
+102
-66
src/query/inc/qHistogram.h
src/query/inc/qHistogram.h
+2
-0
tests/examples/JDBC/mybatisplus-demo/.gitignore
tests/examples/JDBC/mybatisplus-demo/.gitignore
+33
-0
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java
...mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java
+118
-0
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar
...ples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar
+0
-0
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties
...BC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties
+2
-0
tests/examples/JDBC/mybatisplus-demo/mvnw
tests/examples/JDBC/mybatisplus-demo/mvnw
+322
-0
tests/examples/JDBC/mybatisplus-demo/mvnw.cmd
tests/examples/JDBC/mybatisplus-demo/mvnw.cmd
+182
-0
tests/examples/JDBC/mybatisplus-demo/pom.xml
tests/examples/JDBC/mybatisplus-demo/pom.xml
+101
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java
...a/example/mybatisplusdemo/MybatisplusDemoApplication.java
+15
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java
...ata/example/mybatisplusdemo/config/MybatisPlusConfig.java
+34
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java
.../taosdata/example/mybatisplusdemo/domain/Temperature.java
+15
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java
.../com/taosdata/example/mybatisplusdemo/domain/Weather.java
+15
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java
...ata/example/mybatisplusdemo/mapper/TemperatureMapper.java
+23
-0
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
...aosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
+8
-0
tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml
.../JDBC/mybatisplus-demo/src/main/resources/application.yml
+34
-0
tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java
...example/mybatisplusdemo/mapper/TemperatureMapperTest.java
+140
-0
tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
...ata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
+88
-0
tests/examples/c/demo.c
tests/examples/c/demo.c
+15
-30
tests/pytest/concurrent_inquiry.py
tests/pytest/concurrent_inquiry.py
+231
-117
tests/pytest/cq.py
tests/pytest/cq.py
+169
-0
tests/pytest/functions/function_arithmetic.py
tests/pytest/functions/function_arithmetic.py
+71
-0
tests/pytest/insert/restfulInsert.py
tests/pytest/insert/restfulInsert.py
+104
-17
未找到文件。
src/client/src/tscLocal.c
浏览文件 @
8f46f81f
...
...
@@ -619,7 +619,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
for
(
int32_t
i
=
0
;
i
<
numOfRows
;
++
i
)
{
uint8_t
type
=
pSchema
[
i
].
type
;
if
(
type
==
TSDB_DATA_TYPE_BINARY
||
type
==
TSDB_DATA_TYPE_NCHAR
)
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
pSchema
->
bytes
);
int32_t
bytes
=
pSchema
[
i
].
bytes
-
VARSTR_HEADER_SIZE
;
if
(
type
==
TSDB_DATA_TYPE_NCHAR
)
{
bytes
=
bytes
/
TSDB_NCHAR_SIZE
;
}
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
bytes
);
}
else
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s,"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
);
}
...
...
@@ -642,7 +646,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for
(
int32_t
i
=
0
;
i
<
numOfRows
;
++
i
)
{
uint8_t
type
=
pSchema
[
i
].
type
;
if
(
type
==
TSDB_DATA_TYPE_BINARY
||
type
==
TSDB_DATA_TYPE_NCHAR
)
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
pSchema
->
bytes
);
int32_t
bytes
=
pSchema
[
i
].
bytes
-
VARSTR_HEADER_SIZE
;
if
(
type
==
TSDB_DATA_TYPE_NCHAR
)
{
bytes
=
bytes
/
TSDB_NCHAR_SIZE
;
}
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
bytes
);
}
else
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s,"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
type
].
aName
);
}
...
...
@@ -652,7 +660,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for
(
int32_t
i
=
numOfRows
;
i
<
totalRows
;
i
++
)
{
uint8_t
type
=
pSchema
[
i
].
type
;
if
(
type
==
TSDB_DATA_TYPE_BINARY
||
type
==
TSDB_DATA_TYPE_NCHAR
)
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
pSchema
->
bytes
);
int32_t
bytes
=
pSchema
[
i
].
bytes
-
VARSTR_HEADER_SIZE
;
if
(
type
==
TSDB_DATA_TYPE_NCHAR
)
{
bytes
=
bytes
/
TSDB_NCHAR_SIZE
;
}
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s(%d),"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
pSchema
[
i
].
type
].
aName
,
bytes
);
}
else
{
snprintf
(
result
+
strlen
(
result
),
TSDB_MAX_BINARY_LEN
-
strlen
(
result
),
"%s %s,"
,
pSchema
[
i
].
name
,
tDataTypeDesc
[
type
].
aName
);
}
...
...
src/client/src/tscParseInsert.c
浏览文件 @
8f46f81f
...
...
@@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) {
index
=
0
;
sToken
=
tStrGetToken
(
str
,
&
index
,
false
,
0
,
NULL
);
if
(
sToken
.
type
!=
TK_STRING
&&
sToken
.
type
!=
TK_ID
)
{
code
=
tscInvalidSQLErrMsg
(
pCmd
->
payload
,
"file path is required following keyword FILE"
,
sToken
.
z
);
goto
_error
;
}
str
+=
index
;
if
(
sToken
.
n
==
0
)
{
code
=
tscInvalidSQLErrMsg
(
pCmd
->
payload
,
"file path is required following keyword FILE"
,
sToken
.
z
);
...
...
src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java
浏览文件 @
8f46f81f
...
...
@@ -197,7 +197,8 @@ public class TSDBConnection implements Connection {
public
SQLWarning
getWarnings
()
throws
SQLException
{
//todo: implement getWarnings according to the warning messages returned from TDengine
throw
new
SQLException
(
TSDBConstants
.
UNSUPPORT_METHOD_EXCEPTIONZ_MSG
);
return
null
;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public
void
clearWarnings
()
throws
SQLException
{
...
...
src/kit/taosdump/taosdump.c
浏览文件 @
8f46f81f
...
...
@@ -40,19 +40,22 @@ typedef struct {
enum
_show_db_index
{
TSDB_SHOW_DB_NAME_INDEX
,
TSDB_SHOW_DB_CREATED_TIME_INDEX
,
TSDB_SHOW_DB_VGROUPS_INDEX
,
TSDB_SHOW_DB_NTABLES_INDEX
,
TSDB_SHOW_DB_VGROUPS_INDEX
,
TSDB_SHOW_DB_REPLICA_INDEX
,
TSDB_SHOW_DB_QUORUM_INDEX
,
TSDB_SHOW_DB_DAYS_INDEX
,
TSDB_SHOW_DB_KEEP_INDEX
,
TSDB_SHOW_DB_TABLES_INDEX
,
TSDB_SHOW_DB_ROWS_INDEX
,
TSDB_SHOW_DB_CACHE_INDEX
,
TSDB_SHOW_DB_ABLOCKS_INDEX
,
TSDB_SHOW_DB_TBLOCKS_INDEX
,
TSDB_SHOW_DB_CTIME_INDEX
,
TSDB_SHOW_DB_CLOG_INDEX
,
TSDB_SHOW_DB_BLOCKS_INDEX
,
TSDB_SHOW_DB_MINROWS_INDEX
,
TSDB_SHOW_DB_MAXROWS_INDEX
,
TSDB_SHOW_DB_WALLEVEL_INDEX
,
TSDB_SHOW_DB_FSYNC_INDEX
,
TSDB_SHOW_DB_COMP_INDEX
,
TSDB_SHOW_DB_PRECISION_INDEX
,
TSDB_SHOW_DB_UPDATE_INDEX
,
TSDB_SHOW_DB_STATUS_INDEX
,
TSDB_MAX_SHOW_DB
};
...
...
@@ -90,17 +93,23 @@ extern char version[];
typedef
struct
{
char
name
[
TSDB_DB_NAME_LEN
+
1
];
int32_t
replica
;
int32_t
days
;
int32_t
keep
;
int32_t
tables
;
int32_t
rows
;
int32_t
cache
;
int32_t
ablocks
;
int32_t
tblocks
;
int32_t
ctime
;
int32_t
clog
;
int32_t
comp
;
int32_t
tables
;
int32_t
vgroups
;
int16_t
replications
;
int16_t
quorum
;
int16_t
daysPerFile
;
int16_t
daysToKeep
;
int16_t
daysToKeep1
;
int16_t
daysToKeep2
;
int32_t
cacheBlockSize
;
//MB
int32_t
totalBlocks
;
int32_t
minRowsPerFileBlock
;
int32_t
maxRowsPerFileBlock
;
int8_t
walLevel
;
int32_t
fsyncPeriod
;
int8_t
compression
;
int8_t
precision
;
// time resolution
int8_t
update
;
}
SDbInfo
;
typedef
struct
{
...
...
@@ -173,6 +182,7 @@ static struct argp_option options[] = {
{
"start-time"
,
'S'
,
"START_TIME"
,
0
,
"Start time to dump."
,
3
},
{
"end-time"
,
'E'
,
"END_TIME"
,
0
,
"End time to dump."
,
3
},
{
"data-batch"
,
'N'
,
"DATA_BATCH"
,
0
,
"Number of data point per insert statement. Default is 1."
,
3
},
{
"max-sql-len"
,
'L'
,
"SQL_LEN"
,
0
,
"Max length of one sql. Default is 65480."
,
3
},
{
"table-batch"
,
't'
,
"TABLE_BATCH"
,
0
,
"Number of table dumpout into one output file. Default is 1."
,
3
},
{
"thread_num"
,
'T'
,
"THREAD_NUM"
,
0
,
"Number of thread for dump in file. Default is 5."
,
3
},
{
"allow-sys"
,
'a'
,
0
,
0
,
"Allow to dump sys database"
,
3
},
...
...
@@ -200,6 +210,7 @@ struct arguments {
int64_t
start_time
;
int64_t
end_time
;
int32_t
data_batch
;
int32_t
max_sql_len
;
int32_t
table_batch
;
// num of table which will be dump into one output file.
bool
allow_sys
;
// other options
...
...
@@ -298,6 +309,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case
'N'
:
arguments
->
data_batch
=
atoi
(
arg
);
break
;
case
'L'
:
{
int32_t
len
=
atoi
(
arg
);
if
(
len
>
TSDB_MAX_ALLOWED_SQL_LEN
)
{
len
=
TSDB_MAX_ALLOWED_SQL_LEN
;
}
else
if
(
len
<
TSDB_MAX_SQL_LEN
)
{
len
=
TSDB_MAX_SQL_LEN
;
}
arguments
->
max_sql_len
=
len
;
break
;
}
case
't'
:
arguments
->
table_batch
=
atoi
(
arg
);
break
;
...
...
@@ -360,6 +382,7 @@ struct arguments tsArguments = {
0
,
INT64_MAX
,
1
,
TSDB_MAX_SQL_LEN
,
1
,
false
,
// other options
...
...
@@ -415,7 +438,9 @@ int main(int argc, char *argv[]) {
printf
(
"start_time: %"
PRId64
"
\n
"
,
tsArguments
.
start_time
);
printf
(
"end_time: %"
PRId64
"
\n
"
,
tsArguments
.
end_time
);
printf
(
"data_batch: %d
\n
"
,
tsArguments
.
data_batch
);
printf
(
"max_sql_len: %d
\n
"
,
tsArguments
.
max_sql_len
);
printf
(
"table_batch: %d
\n
"
,
tsArguments
.
table_batch
);
printf
(
"thread_num: %d
\n
"
,
tsArguments
.
thread_num
);
printf
(
"allow_sys: %d
\n
"
,
tsArguments
.
allow_sys
);
printf
(
"abort: %d
\n
"
,
tsArguments
.
abort
);
printf
(
"isDumpIn: %d
\n
"
,
tsArguments
.
isDumpIn
);
...
...
@@ -682,8 +707,8 @@ int taosDumpOut(struct arguments *arguments) {
TAOS_FIELD
*
fields
=
taos_fetch_fields
(
result
);
while
((
row
=
taos_fetch_row
(
result
))
!=
NULL
)
{
// sys database name : '
monitor
', but subsequent version changed to 'log'
if
(
strncasecmp
(
row
[
TSDB_SHOW_DB_NAME_INDEX
],
"
monitor
"
,
fields
[
TSDB_SHOW_DB_NAME_INDEX
].
bytes
)
==
0
&&
// sys database name : '
log
', but subsequent version changed to 'log'
if
(
strncasecmp
(
row
[
TSDB_SHOW_DB_NAME_INDEX
],
"
log
"
,
fields
[
TSDB_SHOW_DB_NAME_INDEX
].
bytes
)
==
0
&&
(
!
arguments
->
allow_sys
))
continue
;
...
...
@@ -711,20 +736,27 @@ int taosDumpOut(struct arguments *arguments) {
}
strncpy
(
dbInfos
[
count
]
->
name
,
(
char
*
)
row
[
TSDB_SHOW_DB_NAME_INDEX
],
fields
[
TSDB_SHOW_DB_NAME_INDEX
].
bytes
);
#if 0
dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]));
dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]));
dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]);
dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]);
dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]);
dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]);
dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]);
dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX]));
dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]);
dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX]));
dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
#if 0
if (arguments->with_property) {
dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
dbInfos[count]->daysToKeep1;
dbInfos[count]->daysToKeep2;
dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
#endif
count
++
;
if
(
arguments
->
databases
)
{
...
...
@@ -1037,10 +1069,13 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr
+=
sprintf
(
pstr
,
"CREATE DATABASE IF NOT EXISTS %s"
,
dbInfo
->
name
);
if
(
isDumpProperty
)
{
#if 0
pstr += sprintf(pstr,
" REPLICA %d DAYS %d KEEP %d TABLES %d ROWS %d CACHE %d ABLOCKS %d TBLOCKS %d CTIME %d CLOG %d COMP %d"
,
dbInfo
->
replica
,
dbInfo
->
days
,
dbInfo
->
keep
,
dbInfo
->
tables
,
dbInfo
->
rows
,
dbInfo
->
cache
,
dbInfo
->
ablocks
,
dbInfo
->
tblocks
,
dbInfo
->
ctime
,
dbInfo
->
clog
,
dbInfo
->
comp
);
"TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d",
dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize,
dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression,
dbInfo->precision, dbInfo->update);
#endif
}
pstr
+=
sprintf
(
pstr
,
";"
);
...
...
@@ -1459,7 +1494,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
return
-
1
;
}
char
*
tmpBuffer
=
(
char
*
)
calloc
(
1
,
COMMAND_SIZE
);
int32_t
sql_buf_len
=
arguments
->
max_sql_len
;
char
*
tmpBuffer
=
(
char
*
)
calloc
(
1
,
sql_buf_len
+
128
);
if
(
tmpBuffer
==
NULL
)
{
fprintf
(
stderr
,
"failed to allocate memory
\n
"
);
free
(
tmpCommand
);
...
...
@@ -1502,85 +1538,83 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
return
-
1
;
}
char
sqlStr
[
8
]
=
"
\0
"
;
if
(
arguments
->
mysqlFlag
)
{
sprintf
(
sqlStr
,
"INSERT"
);
}
else
{
sprintf
(
sqlStr
,
"IMPORT"
);
}
int
rowFlag
=
0
;
int32_t
curr_sqlstr_len
=
0
;
int32_t
total_sqlstr_len
=
0
;
count
=
0
;
while
((
row
=
taos_fetch_row
(
tmpResult
))
!=
NULL
)
{
pstr
=
tmpBuffer
;
curr_sqlstr_len
=
0
;
int32_t
*
length
=
taos_fetch_lengths
(
tmpResult
);
// act len
if
(
count
==
0
)
{
pstr
+=
sprintf
(
pstr
,
"%s INTO %s VALUES ("
,
sqlStr
,
tbname
);
}
else
{
total_sqlstr_len
=
0
;
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"INSERT INTO %s VALUES ("
,
tbname
);
}
else
{
if
(
arguments
->
mysqlFlag
)
{
if
(
0
==
rowFlag
)
{
pstr
+=
sprintf
(
pstr
,
"("
);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"("
);
rowFlag
++
;
}
else
{
pstr
+=
sprintf
(
pstr
,
", ("
);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
", ("
);
}
}
else
{
pstr
+=
sprintf
(
pstr
,
"("
);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"("
);
}
}
for
(
int
col
=
0
;
col
<
numFields
;
col
++
)
{
if
(
col
!=
0
)
pstr
+=
sprintf
(
pstr
,
", "
);
if
(
col
!=
0
)
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
", "
);
if
(
row
[
col
]
==
NULL
)
{
pstr
+=
sprintf
(
pstr
,
"NULL"
);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"NULL"
);
continue
;
}
switch
(
fields
[
col
].
type
)
{
case
TSDB_DATA_TYPE_BOOL
:
pstr
+=
sprintf
(
pstr
,
"%d"
,
((((
int32_t
)(
*
((
char
*
)
row
[
col
])))
==
1
)
?
1
:
0
));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%d"
,
((((
int32_t
)(
*
((
char
*
)
row
[
col
])))
==
1
)
?
1
:
0
));
break
;
case
TSDB_DATA_TYPE_TINYINT
:
pstr
+=
sprintf
(
pstr
,
"%d"
,
*
((
int8_t
*
)
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%d"
,
*
((
int8_t
*
)
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_SMALLINT
:
pstr
+=
sprintf
(
pstr
,
"%d"
,
*
((
int16_t
*
)
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%d"
,
*
((
int16_t
*
)
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_INT
:
pstr
+=
sprintf
(
pstr
,
"%d"
,
*
((
int32_t
*
)
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%d"
,
*
((
int32_t
*
)
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_BIGINT
:
pstr
+=
sprintf
(
pstr
,
"%"
PRId64
""
,
*
((
int64_t
*
)
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%"
PRId64
""
,
*
((
int64_t
*
)
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_FLOAT
:
pstr
+=
sprintf
(
pstr
,
"%f"
,
GET_FLOAT_VAL
(
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%f"
,
GET_FLOAT_VAL
(
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_DOUBLE
:
pstr
+=
sprintf
(
pstr
,
"%f"
,
GET_DOUBLE_VAL
(
row
[
col
]));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%f"
,
GET_DOUBLE_VAL
(
row
[
col
]));
break
;
case
TSDB_DATA_TYPE_BINARY
:
*
(
pstr
++
)
=
'\''
;
//
*(pstr++) = '\'';
converStringToReadable
((
char
*
)
row
[
col
],
length
[
col
],
tbuf
,
COMMAND_SIZE
);
pstr
=
stpcpy
(
pstr
,
tbuf
);
*
(
pstr
++
)
=
'\''
;
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
pstr
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"
\'
%s
\'
"
,
tbuf
);
break
;
case
TSDB_DATA_TYPE_NCHAR
:
convertNCharToReadable
((
char
*
)
row
[
col
],
length
[
col
],
tbuf
,
COMMAND_SIZE
);
pstr
+=
sprintf
(
pstr
,
"
\'
%s
\'
"
,
tbuf
);
pstr
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"
\'
%s
\'
"
,
tbuf
);
break
;
case
TSDB_DATA_TYPE_TIMESTAMP
:
if
(
!
arguments
->
mysqlFlag
)
{
pstr
+=
sprintf
(
pstr
,
"%"
PRId64
""
,
*
(
int64_t
*
)
row
[
col
]);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"%"
PRId64
""
,
*
(
int64_t
*
)
row
[
col
]);
}
else
{
char
buf
[
64
]
=
"
\0
"
;
int64_t
ts
=
*
((
int64_t
*
)
row
[
col
]);
time_t
tt
=
(
time_t
)(
ts
/
1000
);
struct
tm
*
ptm
=
localtime
(
&
tt
);
strftime
(
buf
,
64
,
"%y-%m-%d %H:%M:%S"
,
ptm
);
pstr
+=
sprintf
(
pstr
,
"
\'
%s.%03d
\'
"
,
buf
,
(
int
)(
ts
%
1000
));
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
"
\'
%s.%03d
\'
"
,
buf
,
(
int
)(
ts
%
1000
));
}
break
;
default:
...
...
@@ -1588,13 +1622,15 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
}
}
pstr
+=
sprintf
(
pstr
,
") "
);
curr_sqlstr_len
+=
sprintf
(
pstr
+
curr_sqlstr_len
,
") "
);
totalRows
++
;
count
++
;
fprintf
(
fp
,
"%s"
,
tmpBuffer
);
if
(
count
>=
arguments
->
data_batch
)
{
total_sqlstr_len
+=
curr_sqlstr_len
;
if
((
count
>=
arguments
->
data_batch
)
||
(
sql_buf_len
-
total_sqlstr_len
<
TSDB_MAX_BYTES_PER_ROW
))
{
fprintf
(
fp
,
";
\n
"
);
count
=
0
;
}
//else {
...
...
src/query/inc/qHistogram.h
浏览文件 @
8f46f81f
...
...
@@ -43,6 +43,8 @@ typedef struct SHistogramInfo {
int32_t
numOfElems
;
int32_t
numOfEntries
;
int32_t
maxEntries
;
double
min
;
double
max
;
double
min
;
double
max
;
...
...
tests/examples/JDBC/mybatisplus-demo/.gitignore
0 → 100644
浏览文件 @
8f46f81f
README.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java
0 → 100644
浏览文件 @
8f46f81f
/*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import
java.net.*
;
import
java.io.*
;
import
java.nio.channels.*
;
import
java.util.Properties
;
public
class
MavenWrapperDownloader
{
private
static
final
String
WRAPPER_VERSION
=
"0.5.6"
;
/**
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
*/
private
static
final
String
DEFAULT_DOWNLOAD_URL
=
"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+
WRAPPER_VERSION
+
"/maven-wrapper-"
+
WRAPPER_VERSION
+
".jar"
;
/**
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private
static
final
String
MAVEN_WRAPPER_PROPERTIES_PATH
=
".mvn/wrapper/maven-wrapper.properties"
;
/**
* Path where the maven-wrapper.jar will be saved to.
*/
private
static
final
String
MAVEN_WRAPPER_JAR_PATH
=
".mvn/wrapper/maven-wrapper.jar"
;
/**
* Name of the property which should be used to override the default download url for the wrapper.
*/
private
static
final
String
PROPERTY_NAME_WRAPPER_URL
=
"wrapperUrl"
;
public
static
void
main
(
String
args
[])
{
System
.
out
.
println
(
"- Downloader started"
);
File
baseDirectory
=
new
File
(
args
[
0
]);
System
.
out
.
println
(
"- Using base directory: "
+
baseDirectory
.
getAbsolutePath
());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File
mavenWrapperPropertyFile
=
new
File
(
baseDirectory
,
MAVEN_WRAPPER_PROPERTIES_PATH
);
String
url
=
DEFAULT_DOWNLOAD_URL
;
if
(
mavenWrapperPropertyFile
.
exists
())
{
FileInputStream
mavenWrapperPropertyFileInputStream
=
null
;
try
{
mavenWrapperPropertyFileInputStream
=
new
FileInputStream
(
mavenWrapperPropertyFile
);
Properties
mavenWrapperProperties
=
new
Properties
();
mavenWrapperProperties
.
load
(
mavenWrapperPropertyFileInputStream
);
url
=
mavenWrapperProperties
.
getProperty
(
PROPERTY_NAME_WRAPPER_URL
,
url
);
}
catch
(
IOException
e
)
{
System
.
out
.
println
(
"- ERROR loading '"
+
MAVEN_WRAPPER_PROPERTIES_PATH
+
"'"
);
}
finally
{
try
{
if
(
mavenWrapperPropertyFileInputStream
!=
null
)
{
mavenWrapperPropertyFileInputStream
.
close
();
}
}
catch
(
IOException
e
)
{
// Ignore ...
}
}
}
System
.
out
.
println
(
"- Downloading from: "
+
url
);
File
outputFile
=
new
File
(
baseDirectory
.
getAbsolutePath
(),
MAVEN_WRAPPER_JAR_PATH
);
if
(!
outputFile
.
getParentFile
().
exists
())
{
if
(!
outputFile
.
getParentFile
().
mkdirs
())
{
System
.
out
.
println
(
"- ERROR creating output directory '"
+
outputFile
.
getParentFile
().
getAbsolutePath
()
+
"'"
);
}
}
System
.
out
.
println
(
"- Downloading to: "
+
outputFile
.
getAbsolutePath
());
try
{
downloadFileFromURL
(
url
,
outputFile
);
System
.
out
.
println
(
"Done"
);
System
.
exit
(
0
);
}
catch
(
Throwable
e
)
{
System
.
out
.
println
(
"- Error downloading"
);
e
.
printStackTrace
();
System
.
exit
(
1
);
}
}
private
static
void
downloadFileFromURL
(
String
urlString
,
File
destination
)
throws
Exception
{
if
(
System
.
getenv
(
"MVNW_USERNAME"
)
!=
null
&&
System
.
getenv
(
"MVNW_PASSWORD"
)
!=
null
)
{
String
username
=
System
.
getenv
(
"MVNW_USERNAME"
);
char
[]
password
=
System
.
getenv
(
"MVNW_PASSWORD"
).
toCharArray
();
Authenticator
.
setDefault
(
new
Authenticator
()
{
@Override
protected
PasswordAuthentication
getPasswordAuthentication
()
{
return
new
PasswordAuthentication
(
username
,
password
);
}
});
}
URL
website
=
new
URL
(
urlString
);
ReadableByteChannel
rbc
;
rbc
=
Channels
.
newChannel
(
website
.
openStream
());
FileOutputStream
fos
=
new
FileOutputStream
(
destination
);
fos
.
getChannel
().
transferFrom
(
rbc
,
0
,
Long
.
MAX_VALUE
);
fos
.
close
();
rbc
.
close
();
}
}
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar
0 → 100644
浏览文件 @
8f46f81f
文件已添加
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties
0 → 100644
浏览文件 @
8f46f81f
distributionUrl
=
https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl
=
https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
tests/examples/JDBC/mybatisplus-demo/mvnw
0 → 100755
浏览文件 @
8f46f81f
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
if
[
-z
"
$MAVEN_SKIP_RC
"
]
;
then
if
[
-f
/etc/mavenrc
]
;
then
.
/etc/mavenrc
fi
if
[
-f
"
$HOME
/.mavenrc"
]
;
then
.
"
$HOME
/.mavenrc"
fi
fi
# OS specific support. $var _must_ be set to either true or false.
cygwin
=
false
darwin
=
false
mingw
=
false
case
"
$(
uname
)
"
in
CYGWIN
*
)
cygwin
=
true
;;
MINGW
*
)
mingw
=
true
;;
Darwin
*
)
darwin
=
true
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
if
[
-z
"
$JAVA_HOME
"
]
;
then
if
[
-x
"/usr/libexec/java_home"
]
;
then
export
JAVA_HOME
=
"
$(
/usr/libexec/java_home
)
"
else
export
JAVA_HOME
=
"/Library/Java/Home"
fi
fi
;;
esac
if
[
-z
"
$JAVA_HOME
"
]
;
then
if
[
-r
/etc/gentoo-release
]
;
then
JAVA_HOME
=
$(
java-config
--jre-home
)
fi
fi
if
[
-z
"
$M2_HOME
"
]
;
then
## resolve links - $0 may be a link to maven's home
PRG
=
"
$0
"
# need this for relative symlinks
while
[
-h
"
$PRG
"
]
;
do
ls
=
$(
ls
-ld
"
$PRG
"
)
link
=
$(
expr
"
$ls
"
:
'.*-> \(.*\)$'
)
if
expr
"
$link
"
:
'/.*'
>
/dev/null
;
then
PRG
=
"
$link
"
else
PRG
=
"
$(
dirname
"
$PRG
"
)
/
$link
"
fi
done
saveddir
=
$(
pwd
)
M2_HOME
=
$(
dirname
"
$PRG
"
)
/..
# make it fully qualified
M2_HOME
=
$(
cd
"
$M2_HOME
"
&&
pwd
)
cd
"
$saveddir
"
# echo Using m2 at $M2_HOME
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if
$cygwin
;
then
[
-n
"
$M2_HOME
"
]
&&
M2_HOME
=
$(
cygpath
--unix
"
$M2_HOME
"
)
[
-n
"
$JAVA_HOME
"
]
&&
JAVA_HOME
=
$(
cygpath
--unix
"
$JAVA_HOME
"
)
[
-n
"
$CLASSPATH
"
]
&&
CLASSPATH
=
$(
cygpath
--path
--unix
"
$CLASSPATH
"
)
fi
# For Mingw, ensure paths are in UNIX format before anything is touched
if
$mingw
;
then
[
-n
"
$M2_HOME
"
]
&&
M2_HOME
=
"
$(
(
cd
"
$M2_HOME
"
pwd
)
)
"
[
-n
"
$JAVA_HOME
"
]
&&
JAVA_HOME
=
"
$(
(
cd
"
$JAVA_HOME
"
pwd
)
)
"
fi
if
[
-z
"
$JAVA_HOME
"
]
;
then
javaExecutable
=
"
$(
which javac
)
"
if
[
-n
"
$javaExecutable
"
]
&&
!
[
"
$(
expr
\"
$javaExecutable
\"
:
'\([^ ]*\)'
)
"
=
"no"
]
;
then
# readlink(1) is not available as standard on Solaris 10.
readLink
=
$(
which
readlink
)
if
[
!
$(
expr
"
$readLink
"
:
'\([^ ]*\)'
)
=
"no"
]
;
then
if
$darwin
;
then
javaHome
=
"
$(
dirname
\"
$javaExecutable
\"
)
"
javaExecutable
=
"
$(
cd
\"
$javaHome
\"
&&
pwd
-P
)
/javac"
else
javaExecutable
=
"
$(
readlink
-f
\"
$javaExecutable
\"
)
"
fi
javaHome
=
"
$(
dirname
\"
$javaExecutable
\"
)
"
javaHome
=
$(
expr
"
$javaHome
"
:
'\(.*\)/bin'
)
JAVA_HOME
=
"
$javaHome
"
export
JAVA_HOME
fi
fi
fi
if
[
-z
"
$JAVACMD
"
]
;
then
if
[
-n
"
$JAVA_HOME
"
]
;
then
if
[
-x
"
$JAVA_HOME
/jre/sh/java"
]
;
then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD
=
"
$JAVA_HOME
/jre/sh/java"
else
JAVACMD
=
"
$JAVA_HOME
/bin/java"
fi
else
JAVACMD
=
"
$(
which java
)
"
fi
fi
if
[
!
-x
"
$JAVACMD
"
]
;
then
echo
"Error: JAVA_HOME is not defined correctly."
>
&2
echo
" We cannot execute
$JAVACMD
"
>
&2
exit
1
fi
if
[
-z
"
$JAVA_HOME
"
]
;
then
echo
"Warning: JAVA_HOME environment variable is not set."
fi
CLASSWORLDS_LAUNCHER
=
org.codehaus.plexus.classworlds.launcher.Launcher
# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir
()
{
if
[
-z
"
$1
"
]
;
then
echo
"Path not specified to find_maven_basedir"
return
1
fi
basedir
=
"
$1
"
wdir
=
"
$1
"
while
[
"
$wdir
"
!=
'/'
]
;
do
if
[
-d
"
$wdir
"
/.mvn
]
;
then
basedir
=
$wdir
break
fi
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
if
[
-d
"
${
wdir
}
"
]
;
then
wdir
=
$(
cd
"
$wdir
/.."
pwd
)
fi
# end of workaround
done
echo
"
${
basedir
}
"
}
# concatenates all lines of a file
concat_lines
()
{
if
[
-f
"
$1
"
]
;
then
echo
"
$(
tr
-s
'\n'
' '
<
"
$1
"
)
"
fi
}
BASE_DIR
=
$(
find_maven_basedir
"
$(
pwd
)
"
)
if
[
-z
"
$BASE_DIR
"
]
;
then
exit
1
fi
##########################################################################################
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
# This allows using the maven wrapper in projects that prohibit checking in binary data.
##########################################################################################
if
[
-r
"
$BASE_DIR
/.mvn/wrapper/maven-wrapper.jar"
]
;
then
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Found .mvn/wrapper/maven-wrapper.jar"
fi
else
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
fi
if
[
-n
"
$MVNW_REPOURL
"
]
;
then
jarUrl
=
"
$MVNW_REPOURL
/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
else
jarUrl
=
"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
fi
while
IFS
=
"="
read
key value
;
do
case
"
$key
"
in
wrapperUrl
)
jarUrl
=
"
$value
"
break
;;
esac
done
<
"
$BASE_DIR
/.mvn/wrapper/maven-wrapper.properties"
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Downloading from:
$jarUrl
"
fi
wrapperJarPath
=
"
$BASE_DIR
/.mvn/wrapper/maven-wrapper.jar"
if
$cygwin
;
then
wrapperJarPath
=
$(
cygpath
--path
--windows
"
$wrapperJarPath
"
)
fi
if
command
-v
wget
>
/dev/null
;
then
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Found wget ... using wget"
fi
if
[
-z
"
$MVNW_USERNAME
"
]
||
[
-z
"
$MVNW_PASSWORD
"
]
;
then
wget
"
$jarUrl
"
-O
"
$wrapperJarPath
"
else
wget
--http-user
=
$MVNW_USERNAME
--http-password
=
$MVNW_PASSWORD
"
$jarUrl
"
-O
"
$wrapperJarPath
"
fi
elif
command
-v
curl
>
/dev/null
;
then
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Found curl ... using curl"
fi
if
[
-z
"
$MVNW_USERNAME
"
]
||
[
-z
"
$MVNW_PASSWORD
"
]
;
then
curl
-o
"
$wrapperJarPath
"
"
$jarUrl
"
-f
else
curl
--user
$MVNW_USERNAME
:
$MVNW_PASSWORD
-o
"
$wrapperJarPath
"
"
$jarUrl
"
-f
fi
else
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
"Falling back to using Java to download"
fi
javaClass
=
"
$BASE_DIR
/.mvn/wrapper/MavenWrapperDownloader.java"
# For Cygwin, switch paths to Windows format before running javac
if
$cygwin
;
then
javaClass
=
$(
cygpath
--path
--windows
"
$javaClass
"
)
fi
if
[
-e
"
$javaClass
"
]
;
then
if
[
!
-e
"
$BASE_DIR
/.mvn/wrapper/MavenWrapperDownloader.class"
]
;
then
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
" - Compiling MavenWrapperDownloader.java ..."
fi
# Compiling the Java class
(
"
$JAVA_HOME
/bin/javac"
"
$javaClass
"
)
fi
if
[
-e
"
$BASE_DIR
/.mvn/wrapper/MavenWrapperDownloader.class"
]
;
then
# Running the downloader
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
" - Running MavenWrapperDownloader.java ..."
fi
(
"
$JAVA_HOME
/bin/java"
-cp
.mvn/wrapper MavenWrapperDownloader
"
$MAVEN_PROJECTBASEDIR
"
)
fi
fi
fi
fi
##########################################################################################
# End of extension
##########################################################################################
export
MAVEN_PROJECTBASEDIR
=
${
MAVEN_BASEDIR
:-
"
$BASE_DIR
"
}
if
[
"
$MVNW_VERBOSE
"
=
true
]
;
then
echo
$MAVEN_PROJECTBASEDIR
fi
MAVEN_OPTS
=
"
$(
concat_lines
"
$MAVEN_PROJECTBASEDIR
/.mvn/jvm.config"
)
$MAVEN_OPTS
"
# For Cygwin, switch paths to Windows format before running java
if
$cygwin
;
then
[
-n
"
$M2_HOME
"
]
&&
M2_HOME
=
$(
cygpath
--path
--windows
"
$M2_HOME
"
)
[
-n
"
$JAVA_HOME
"
]
&&
JAVA_HOME
=
$(
cygpath
--path
--windows
"
$JAVA_HOME
"
)
[
-n
"
$CLASSPATH
"
]
&&
CLASSPATH
=
$(
cygpath
--path
--windows
"
$CLASSPATH
"
)
[
-n
"
$MAVEN_PROJECTBASEDIR
"
]
&&
MAVEN_PROJECTBASEDIR
=
$(
cygpath
--path
--windows
"
$MAVEN_PROJECTBASEDIR
"
)
fi
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS
=
"
$MAVEN_CONFIG
$@
"
export
MAVEN_CMD_LINE_ARGS
WRAPPER_LAUNCHER
=
org.apache.maven.wrapper.MavenWrapperMain
exec
"
$JAVACMD
"
\
$MAVEN_OPTS
\
-classpath
"
$MAVEN_PROJECTBASEDIR
/.mvn/wrapper/maven-wrapper.jar"
\
"-Dmaven.home=
${
M2_HOME
}
"
"-Dmaven.multiModuleProjectDirectory=
${
MAVEN_PROJECTBASEDIR
}
"
\
${
WRAPPER_LAUNCHER
}
$MAVEN_CONFIG
"
$@
"
tests/examples/JDBC/mybatisplus-demo/mvnw.cmd
0 → 100644
浏览文件 @
8f46f81f
@REM ----------------------------------------------------------------------------
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM https://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM ----------------------------------------------------------------------------
@REM ----------------------------------------------------------------------------
@REM Maven Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@REM
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
@REM ----------------------------------------------------------------------------
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo
off
@REM set title of command window
title
%
0
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
@if
"
%MAVEN_BATCH_ECHO%
"
==
"on"
echo
%MAVEN_BATCH_ECHO%
@REM set %HOME% to equivalent of $HOME
if
"
%HOME%
"
==
""
(
set
"HOME=
%HOMEDRIVE%%
HOMEPATH
%
"
)
@REM Execute a user defined script before this one
if
not
"
%MAVEN_SKIP_RC%
"
==
""
goto
skipRcPre
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
if
exist
"
%HOME%
\mavenrc_pre.bat"
call
"
%HOME%
\mavenrc_pre.bat"
if
exist
"
%HOME%
\mavenrc_pre.cmd"
call
"
%HOME%
\mavenrc_pre.cmd"
:skipRcPre
@setlocal
set
ERROR_CODE
=
0
@REM To isolate internal variables from possible post scripts, we use another setlocal
@setlocal
@REM ==== START VALIDATION ====
if
not
"
%JAVA_HOME%
"
==
""
goto
OkJHome
echo
.
echo
Error
:
JAVA_HOME
not
found
in
your
environment
.
>&
2
echo
Please
set
the
JAVA_HOME
variable
in
your
environment
to
match
the
>&
2
echo
location
of
your
Java
installation
.
>&
2
echo
.
goto
error
:OkJHome
if
exist
"
%JAVA_HOME%
\bin\java.exe"
goto
init
echo
.
echo
Error
:
JAVA_HOME
is
set
to
an
invalid
directory
.
>&
2
echo
JAVA_HOME
=
"
%JAVA_HOME%
"
>&
2
echo
Please
set
the
JAVA_HOME
variable
in
your
environment
to
match
the
>&
2
echo
location
of
your
Java
installation
.
>&
2
echo
.
goto
error
@REM ==== END VALIDATION ====
:init
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.
set
MAVEN_PROJECTBASEDIR
=
%MAVEN_BASEDIR%
IF
NOT
"
%MAVEN_PROJECTBASEDIR%
"
==
""
goto
endDetectBaseDir
set
EXEC_DIR
=
%CD%
set
WDIR
=
%EXEC_DIR%
:findBaseDir
IF
EXIST
"
%WDIR%
"
\.mvn
goto
baseDirFound
cd
..
IF
"
%WDIR%
"
==
"
%CD%
"
goto
baseDirNotFound
set
WDIR
=
%CD%
goto
findBaseDir
:baseDirFound
set
MAVEN_PROJECTBASEDIR
=
%WDIR%
cd
"
%EXEC_DIR%
"
goto
endDetectBaseDir
:baseDirNotFound
set
MAVEN_PROJECTBASEDIR
=
%EXEC_DIR%
cd
"
%EXEC_DIR%
"
:endDetectBaseDir
IF
NOT
EXIST
"
%MAVEN_PROJECTBASEDIR%
\.mvn\jvm.config"
goto
endReadAdditionalConfig
@setlocal
EnableExtensions
EnableDelayedExpansion
for
/F
"usebackq delims="
%%a
in
(
"
%MAVEN_PROJECTBASEDIR%
\.mvn\jvm.config"
)
do
set
JVM_CONFIG_MAVEN_PROPS
=
!JVM_CONFIG_MAVEN_PROPS!
%%a
@endlocal
&
set
JVM_CONFIG_MAVEN_PROPS
=
%JVM_CONFIG_MAVEN_PROPS%
:endReadAdditionalConfig
SET
MAVEN_JAVA_EXE
=
"
%JAVA_HOME%
\bin\java.exe"
set
WRAPPER_JAR
=
"
%MAVEN_PROJECTBASEDIR%
\.mvn\wrapper\maven-wrapper.jar"
set
WRAPPER_LAUNCHER
=
org
.apache.maven.wrapper.MavenWrapperMain
set
DOWNLOAD_URL
=
"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
FOR
/F
"tokens=1,2 delims=="
%%A
IN
(
"
%MAVEN_PROJECTBASEDIR%
\.mvn\wrapper\maven-wrapper.properties"
)
DO
(
IF
"
%%A
"
==
"wrapperUrl"
SET
DOWNLOAD_URL
=
%%B
)
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
if
exist
%WRAPPER_JAR%
(
if
"
%MVNW_VERBOSE%
"
==
"true"
(
echo
Found
%WRAPPER_JAR%
)
)
else
(
if
not
"
%MVNW_REPOURL%
"
==
""
(
SET
DOWNLOAD_URL
=
"
%MVNW_REPOURL%
/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
)
if
"
%MVNW_VERBOSE%
"
==
"true"
(
echo
Couldn
't find
%WRAPPER_JAR%
, downloading it ...
echo Downloading from:
%DOWNLOAD_URL%
)
powershell -Command "&{"
^
"$webclient = new-object System.Net.WebClient;"
^
"if (-not ([string]::IsNullOrEmpty('
%MVNW_USERNAME%
') -and [string]::IsNullOrEmpty('
%MVNW_PASSWORD%
'))) {"
^
"$webclient.Credentials = new-object System.Net.NetworkCredential('
%MVNW_USERNAME%
', '
%MVNW_PASSWORD%
');"
^
"}"
^
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('
%DOWNLOAD_URL%
', '
%WRAPPER_JAR%
')"
^
"}"
if "
%MVNW_VERBOSE%
" == "true" (
echo Finished downloading
%WRAPPER_JAR%
)
)
@REM End of extension
@REM Provide a "standardized" way to retrieve the CLI args that will
@REM work with both Windows and non-Windows executions.
set MAVEN_CMD_LINE_ARGS=
%
*
%MAVEN_JAVA_EXE%
%JVM_CONFIG_MAVEN_PROPS%
%MAVEN_OPTS%
%MAVEN_DEBUG_OPTS%
-classpath
%WRAPPER_JAR%
"-Dmaven.multiModuleProjectDirectory=
%MAVEN_PROJECTBASEDIR%
"
%WRAPPER_LAUNCHER%
%MAVEN_CONFIG%
%
*
if ERRORLEVEL 1 goto error
goto end
:error
set ERROR_CODE=1
:end
@endlocal & set ERROR_CODE=
%ERROR_CODE%
if not "
%MAVEN_SKIP_RC%
" == "" goto skipRcPost
@REM check for post script, once with legacy .bat ending and once with .cmd ending
if exist "
%HOME%
\mavenrc_post.bat" call "
%HOME%
\mavenrc_post.bat"
if exist "
%HOME%
\mavenrc_post.cmd" call "
%HOME%
\mavenrc_post.cmd"
:skipRcPost
@REM pause the script if MAVEN_BATCH_PAUSE is set to '
on
'
if "
%MAVEN_BATCH_PAUSE%
" == "on" pause
if "
%MAVEN_TERMINATE_CMD%
" == "on" exit
%ERROR_CODE%
exit /B
%ERROR_CODE%
tests/examples/JDBC/mybatisplus-demo/pom.xml
0 → 100644
浏览文件 @
8f46f81f
<?xml version="1.0" encoding="UTF-8"?>
<project
xmlns=
"http://maven.apache.org/POM/4.0.0"
xmlns:xsi=
"http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation=
"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"
>
<modelVersion>
4.0.0
</modelVersion>
<parent>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-starter-parent
</artifactId>
<version>
2.4.0
</version>
<relativePath/>
<!-- lookup parent from repository -->
</parent>
<groupId>
com.taosdata.example
</groupId>
<artifactId>
mybatisplus-demo
</artifactId>
<version>
0.0.1-SNAPSHOT
</version>
<name>
mybatisplus-demo
</name>
<description>
Demo project for tdengine
</description>
<properties>
<java.version>
1.8
</java.version>
</properties>
<dependencies>
<dependency>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-starter
</artifactId>
</dependency>
<dependency>
<groupId>
org.projectlombok
</groupId>
<artifactId>
lombok
</artifactId>
<optional>
true
</optional>
</dependency>
<dependency>
<groupId>
com.baomidou
</groupId>
<artifactId>
mybatis-plus-boot-starter
</artifactId>
<version>
3.1.2
</version>
</dependency>
<dependency>
<groupId>
com.h2database
</groupId>
<artifactId>
h2
</artifactId>
<scope>
runtime
</scope>
</dependency>
<dependency>
<groupId>
com.taosdata.jdbc
</groupId>
<artifactId>
taos-jdbcdriver
</artifactId>
<version>
2.0.11
</version>
</dependency>
<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
<dependency>
<groupId>
mysql
</groupId>
<artifactId>
mysql-connector-java
</artifactId>
<version>
5.1.47
</version>
</dependency>
<dependency>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-starter-web
</artifactId>
</dependency>
<dependency>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-devtools
</artifactId>
<scope>
runtime
</scope>
<optional>
true
</optional>
</dependency>
<dependency>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-starter-test
</artifactId>
<scope>
test
</scope>
</dependency>
<dependency>
<groupId>
junit
</groupId>
<artifactId>
junit
</artifactId>
<version>
4.12
</version>
<scope>
test
</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>
org.springframework.boot
</groupId>
<artifactId>
spring-boot-maven-plugin
</artifactId>
</plugin>
<plugin>
<groupId>
org.apache.maven.plugins
</groupId>
<artifactId>
maven-surefire-plugin
</artifactId>
<version>
2.17
</version>
<configuration>
<includes>
<include>
**/*Test.java
</include>
</includes>
<excludes>
<exclude>
**/Abstract*.java
</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo
;
import
org.mybatis.spring.annotation.MapperScan
;
import
org.springframework.boot.SpringApplication
;
import
org.springframework.boot.autoconfigure.SpringBootApplication
;
@SpringBootApplication
@MapperScan
(
"com.taosdata.example.mybatisplusdemo.mapper"
)
public
class
MybatisplusDemoApplication
{
public
static
void
main
(
String
[]
args
)
{
SpringApplication
.
run
(
MybatisplusDemoApplication
.
class
,
args
);
}
}
\ No newline at end of file
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.config
;
import
com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor
;
import
org.springframework.context.annotation.Bean
;
import
org.springframework.context.annotation.Configuration
;
@Configuration
public
class
MybatisPlusConfig
{
/** mybatis 3.4.1 pagination config start ***/
// @Bean
// public MybatisPlusInterceptor mybatisPlusInterceptor() {
// MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
// interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
// return interceptor;
// }
// @Bean
// public ConfigurationCustomizer configurationCustomizer() {
// return configuration -> configuration.setUseDeprecatedExecutor(false);
// }
@Bean
public
PaginationInterceptor
paginationInterceptor
()
{
// return new PaginationInterceptor();
PaginationInterceptor
paginationInterceptor
=
new
PaginationInterceptor
();
//TODO: mybatis-plus do not support TDengine, use postgresql Dialect
paginationInterceptor
.
setDialectType
(
"postgresql"
);
return
paginationInterceptor
;
}
}
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.domain
;
import
lombok.Data
;
import
java.sql.Timestamp
;
@Data
public
class
Temperature
{
private
Timestamp
ts
;
private
float
temperature
;
private
String
location
;
private
int
tbIndex
;
}
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.domain
;
import
lombok.Data
;
import
java.sql.Timestamp
;
@Data
public
class
Weather
{
private
Timestamp
ts
;
private
float
temperature
;
private
int
humidity
;
private
String
location
;
}
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.mapper
;
import
com.baomidou.mybatisplus.core.mapper.BaseMapper
;
import
com.taosdata.example.mybatisplusdemo.domain.Temperature
;
import
org.apache.ibatis.annotations.Insert
;
import
org.apache.ibatis.annotations.Param
;
import
org.apache.ibatis.annotations.Update
;
public
interface
TemperatureMapper
extends
BaseMapper
<
Temperature
>
{
@Update
(
"CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)"
)
int
createSuperTable
();
@Update
(
"create table #{tbName} using temperature tags( #{location}, #{tbindex})"
)
int
createTable
(
@Param
(
"tbName"
)
String
tbName
,
@Param
(
"location"
)
String
location
,
@Param
(
"tbindex"
)
int
tbindex
);
@Update
(
"drop table if exists temperature"
)
void
dropSuperTable
();
@Insert
(
"insert into t${tbIndex}(ts, temperature) values(#{ts}, #{temperature})"
)
int
insertOne
(
Temperature
one
);
}
tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.mapper
;
import
com.baomidou.mybatisplus.core.mapper.BaseMapper
;
import
com.taosdata.example.mybatisplusdemo.domain.Weather
;
public
interface
WeatherMapper
extends
BaseMapper
<
Weather
>
{
}
tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml
0 → 100644
浏览文件 @
8f46f81f
spring
:
datasource
:
# driver-class-name: org.h2.Driver
# schema: classpath:db/schema-mysql.sql
# data: classpath:db/data-mysql.sql
# url: jdbc:h2:mem:test
# username: root
# password: test
# driver-class-name: com.mysql.jdbc.Driver
# url: jdbc:mysql://master:3306/test?useSSL=false
# username: root
# password: 123456
driver-class-name
:
com.taosdata.jdbc.TSDBDriver
url
:
jdbc:TAOS://localhost:6030/mp_test
user
:
root
password
:
taosdata
charset
:
UTF-8
locale
:
en_US.UTF-8
timezone
:
UTC-8
mybatis-plus
:
configuration
:
map-underscore-to-camel-case
:
false
logging
:
level
:
com
:
taosdata
:
example
:
mybatisplusdemo
:
mapper
:
debug
tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.mapper
;
import
com.baomidou.mybatisplus.core.conditions.query.QueryWrapper
;
import
com.baomidou.mybatisplus.core.metadata.IPage
;
import
com.baomidou.mybatisplus.extension.plugins.pagination.Page
;
import
com.taosdata.example.mybatisplusdemo.domain.Temperature
;
import
org.junit.After
;
import
org.junit.Assert
;
import
org.junit.Before
;
import
org.junit.Test
;
import
org.junit.runner.RunWith
;
import
org.springframework.beans.factory.annotation.Autowired
;
import
org.springframework.boot.test.context.SpringBootTest
;
import
org.springframework.test.context.junit4.SpringJUnit4ClassRunner
;
import
java.sql.Timestamp
;
import
java.util.HashMap
;
import
java.util.List
;
import
java.util.Map
;
import
java.util.Random
;
@RunWith
(
SpringJUnit4ClassRunner
.
class
)
@SpringBootTest
public
class
TemperatureMapperTest
{
private
static
Random
random
=
new
Random
(
System
.
currentTimeMillis
());
private
static
String
[]
locations
=
{
"北京"
,
"上海"
,
"深圳"
,
"广州"
,
"杭州"
};
@Before
public
void
before
()
{
mapper
.
dropSuperTable
();
// create table temperature
mapper
.
createSuperTable
();
// create table t_X using temperature
for
(
int
i
=
0
;
i
<
10
;
i
++)
{
mapper
.
createTable
(
"t"
+
i
,
locations
[
random
.
nextInt
(
locations
.
length
)],
i
);
}
// insert into table
int
affectRows
=
0
;
// insert 10 tables
for
(
int
i
=
0
;
i
<
10
;
i
++)
{
// each table insert 5 rows
for
(
int
j
=
0
;
j
<
5
;
j
++)
{
Temperature
one
=
new
Temperature
();
one
.
setTs
(
new
Timestamp
(
1605024000000
l
));
one
.
setTemperature
(
random
.
nextFloat
()
*
50
);
one
.
setLocation
(
"望京"
);
one
.
setTbIndex
(
i
);
affectRows
+=
mapper
.
insertOne
(
one
);
}
}
Assert
.
assertEquals
(
50
,
affectRows
);
}
@After
public
void
after
()
{
mapper
.
dropSuperTable
();
}
@Autowired
private
TemperatureMapper
mapper
;
/***
* test SelectList
* **/
@Test
public
void
testSelectList
()
{
List
<
Temperature
>
temperatureList
=
mapper
.
selectList
(
null
);
temperatureList
.
forEach
(
System
.
out
::
println
);
}
/***
* test InsertOne which is a custom metheod
* ***/
@Test
public
void
testInsert
()
{
Temperature
one
=
new
Temperature
();
one
.
setTs
(
new
Timestamp
(
1605024000000
l
));
one
.
setTemperature
(
random
.
nextFloat
()
*
50
);
one
.
setLocation
(
"望京"
);
int
affectRows
=
mapper
.
insertOne
(
one
);
Assert
.
assertEquals
(
1
,
affectRows
);
}
/***
* test SelectOne
* **/
@Test
public
void
testSelectOne
()
{
QueryWrapper
<
Temperature
>
wrapper
=
new
QueryWrapper
<>();
wrapper
.
eq
(
"location"
,
"beijing"
);
Temperature
one
=
mapper
.
selectOne
(
wrapper
);
System
.
out
.
println
(
one
);
Assert
.
assertNotNull
(
one
);
}
/***
* test select By map
* ***/
@Test
public
void
testSelectByMap
()
{
Map
<
String
,
Object
>
map
=
new
HashMap
<>();
map
.
put
(
"location"
,
"beijing"
);
List
<
Temperature
>
temperatures
=
mapper
.
selectByMap
(
map
);
Assert
.
assertEquals
(
1
,
temperatures
.
size
());
}
/***
* test selectObjs
* **/
@Test
public
void
testSelectObjs
()
{
List
<
Object
>
ts
=
mapper
.
selectObjs
(
null
);
System
.
out
.
println
(
ts
);
}
/**
* test selectC ount
* **/
@Test
public
void
testSelectCount
()
{
int
count
=
mapper
.
selectCount
(
null
);
Assert
.
assertEquals
(
5
,
count
);
}
/****
* 分页
*/
@Test
public
void
testSelectPage
()
{
IPage
page
=
new
Page
(
1
,
2
);
IPage
<
Temperature
>
temperatureIPage
=
mapper
.
selectPage
(
page
,
null
);
System
.
out
.
println
(
"total : "
+
temperatureIPage
.
getTotal
());
System
.
out
.
println
(
"pages : "
+
temperatureIPage
.
getPages
());
for
(
Temperature
temperature
:
temperatureIPage
.
getRecords
())
{
System
.
out
.
println
(
temperature
);
}
}
}
\ No newline at end of file
tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
0 → 100644
浏览文件 @
8f46f81f
package
com.taosdata.example.mybatisplusdemo.mapper
;
import
com.baomidou.mybatisplus.core.conditions.query.QueryWrapper
;
import
com.baomidou.mybatisplus.core.metadata.IPage
;
import
com.baomidou.mybatisplus.extension.plugins.pagination.Page
;
import
com.taosdata.example.mybatisplusdemo.domain.Weather
;
import
org.junit.Assert
;
import
org.junit.Test
;
import
org.junit.runner.RunWith
;
import
org.springframework.beans.factory.annotation.Autowired
;
import
org.springframework.boot.test.context.SpringBootTest
;
import
org.springframework.test.context.junit4.SpringJUnit4ClassRunner
;
import
java.sql.Timestamp
;
import
java.util.HashMap
;
import
java.util.List
;
import
java.util.Map
;
import
java.util.Random
;
@RunWith
(
SpringJUnit4ClassRunner
.
class
)
@SpringBootTest
public
class
WeatherMapperTest
{
private
static
Random
random
=
new
Random
(
System
.
currentTimeMillis
());
@Autowired
private
WeatherMapper
mapper
;
@Test
public
void
testSelectList
()
{
List
<
Weather
>
weathers
=
mapper
.
selectList
(
null
);
weathers
.
forEach
(
System
.
out
::
println
);
}
@Test
public
void
testInsert
()
{
Weather
one
=
new
Weather
();
one
.
setTs
(
new
Timestamp
(
1605024000000
l
));
one
.
setTemperature
(
random
.
nextFloat
()
*
50
);
one
.
setHumidity
(
random
.
nextInt
(
100
));
one
.
setLocation
(
"望京"
);
int
affectRows
=
mapper
.
insert
(
one
);
Assert
.
assertEquals
(
1
,
affectRows
);
}
@Test
public
void
testSelectOne
()
{
QueryWrapper
<
Weather
>
wrapper
=
new
QueryWrapper
<>();
wrapper
.
eq
(
"location"
,
"beijing"
);
Weather
one
=
mapper
.
selectOne
(
wrapper
);
System
.
out
.
println
(
one
);
Assert
.
assertEquals
(
12.22f
,
one
.
getTemperature
(),
0.00f
);
Assert
.
assertEquals
(
"beijing"
,
one
.
getLocation
());
}
@Test
public
void
testSelectByMap
()
{
Map
<
String
,
Object
>
map
=
new
HashMap
<>();
map
.
put
(
"location"
,
"beijing"
);
List
<
Weather
>
weathers
=
mapper
.
selectByMap
(
map
);
Assert
.
assertEquals
(
1
,
weathers
.
size
());
}
@Test
public
void
testSelectObjs
()
{
List
<
Object
>
ts
=
mapper
.
selectObjs
(
null
);
System
.
out
.
println
(
ts
);
}
@Test
public
void
testSelectCount
()
{
int
count
=
mapper
.
selectCount
(
null
);
// Assert.assertEquals(5, count);
System
.
out
.
println
(
count
);
}
@Test
public
void
testSelectPage
()
{
IPage
page
=
new
Page
(
1
,
2
);
IPage
<
Weather
>
weatherIPage
=
mapper
.
selectPage
(
page
,
null
);
System
.
out
.
println
(
"total : "
+
weatherIPage
.
getTotal
());
System
.
out
.
println
(
"pages : "
+
weatherIPage
.
getPages
());
for
(
Weather
weather
:
weatherIPage
.
getRecords
())
{
System
.
out
.
println
(
weather
);
}
}
}
\ No newline at end of file
tests/examples/c/demo.c
浏览文件 @
8f46f81f
...
...
@@ -50,10 +50,10 @@ static void queryDB(TAOS *taos, char *command) {
  taos_free_result(pSql);
}

void Test(char *qstr, const char *input, int i);

int main(int argc, char *argv[]) {
  TAOS   *taos;
  char    qstr[1024];
  TAOS_RES *result;

  // connect to server
  if (argc < 2) {
...
...
@@ -63,41 +63,26 @@ int main(int argc, char *argv[]) {
  // init TAOS
  taos_init();

  taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
  for (int i = 0; i < 4000000; i++) {
    Test(qstr, argv[1], i);
  }
  taos_cleanup();
}

void Test(char *qstr, const char *input, int index) {
  TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0);
  printf("==================test at %d\n================================", index);
  queryDB(taos, "drop database if exists demo");
  queryDB(taos, "create database demo");

  TAOS_RES *result;
  if (taos == NULL) {
    printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
    exit(1);
  }
  printf("success to connect to server\n");

  //taos_query(taos, "drop database demo");
  queryDB(taos, "drop database if exists demo");

  //result = taos_query(taos, "create database demo");
  //if (result == NULL) {
  //  printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/);
  //  exit(1);
  //}
  queryDB(taos, "create database demo");
  printf("success to create database\n");

  //taos_query(taos, "use demo");
  queryDB(taos, "use demo");

  // create table
  //if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) {
  //  printf("failed to create table, reason:%s\n", taos_errstr(result));
  //  exit(1);
  //}
  queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))");
  printf("success to create table\n");

  // sleep for one second to make sure table is created on data node
  // taosMsleep(1000);

  // insert 10 records
  int i = 0;
  for (i = 0; i < 10; ++i) {
    sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')",
            1546300800000 + i * 1000, i, i, i, i * 10000000, i * 1.0, i * 2.0, "hello");
...
...
@@ -117,7 +102,6 @@ int main(int argc, char *argv[]) {
    }
    taos_free_result(result);

    //sleep(1);
  }
  printf("success to insert rows, total %d rows\n", i);
...
...
@@ -147,5 +131,6 @@ int main(int argc, char *argv[]) {
  taos_free_result(result);
  printf("====demo end====\n\n");
  return getchar();
  taos_close(taos);
}
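For readers following along from the Python test scripts below, the same create/insert/select flow can also be exercised through the taos Python connector that those scripts already use. This is an illustrative sketch, not part of the commit; the host and credentials are assumptions matching the defaults used elsewhere in this diff.

import taos  # TDengine Python connector, the same module used by tests/pytest below

# Assumed local deployment; adjust host/user/password to your setup.
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
c = conn.cursor()
c.execute("drop database if exists demo")
c.execute("create database demo")
c.execute("use demo")
c.execute("create table m1 (ts timestamp, ti tinyint, si smallint, i int, "
          "bi bigint, f float, d double, b binary(10))")
for i in range(10):  # mirror the 10-row insert loop of demo.c
    c.execute("insert into m1 values (%d, %d, %d, %d, %d, %f, %f, 'hello')"
              % (1546300800000 + i * 1000, i, i, i, i * 10000000, i * 1.0, i * 2.0))
c.execute("select * from m1")
print(c.fetchall())
c.close()
conn.close()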
tests/pytest/concurrent_inquiry.py
...
...
@@ -16,112 +16,202 @@ import sys
import json
import time
import random

# query sql
query_sql = [
    # first supertable
    "select count(*) from test.meters ;",
    "select count(*) from test.meters where t3 > 2;",
    "select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
    "select count(*) from test.meters where t7 like 'taos_1%';",
    "select count(*) from test.meters where t7 like '_____2';",
    "select count(*) from test.meters where t8 like '%思%';",
    "select count(*) from test.meters interval(1n) order by ts desc;",
    #"select max(c0) from test.meters group by tbname",
    "select first(ts) from test.meters where t5 >5000 and t5<5100;",
    "select last(ts) from test.meters where t5 >5000 and t5<5100;",
    "select last_row(*) from test.meters;",
    "select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c1) from test.meters where t5 >5000 and t5<5100;",
    "select bottom(c1, 2) from test.t1;",
    "select diff(c1) from test.t1;",
    "select leastsquares(c1, 1, 1) from test.t1 ;",
    "select max(c1) from test.meters where t5 >5000 and t5<5100;",
    "select min(c1) from test.meters where t5 >5000 and t5<5100;",
    "select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;",
    "select percentile(c1, 50) from test.t1;",
    "select spread(c1) from test.t1 ;",
    "select stddev(c1) from test.t1;",
    "select sum(c1) from test.meters where t5 >5000 and t5<5100;",
    "select top(c1, 2) from test.meters where t5 >5000 and t5<5100;"
    "select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c4) from test.meters where t5 >5000 and t5<5100;",
    "select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;",
    "select diff(c4) from test.t1 where t5 >5000 and t5<5100;",
    "select leastsquares(c4, 1, 1) from test.t1 ;",
    "select max(c4) from test.meters where t5 >5000 and t5<5100;",
    "select min(c4) from test.meters where t5 >5000 and t5<5100;",
    "select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;",
    "select percentile(c5, 50) from test.t1;",
    "select spread(c5) from test.t1 ;",
    "select stddev(c5) from test.t1 where t5 >5000 and t5<5100;",
    "select sum(c5) from test.meters where t5 >5000 and t5<5100;",
    "select top(c5, 2) from test.meters where t5 >5000 and t5<5100;",
    #all vnode
    "select count(*) from test.meters where t5 >5000 and t5<5100",
    "select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100",
    "select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100",
    "select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100",
    "select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100",
    # second supertable
    "select count(*) from test.meters1 where t3 > 2;",
    "select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
    "select count(*) from test.meters where t7 like 'taos_1%';",
    "select count(*) from test.meters where t7 like '_____2';",
    "select count(*) from test.meters where t8 like '%思%';",
    "select count(*) from test.meters1 interval(1n) order by ts desc;",
    #"select max(c0) from test.meters1 group by tbname",
    "select first(ts) from test.meters1 where t5 >5000 and t5<5100;",
    "select last(ts) from test.meters1 where t5 >5000 and t5<5100;",
    "select last_row(*) from test.meters1 ;",
    "select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;",
    "select diff(c1) from test.m1 ;",
    "select leastsquares(c1, 1, 1) from test.m1 ;",
    "select max(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select min(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select c1 + c2 + c1 / c0 + c2 from test.m1 ;",
    "select percentile(c1, 50) from test.m1;",
    "select spread(c1) from test.m1 ;",
    "select stddev(c1) from test.m1;",
    "select sum(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;",
    "select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select bottom(c5, 2) from test.m1;",
    "select diff(c5) from test.m1;",
    "select leastsquares(c5, 1, 1) from test.m1 ;",
    "select max(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select min(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select c5 + c2 + c4 / c5 + c0 from test.m1;",
    "select percentile(c4, 50) from test.m1;",
    "select spread(c4) from test.m1 ;",
    "select stddev(c4) from test.m1;",
    "select sum(c4) from test.meters1 where t5 >5100 and t5<5300;",
    "select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;",
    "select count(*) from test.meters1 where t5 >5100 and t5<5300",
    #all vnode
    "select count(*) from test.meters1 where t5 >5100 and t5<5300",
    "select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
    "select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
    "select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100",
    "select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100",
    #join
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5",
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7",
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8",
    # "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8"

import requests
from requests.auth import HTTPBasicAuth

func_list = ['avg', 'count', 'twa', 'sum', 'stddev', 'leastsquares', 'min', 'max', 'first', 'last',
             'top', 'bottom', 'percentile', 'apercentile', 'last_row', 'diff', 'spread']
condition_list = [
    "where _c0 > now -10d ",
    'interval(10s)',
    'limit 10',
    'group by',
    'order by',
    'fill(null)'
]
where_list = ['_c0>now-10d', ' <50', " like \'%a%\' "]


class ConcurrentInquiry:
    def initConnection(self):
        self.numOfTherads = 50
    def __init__(self, n_Therads=25, r_Therads=25):
        self.n_numOfTherads = n_Therads
        self.r_numOfTherads = r_Therads
        self.ts = 1500000001000
        self.dbname = 'test'
        self.stb_list = []
        self.subtb_list = []
        self.stb_stru_list = []
        self.subtb_stru_list = []
        self.stb_tag_list = []
        self.subtb_tag_list = []

    def SetThreadsNum(self, num):
        self.numOfTherads = num
    def query_thread(self, threadID):
        host = "10.211.55.14"

    def ret_fcol(self, cl, sql):  # return the first column of the result set
        cl.execute(sql)
        fcol_list = []
        for data in cl:
            fcol_list.append(data[0])
        return fcol_list

    def r_stb_list(self, cl):  # list all super tables
        sql = 'show ' + self.dbname + '.stables'
        self.stb_list = self.ret_fcol(cl, sql)

    def r_subtb_list(self, cl, stablename):  # take 2 sub-tables from each super table
        sql = 'select tbname from ' + self.dbname + '.' + stablename + ' limit 2;'
        self.subtb_list += self.ret_fcol(cl, sql)

    def cal_struct(self, cl, tbname):  # inspect a table's schema
        tb = []
        tag = []
        sql = 'describe ' + self.dbname + '.' + tbname + ';'
        cl.execute(sql)
        for data in cl:
            if data[3]:
                tag.append(data[0])
            else:
                tb.append(data[0])
        return tb, tag

    def r_stb_stru(self, cl):  # collect the schema of every super table
        for i in self.stb_list:
            tb, tag = self.cal_struct(cl, i)
            self.stb_stru_list.append(tb)
            self.stb_tag_list.append(tag)

    def r_subtb_stru(self, cl):  # collect the schema of every sub-table
        for i in self.subtb_list:
            tb, tag = self.cal_struct(cl, i)
            self.subtb_stru_list.append(tb)
            self.subtb_tag_list.append(tag)

    def get_full(self):  # fetch all tables and their schemas
        host = "127.0.0.1"
        user = "root"
        password = "taosdata"
        conn = taos.connect(
            host,
            user,
            password,
        )
        cl = conn.cursor()
        self.r_stb_list(cl)
        for i in self.stb_list:
            self.r_subtb_list(cl, i)
        self.r_stb_stru(cl)
        self.r_subtb_stru(cl)
        cl.close()
        conn.close()

    # query condition
    def con_where(self, tlist):
        l = []
        for i in range(random.randint(0, len(tlist))):
            c = random.choice(where_list)
            if c == '_c0>now-10d':
                l.append(c)
            else:
                l.append(random.choice(tlist) + c)
        return 'where ' + random.choice([' and ', ' or ']).join(l)

    def con_interval(self, tlist):
        return random.choice(['interval(10s)', 'interval(10d)', 'interval(1n)'])

    def con_limit(self, tlist):
        return random.choice(['limit 10', 'limit 10 offset 10', 'slimit 10',
                              'slimit 10 offset 10', 'limit 10 slimit 10',
                              'limit 10 offset 5 slimit 5 soffset 10'])

    def con_fill(self, tlist):
        return random.choice(['fill(null)', 'fill(prev)', 'fill(none)', 'fill(LINEAR)'])

    def con_group(self, tlist):
        return 'group by ' + random.choice(tlist)

    def con_order(self, tlist):
        return 'order by ' + random.choice(tlist)

    def gen_query_sql(self):  # generate a query statement
        tbi = random.randint(0, len(self.subtb_list) + len(self.stb_list))  # randomly decide which table to query
        tbname = ''
        col_list = []
        tag_list = []
        is_stb = 0
        if tbi > len(self.stb_list):
            tbi = tbi - len(self.stb_list)
            tbname = self.subtb_list[tbi - 1]
            col_list = self.subtb_stru_list[tbi - 1]
            tag_list = self.subtb_tag_list[tbi - 1]
        else:
            tbname = self.stb_list[tbi - 1]
            col_list = self.stb_stru_list[tbi - 1]
            tag_list = self.stb_tag_list[tbi - 1]
            is_stb = 1
        tlist = col_list + tag_list
        con_rand = random.randint(0, len(condition_list))
        func_rand = random.randint(0, len(func_list))
        col_rand = random.randint(0, len(col_list))
        tag_rand = random.randint(0, len(tag_list))
        t_rand = random.randint(0, len(tlist))
        sql = 'select '  # select
        random.shuffle(col_list)
        random.shuffle(func_list)
        sel_col_list = []
        col_rand = random.randint(0, len(col_list))
        for i, j in zip(col_list[0:col_rand], func_list):  # choose the function applied to each selected column
            if j == 'leastsquares':
                sel_col_list.append(j + '(' + i + ',1,1)')
            elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
                sel_col_list.append(j + '(' + i + ',1)')
            else:
                sel_col_list.append(j + '(' + i + ')')
        sql = sql + ','.join(sel_col_list) + ' from ' + random.choice(self.stb_list + self.subtb_list) + ' '  # select col & func
        con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group, self.con_order, self.con_fill]
        sel_con = random.sample(con_func, random.randint(0, len(con_func)))
        sel_con_list = []
        for i in sel_con:
            sel_con_list.append(i(tlist))  # call the chosen condition helpers
        sql += ' '.join(sel_con_list)  # condition
        print(sql)
        return sql

    def rest_query(self, sql):  # REST interface
        host = "127.0.0.1"
        user = "root"
        password = "taosdata"
        port = 6041
        url = "http://{}:{}/rest/sql".format(host, port)
        try:
            r = requests.post(url, data='use test', auth=HTTPBasicAuth('root', 'taosdata'))
            r = requests.post(url, data=sql, auth=HTTPBasicAuth('root', 'taosdata'))
        except:
            print("REST API Failure (TODO: more info here)")
            raise
        rj = r.json()
        if ('status' not in rj):
            raise RuntimeError("No status in REST response")

        if rj['status'] == 'error':  # clearly reported error
            if ('code' not in rj):  # error without code
                raise RuntimeError("REST error return without code")
            errno = rj['code']
            # May need to massage this in the future
            # print("Raising programming error with REST return: {}".format(rj))
            raise taos.error.ProgrammingError(rj['desc'], errno)  # todo: check existance of 'desc'

        if rj['status'] != 'succ':  # better be this
            raise RuntimeError("Unexpected REST return status: {}".format(rj['status']))

        nRows = rj['rows'] if ('rows' in rj) else 0
        return nRows

    def query_thread_n(self, threadID):  # query through the native Python connector
        host = "127.0.0.1"
        user = "root"
        password = "taosdata"
        conn = taos.connect(
...
...
@@ -135,35 +225,59 @@ class ConcurrentInquiry:
        print("Thread %d: starting" % threadID)

        while True:
            ran_query_sql = query_sql
            random.shuffle(ran_query_sql)
            for i in ran_query_sql:
                print("Thread %d : %s" % (threadID, i))
            try:
                sql = self.gen_query_sql()
                print("sql is ", sql)
                start = time.time()
                cl.execute(i)
                cl.fetchall
                cl.execute(sql)
                cl.fetchall()
                end = time.time()
                print("time cost :", end - start)
            except Exception as e:
                print("Failure thread%d, sql: %s,exception: %s" %
                      (threadID, str(i), str(e)))
                exit(-1)
                      (threadID, str(sql), str(e)))
                # exit(-1)

            print("Thread %d: finishing" % threadID)
        print("Thread %d: finishing" % threadID)

    def query_thread_r(self, threadID):  # query through the REST interface
        print("Thread %d: starting" % threadID)
        while True:
            try:
                sql = self.gen_query_sql()
                print("sql is ", sql)
                start = time.time()
                self.rest_query(sql)
                end = time.time()
                print("time cost :", end - start)
            except Exception as e:
                print("Failure thread%d, sql: %s,exception: %s" %
                      (threadID, str(sql), str(e)))
                # exit(-1)

            print("Thread %d: finishing" % threadID)

    def run(self):
        print(self.n_numOfTherads, self.r_numOfTherads)
        threads = []
        for i in range(self.numOfTherads):
            thread = threading.Thread(target=self.query_thread, args=(i,))
        for i in range(self.n_numOfTherads):
            thread = threading.Thread(target=self.query_thread_n, args=(i,))
            threads.append(thread)
            thread.start()

q = ConcurrentInquiry()
q.initConnection()
        for i in range(self.r_numOfTherads):
        # for i in range(1):
            thread = threading.Thread(target=self.query_thread_r, args=(i,))
            threads.append(thread)
            thread.start()

if len(sys.argv) > 1:
    q = ConcurrentInquiry(n_Therads=sys.argv[1], r_Therads=sys.argv[2])
else:
    q = ConcurrentInquiry()

q.get_full()
# q.gen_query_sql()
q.run()
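The REST path exercised by rest_query above can be smoke-tested on its own before launching dozens of threads. A condensed sketch follows; host, port and credentials are the script's own defaults and are assumptions about the local deployment.

import requests
from requests.auth import HTTPBasicAuth

URL = "http://127.0.0.1:6041/rest/sql"      # default RESTful endpoint used by rest_query
AUTH = HTTPBasicAuth("root", "taosdata")

def rest_rows(sql):
    """Post one SQL statement and return the reported row count (0 if absent)."""
    requests.post(URL, data="use test", auth=AUTH)   # select the database first, as rest_query does
    rj = requests.post(URL, data=sql, auth=AUTH).json()
    if rj.get("status") == "error":
        raise RuntimeError("REST error %s: %s" % (rj.get("code"), rj.get("desc")))
    return rj.get("rows", 0)

print(rest_rows("select count(*) from test.meters;"))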
tests/pytest/cq.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import threading
import taos
import sys
import json
import time
import random

# query sql
query_sql = [
    # first supertable
    "select count(*) from test.meters ;",
    "select count(*) from test.meters where t3 > 2;",
    "select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
    "select count(*) from test.meters where t7 like 'taos_1%';",
    "select count(*) from test.meters where t7 like '_____2';",
    "select count(*) from test.meters where t8 like '%思%';",
    "select count(*) from test.meters interval(1n) order by ts desc;",
    #"select max(c0) from test.meters group by tbname",
    "select first(ts) from test.meters where t5 >5000 and t5<5100;",
    "select last(ts) from test.meters where t5 >5000 and t5<5100;",
    "select last_row(*) from test.meters;",
    "select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c1) from test.meters where t5 >5000 and t5<5100;",
    "select bottom(c1, 2) from test.t1;",
    "select diff(c1) from test.t1;",
    "select leastsquares(c1, 1, 1) from test.t1 ;",
    "select max(c1) from test.meters where t5 >5000 and t5<5100;",
    "select min(c1) from test.meters where t5 >5000 and t5<5100;",
    "select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;",
    "select percentile(c1, 50) from test.t1;",
    "select spread(c1) from test.t1 ;",
    "select stddev(c1) from test.t1;",
    "select sum(c1) from test.meters where t5 >5000 and t5<5100;",
    "select top(c1, 2) from test.meters where t5 >5000 and t5<5100;"
    "select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c4) from test.meters where t5 >5000 and t5<5100;",
    "select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;",
    "select diff(c4) from test.t1 where t5 >5000 and t5<5100;",
    "select leastsquares(c4, 1, 1) from test.t1 ;",
    "select max(c4) from test.meters where t5 >5000 and t5<5100;",
    "select min(c4) from test.meters where t5 >5000 and t5<5100;",
    "select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;",
    "select percentile(c5, 50) from test.t1;",
    "select spread(c5) from test.t1 ;",
    "select stddev(c5) from test.t1 where t5 >5000 and t5<5100;",
    "select sum(c5) from test.meters where t5 >5000 and t5<5100;",
    "select top(c5, 2) from test.meters where t5 >5000 and t5<5100;",
    #all vnode
    "select count(*) from test.meters where t5 >5000 and t5<5100",
    "select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100",
    "select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100",
    "select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100",
    "select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100",
    # second supertable
    "select count(*) from test.meters1 where t3 > 2;",
    "select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
    "select count(*) from test.meters where t7 like 'taos_1%';",
    "select count(*) from test.meters where t7 like '_____2';",
    "select count(*) from test.meters where t8 like '%思%';",
    "select count(*) from test.meters1 interval(1n) order by ts desc;",
    #"select max(c0) from test.meters1 group by tbname",
    "select first(ts) from test.meters1 where t5 >5000 and t5<5100;",
    "select last(ts) from test.meters1 where t5 >5000 and t5<5100;",
    "select last_row(*) from test.meters1 ;",
    "select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;",
    "select diff(c1) from test.m1 ;",
    "select leastsquares(c1, 1, 1) from test.m1 ;",
    "select max(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select min(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select c1 + c2 + c1 / c0 + c2 from test.m1 ;",
    "select percentile(c1, 50) from test.m1;",
    "select spread(c1) from test.m1 ;",
    "select stddev(c1) from test.m1;",
    "select sum(c1) from test.meters1 where t5 >5000 and t5<5100;",
    "select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;",
    "select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000",
    "select avg(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select bottom(c5, 2) from test.m1;",
    "select diff(c5) from test.m1;",
    "select leastsquares(c5, 1, 1) from test.m1 ;",
    "select max(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select min(c5) from test.meters1 where t5 >5000 and t5<5100;",
    "select c5 + c2 + c4 / c5 + c0 from test.m1;",
    "select percentile(c4, 50) from test.m1;",
    "select spread(c4) from test.m1 ;",
    "select stddev(c4) from test.m1;",
    "select sum(c4) from test.meters1 where t5 >5100 and t5<5300;",
    "select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;",
    "select count(*) from test.meters1 where t5 >5100 and t5<5300",
    #all vnode
    "select count(*) from test.meters1 where t5 >5100 and t5<5300",
    "select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
    "select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
    "select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100",
    "select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100",
    #join
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5",
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7",
    # "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8",
    # "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8"
]


class ConcurrentInquiry:
    def initConnection(self):
        self.numOfTherads = 50
        self.ts = 1500000001000

    def SetThreadsNum(self, num):
        self.numOfTherads = num

    def query_thread(self, threadID):
        host = "10.211.55.14"
        user = "root"
        password = "taosdata"
        conn = taos.connect(
            host,
            user,
            password,
        )
        cl = conn.cursor()
        cl.execute("use test;")

        print("Thread %d: starting" % threadID)

        while True:
            ran_query_sql = query_sql
            random.shuffle(ran_query_sql)
            for i in ran_query_sql:
                print("Thread %d : %s" % (threadID, i))
                try:
                    start = time.time()
                    cl.execute(i)
                    cl.fetchall()
                    end = time.time()
                    print("time cost :", end - start)
                except Exception as e:
                    print("Failure thread%d, sql: %s,exception: %s" %
                          (threadID, str(i), str(e)))
                    exit(-1)

            print("Thread %d: finishing" % threadID)

    def run(self):
        threads = []
        for i in range(self.numOfTherads):
            thread = threading.Thread(target=self.query_thread, args=(i,))
            threads.append(thread)
            thread.start()


q = ConcurrentInquiry()
q.initConnection()
q.run()
tests/pytest/functions/function_arithmetic.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute('''create table test(ts timestamp, col1 int, col2 int) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using test tags('beijing')")
        tdSql.execute("create table test2 using test tags('shanghai')")
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))
            tdSql.execute("insert into test2 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))

        # arithmetic verification
        tdSql.query("select 0.1 + 0.1 from test")
        tdSql.checkRows(self.rowNum * 2)
        for i in range(self.rowNum * 2):
            tdSql.checkData(0, 0, 0.20000000)

        tdSql.query("select 4 * avg(col1) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 22)

        tdSql.query("select 4 * sum(col1) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 440)

        tdSql.query("select 4 * avg(col1) * sum(col2) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 2420)

        tdSql.query("select 4 * avg(col1) * sum(col2) from test group by loc")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 1210)
        tdSql.checkData(1, 0, 1210)

        tdSql.error("select avg(col1 * 2)from test group by loc")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
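The constants checked above follow directly from the generated data (col1 = col2 = 1..10 in each of the two sub-tables, 20 rows in the super table). A stand-alone arithmetic check, outside the test framework, for anyone adjusting rowNum:

# 20 rows in the super table: values 1..10 from test1 plus 1..10 from test2.
rows = list(range(1, 11)) * 2

avg_col1 = sum(rows) / len(rows)   # 5.5
sum_col1 = sum(rows)               # 110 across both sub-tables
sum_col2 = sum(rows)               # 110

assert 4 * avg_col1 == 22
assert 4 * sum_col1 == 440
assert 4 * avg_col1 * sum_col2 == 2420

# Per "group by loc": each sub-table alone has avg(col1) = 5.5 and sum(col2) = 55.
assert 4 * 5.5 * 55 == 1210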
tests/pytest/insert/restfulInsert.py
...
...
@@ -15,25 +15,28 @@ import requests
import threading
import random
import time
import argparse


class RestfulInsert:
    def init(self):
    def __init__(self, host, dbname, threads, tables, records, batchSize, tbNamePerfix, outOfOrder):
        self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
        self.url = "http://127.0.0.1:6041/rest/sql"
        self.url = "http://%s:6041/rest/sql" % host
        self.ts = 1500000000000
        self.numOfThreads = 20
        self.numOfTables = 10000
        self.recordsPerTable = 10000
        self.batchSize = 1000
        self.tableNamePerfix = 't'
        self.dbname = dbname
        self.numOfThreads = threads
        self.numOfTables = tables
        self.recordsPerTable = records
        self.batchSize = batchSize
        self.tableNamePerfix = tbNamePerfix
        self.outOfOrder = outOfOrder

    def createTable(self, threadID):
        tablesPerThread = int(self.numOfTables / self.numOfThreads)
        tablesPerThread = int(self.numOfTables / self.numOfThreads)
        print("create table %d to %d" % (tablesPerThread * threadID, tablesPerThread * (threadID + 1) - 1))
        for i in range(tablesPerThread):
            tableID = threadID * tablesPerThread
            name = 'beijing' if tableID % 2 == 0 else 'shanghai'
            data = "create table test.%s%d using test.meters tags(%d, '%s')" % (self.tableNamePerfix, tableID + i, tableID + i, name)
            data = "create table %s.%s%d using %s.meters tags(%d, '%s')" % (self.dbname, self.tableNamePerfix, tableID + i, self.dbname, tableID + i, name)
            requests.post(self.url, data, headers=self.header)

    def insertData(self, threadID):
...
...
@@ -43,17 +46,42 @@ class RestfulInsert:
            tableID = i + threadID * tablesPerThread
            start = self.ts
            for j in range(int(self.recordsPerTable / self.batchSize)):
                data = "insert into test.%s%d values" % (self.tableNamePerfix, tableID)
                data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID)
                values = []
                for k in range(self.batchSize):
                    data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))
                    data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))
                requests.post(self.url, data, headers=self.header)

    def insertUnlimitedData(self, threadID):
        print("thread %d started" % threadID)
        tablesPerThread = int(self.numOfTables / self.numOfThreads)
        while True:
            i = 0
            start = self.ts
            for i in range(tablesPerThread):
                tableID = i + threadID * tablesPerThread

                data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID)
                values = []
                for k in range(self.batchSize):
                    values.append("(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)))

                if (self.outOfOrder == False):
                    for k in range(len(values)):
                        data += values[k]
                else:
                    random.shuffle(values)
                    for k in range(len(values)):
                        data += values[k]
                requests.post(self.url, data, headers=self.header)

    def run(self):
        data = "drop database if exists test"
        data = "drop database if exists %s" % self.dbname
        requests.post(self.url, data, headers=self.header)
        data = "create database test"
        data = "create database %s" % self.dbname
        requests.post(self.url, data, headers=self.header)
        data = "create table test.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))"
        data = "create table %s.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" % self.dbname
        requests.post(self.url, data, headers=self.header)

        threads = []
...
...
@@ -70,7 +98,10 @@ class RestfulInsert:
        threads = []
        startTime = time.time()
        for i in range(self.numOfThreads):
            thread = threading.Thread(target=self.insertData, args=(i,))
            if (self.recordsPerTable != -1):
                thread = threading.Thread(target=self.insertData, args=(i,))
            else:
                thread = threading.Thread(target=self.insertUnlimitedData, args=(i,))
            thread.start()
            threads.append(thread)
...
...
@@ -78,6 +109,62 @@ class RestfulInsert:
            threads[i].join()
        print("inserting %d records takes %d seconds" % (self.numOfTables * self.recordsPerTable, (time.time() - startTime)))

ri = RestfulInsert()
ri.init()
parser = argparse.ArgumentParser()
parser.add_argument(
    '-H',
    '--host-name',
    action='store',
    default='127.0.0.1',
    type=str,
    help='host name to be connected (default: 127.0.0.1)')
parser.add_argument(
    '-d',
    '--db-name',
    action='store',
    default='test',
    type=str,
    help='Database name to be created (default: test)')
parser.add_argument(
    '-t',
    '--number-of-threads',
    action='store',
    default=10,
    type=int,
    help='Number of threads to create tables and insert datas (default: 10)')
parser.add_argument(
    '-T',
    '--number-of-tables',
    action='store',
    default=1000,
    type=int,
    help='Number of tables to be created (default: 1000)')
parser.add_argument(
    '-r',
    '--number-of-records',
    action='store',
    default=1000,
    type=int,
    help='Number of record to be created for each table (default: 1000, -1 for unlimited records)')
parser.add_argument(
    '-s',
    '--batch-size',
    action='store',
    default='1000',
    type=int,
    help='Number of tables to be created (default: 1000)')
parser.add_argument(
    '-p',
    '--table-name-prefix',
    action='store',
    default='t',
    type=str,
    help='Number of tables to be created (default: 1000)')
parser.add_argument(
    '-o',
    '--out-of-order',
    action='store_true',
    help='The order of test data (default: False)')

args = parser.parse_args()
ri = RestfulInsert(args.host_name, args.db_name, args.number_of_threads, args.number_of_tables,
                   args.number_of_records, args.batch_size, args.table_name_prefix, args.out_of_order)
ri.run()
\ No newline at end of file
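Before running the full insert benchmark, the REST endpoint and the pre-encoded Basic-auth header used above can be checked with a few one-off statements. This is a minimal sketch; host, port and database name mirror the script's defaults and should be adjusted to the actual deployment.

import requests

URL = "http://127.0.0.1:6041/rest/sql"                    # matches the default -H/--host-name and REST port
HEADER = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}  # base64("root:taosdata"), same value as in the script

for sql in (
        "create database if not exists test",
        "create table if not exists test.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))",
        "create table if not exists test.t0 using test.meters tags(0, 'beijing')",
        "insert into test.t0 values (now, 1, 2, 3)"):
    resp = requests.post(URL, data=sql, headers=HEADER)
    print(sql, "->", resp.json().get("status"))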