提交 4f5979bb 编写于 作者: H Haojun Liao

Merge branch 'develop' into feature/query

......@@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture)[数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)
# 生成
# 构建
TDengine目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
......@@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
git submodule update --init --recursive
```
## 生成 TDengine
## 构建 TDengine
### Linux 系统
......@@ -116,6 +116,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc:
```bash
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64:
......
......@@ -110,6 +110,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
You can use Jemalloc as memory allocator instead of glibc:
```
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
......
......@@ -39,7 +39,7 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
......
......@@ -1496,7 +1496,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
if (pAlterInfo->tagData.dataLen > 0) {
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
}
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
......
......@@ -512,6 +512,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");
......
......@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
// int64_t k = timezone;
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
......
......@@ -289,6 +289,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
}
}
static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) {
ASSERT(row < pCols->numOfRows);
return dataColsKeyAt(pCols, row);
}
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);
......
......@@ -452,7 +452,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
SDataCols *pTarget = NULL;
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
......
......@@ -303,6 +303,8 @@ static int32_t dnodeInitStorage() {
dnodeCheckDataDirOpenned(tsDnodeDir);
taosGetDisk();
taosPrintDiskInfo();
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}
......
此差异已折叠。
......@@ -29,6 +29,9 @@
#define COMMAND_SIZE 65536
//#define DEFAULT_DUMP_FILE "taosdump.sql"
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
......@@ -1119,12 +1122,11 @@ int taosGetTableDes(
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
......@@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
......
......@@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
int64_t x = (us&0x000000FFFFFFFFFF);
x = x<<24;
pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
......
......@@ -27,18 +27,20 @@ typedef struct {
} SysDiskSize;
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
void taosGetSystemInfo();
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
bool taosGetProcMemory(float *memoryUsedMB);
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ;
bool taosGetProcMemory(float *memoryUsedMB) ;
bool taosGetSysMemory(float *memoryUsedMB);
void taosPrintOsInfo();
int taosSystem(const char *cmd);
void taosPrintDiskInfo();
int taosSystem(const char * cmd) ;
void taosKillSystem();
bool taosGetSystemUid(char *uid);
char * taosGetCmdlineByPID(int pid);
char *taosGetCmdlineByPID(int pid);
void taosSetCoreDump();
......
......@@ -136,9 +136,6 @@ void taosPrintOsInfo() {
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
......@@ -154,6 +151,14 @@ void taosPrintOsInfo() {
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
uError("function taosKillSystem, exit!");
exit(0);
......
......@@ -506,9 +506,6 @@ void taosPrintOsInfo() {
uInfo(" os openMax: %" PRId64, tsOpenMax);
uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
......@@ -523,6 +520,14 @@ void taosPrintOsInfo() {
uInfo(" os machine: %s", buf.machine);
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
// SIGINT
uInfo("taosd will shut down soon");
......
......@@ -205,10 +205,15 @@ void taosGetSystemInfo() {
void taosPrintOsInfo() {
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
......
......@@ -7330,11 +7330,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STSBuf *pTsBuf = NULL;
if (pTsBufInfo->tsLen > 0) { // open new file to save the result
char *tsBlock = start + pTsBufInfo->tsOffset;
char* tsBlock = start + pTsBufInfo->tsOffset;
pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
pQueryAttr->vgId);
if (pTsBuf == NULL) {
code = TSDB_CODE_QRY_NO_DISKSPACE;
goto _error;
}
tsBufResetPos(pTsBuf);
bool ret = tsBufNextPos(pTsBuf);
UNUSED(ret);
......
......@@ -2,6 +2,7 @@
#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
#include "queryLog.h"
static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
......@@ -633,10 +634,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
int32_t r = fseek(pTSBuf->f, 0, SEEK_SET);
if (r != 0) {
qError("fseek failed, errno:%d", errno);
return -1;
}
fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
if (ws != 1) {
qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader));
return -1;
}
return 0;
}
......@@ -853,9 +859,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo);
int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
UNUSED(ret);
if (ret == -1) {
qError("fseek failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f);
UNUSED(sz);
if (sz != len) {
qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len);
tsBufDestroy(pTSBuf);
return NULL;
}
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
......@@ -863,9 +877,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};
STSBufUpdateHeader(pTSBuf, &header);
if (STSBufUpdateHeader(pTSBuf, &header) < 0) {
tsBufDestroy(pTSBuf);
return NULL;
}
taosFsync(fileno(pTSBuf->f));
if (taosFsync(fileno(pTSBuf->f)) == -1) {
qError("fsync failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
return pTSBuf;
}
......
......@@ -10,6 +10,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
typedef struct ResultObj {
int32_t numOfResult;
......
......@@ -5,6 +5,10 @@
#include "taos.h"
#include "qHistogram.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
void doHistogramAddTest() {
SHistogramInfo* pHisto = NULL;
......
......@@ -6,6 +6,9 @@
#include "qAggMain.h"
#include "tcompare.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
TEST(testCase, patternMatchTest) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
......
......@@ -7,6 +7,9 @@
#include "qPercentile.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) {
tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end);
......
......@@ -6,6 +6,9 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
// simple test
void simpleTest() {
......
......@@ -9,6 +9,10 @@
#include "ttoken.h"
#include "tutil.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
namespace {
/**
*
......
......@@ -6,14 +6,17 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wsign-compare"
#include "../../client/inc/tscUtil.h"
#include "tutil.h"
#include "tvariant.h"
#include "ttokendef.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
namespace {
int32_t testValidateName(char* name) {
SStrToken token = {0};
......
......@@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) {
return -1;
}
if (realpath(wep.we_wordv[0], odir) == NULL) {
char tmp[PATH_MAX] = {0};
if (realpath(wep.we_wordv[0], tmp) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
wordfree(&wep);
return -1;
}
strcpy(odir, tmp);
wordfree(&wep);
return 0;
......
......@@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
wordfree(&full_path);
char tmp[1025] = {0};
char tmp[PATH_MAX] = {0};
if (realpath(option, tmp) != NULL) {
strcpy(option, tmp);
}
......
......@@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
# python3 ./test.py -f query/nestedQuery/queryInterval.py
python3 ./test.py -f query/queryStateWindow.py
#stream
......@@ -325,6 +327,7 @@ python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
    """Functional test for the irate() aggregate on ms- and us-precision databases."""

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        # Defect fix: pass logSql through so SQL statement logging honors the
        # caller's flag, consistent with the other test cases in this suite.
        tdSql.init(conn.cursor(), logSql)
        self.rowNum = 100
        self.ts = 1537146000000        # base timestamp for the ms-precision db
        self.ts1 = 1537146000000000    # base timestamp for the us-precision db

    def run(self):
        # ---- db precision: ms ----
        tdSql.prepare()
        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
        tdSql.execute("create table test1 using test tags('beijing', 10)")
        tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
        tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")

        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
        tdSql.execute("insert into gtest1 values(1537146000000,0);")
        tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
        tdSql.execute("insert into gtest2 values(1537146001001,1);")
        tdSql.execute("insert into gtest2 values(1537146001101,2);")
        tdSql.execute("insert into gtest3 values(1537146001101,2);")
        tdSql.execute("insert into gtest4(ts) values(1537146001101);")
        tdSql.execute("insert into gtest5 values(1537146001002,4);")
        tdSql.execute("insert into gtest5 values(1537146002202,4);")
        tdSql.execute("insert into gtest6 values(1537146000000,5);")
        tdSql.execute("insert into gtest6 values(1537146001000,2);")
        tdSql.execute("insert into gtest7 values(1537146001000,1);")
        tdSql.execute("insert into gtest7 values(1537146008000,2);")
        tdSql.execute("insert into gtest7 values(1537146009000,6);")
        tdSql.execute("insert into gtest7 values(1537146012000,3);")
        tdSql.execute("insert into gtest7 values(1537146015000,3);")
        tdSql.execute("insert into gtest7 values(1537146017000,1);")
        tdSql.execute("insert into gtest7 values(1537146019000,3);")
        tdSql.execute("insert into gtest8 values(1537146000002,4);")
        tdSql.execute("insert into gtest8 values(1537146002202,4);")

        # irate verification (ms precision)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from test1 interval(10s);")
        tdSql.checkData(0, 1, 1)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col3) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col4) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col5) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col6) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col11) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col12) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col13) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col14) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest1;")
        tdSql.checkData(0, 0, 1.2/1.1)
        tdSql.query("select irate(col1) from gtest2;")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select irate(col1) from gtest3;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest4;")
        tdSql.checkRows(0)
        tdSql.query("select irate(col1) from gtest5;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest6;")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select irate(col1) from gtest7;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
        tdSql.checkData(1, 1, 4)
        tdSql.checkData(2, 1, 0)
        tdSql.checkData(3, 1, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
        tdSql.checkData(1, 1, 0)
        tdSql.checkData(2, 1, 4)
        tdSql.checkData(3, 1, 0)

        # error cases: irate rejects super tables, timestamps, bool/str columns and tags
        tdSql.error("select irate(col1) from test")
        tdSql.error("select irate(ts) from test1")
        tdSql.error("select irate(col7) from test1")
        tdSql.error("select irate(col8) from test1")
        tdSql.error("select irate(col9) from test1")
        tdSql.error("select irate(loc) from test1")
        tdSql.error("select irate(tag1) from test1")

        # ---- db1 precision: us ----
        tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1")
        tdSql.execute("use db1 ")
        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using test tags('beijing')")
        tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
        tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)")

        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
        tdSql.execute("insert into gtest1 values(1537146000000000,0);")
        tdSql.execute("insert into gtest1 values(1537146001100000,1.2);")
        tdSql.execute("insert into gtest2 values(1537146001001000,1);")
        tdSql.execute("insert into gtest2 values(1537146001101000,2);")
        tdSql.execute("insert into gtest3 values(1537146001101000,2);")
        tdSql.execute("insert into gtest4(ts) values(1537146001101000);")
        tdSql.execute("insert into gtest5 values(1537146001002000,4);")
        tdSql.execute("insert into gtest5 values(1537146002202000,4);")
        tdSql.execute("insert into gtest6 values(1537146000000000,5);")
        tdSql.execute("insert into gtest6 values(1537146001000000,2);")
        tdSql.execute("insert into gtest7 values(1537146001000000,1);")
        tdSql.execute("insert into gtest7 values(1537146008000000,2);")
        tdSql.execute("insert into gtest7 values(1537146009000000,6);")
        tdSql.execute("insert into gtest7 values(1537146012000000,3);")
        tdSql.execute("insert into gtest7 values(1537146015000000,3);")
        tdSql.execute("insert into gtest7 values(1537146017000000,1);")
        tdSql.execute("insert into gtest7 values(1537146019000000,3);")
        tdSql.execute("insert into gtest8 values(1537146000002000,3);")
        tdSql.execute("insert into gtest8 values(1537146001003000,4);")
        tdSql.execute("insert into gtest9 values(1537146000000000,4);")
        tdSql.execute("insert into gtest9 values(1537146000000001,5);")

        # irate verification (us precision)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from test1 interval(10s);")
        tdSql.checkData(0, 1, 1)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest1;")
        tdSql.checkData(0, 0, 1.2/1.1)
        tdSql.query("select irate(col1) from gtest2;")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select irate(col1) from gtest3;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest4;")
        tdSql.checkRows(0)
        tdSql.query("select irate(col1) from gtest5;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest6;")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select irate(col1) from gtest7;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
        tdSql.checkData(1, 1, 4)
        tdSql.checkData(2, 1, 0)
        tdSql.checkData(3, 1, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
        tdSql.checkData(1, 1, 0)
        tdSql.checkData(2, 1, 4)
        tdSql.checkData(3, 1, 0)
        tdSql.query("select irate(col1) from gtest8;")
        tdSql.checkData(0, 0, 1/1.001)
        tdSql.query("select irate(col1) from gtest9;")
        tdSql.checkData(0, 0, 1000000)

        # error cases
        tdSql.error("select irate(col1) from test")
        tdSql.error("select irate(ts) from test1")
        tdSql.error("select irate(col7) from test1")
        tdSql.error("select irate(col8) from test1")
        tdSql.error("select irate(col9) from test1")
        tdSql.error("select irate(loc) from test1")
        tdSql.error("select irate(tag1) from test1")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
此差异已折叠。
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    """Runs taosdemo with the nested-query insert config and verifies row counts."""

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        """Locate the project build directory that contains the taosd binary.

        Returns "" when taosd cannot be found so the caller's empty-string
        check works as intended.
        """
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        # Defect fix: buildPath was previously unbound when taosd was absent,
        # raising NameError instead of returning the sentinel "".
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 100)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 100000)
        tdSql.query("select count(*) from stb01_1")
        tdSql.checkData(0, 0, 200)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 200000)

        # clean up generated artifacts
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
import random
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts1 = 1593548685000
self.ts2 = 1593548785000
def run(self):
# tdSql.execute("drop database db ")
tdSql.prepare()
tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))")
node = 5
number = 10
for n in range(node):
for m in range(number):
dt= m*300000+n*60000 # collecting'frequency is 10s
args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n)
# args2=(n,self.ts2+dt,n,120+n,15+n)
tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d, %d)" % args1)
# tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2)
# interval function
tdSql.query("select avg(value) from st interval(10m)")
# print(tdSql.queryResult)
tdSql.checkRows(6)
tdSql.checkData(0, 0, "2020-07-01 04:20:00")
tdSql.checkData(1, 1, 107.4)
# subquery with interval
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
tdSql.checkData(0, 0, 109.0)
# subquery with interval and select two Column in parent query
tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
# subquery with interval and sliding
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;")
tdSql.checkData(0, 0, "2020-07-01 04:17:00")
tdSql.checkData(0, 1, 100)
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));")
tdSql.checkData(0, 0, 111)
# subquery with interval and offset
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);")
tdSql.checkData(0, 0, "2020-07-01 04:21:00")
tdSql.checkData(0, 1, 100)
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);")
tdSql.checkData(0, 0, 109)
# subquery with interval,sliding and group by ; parent query with interval
tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;")
tdSql.checkData(0, 0, "2020-07-01 05:09:00")
tdSql.checkData(0, 1, 118)
tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);")
tdSql.checkData(1, 1, 105)
# # subquery and parent query with interval and sliding
tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);")
tdSql.checkData(29, 0, "2020-07-01 05:10:00.000")
# subquery and parent query with top and bottom
tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;")
tdSql.checkData(0, 1, 117)
tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;")
tdSql.checkData(0, 1, 111)
#
tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));")
tdSql.checkData(0, 1, 120)
# clear env
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf wal/%s.sql" % testcaseFilename )
def stop(self):
    # Tear down: release the SQL connection and log that the case passed.
    tdSql.close()
    tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both Windows and Linux runs.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
    """STATE_WINDOW coverage test.

    Verifies state_window(...) over every column type that supports it
    (int/bigint/smallint/tinyint/bool and their unsigned variants),
    checks aggregate functions inside a state window, and asserts that
    unsupported types (timestamp/float/double/binary/nchar and tags)
    are rejected with an error.
    """

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        # Rows inserted into dev_002; each row changes state, so
        # state_window(t1) must yield exactly this many windows.
        self.rowNum = 100000
        # Base timestamp (milliseconds) for the generated rows.
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()
        print("==============step1")
        # Super table with one column of every type state_window may see.
        tdSql.execute(
            "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))")
        tdSql.execute(
            'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")')
        tdSql.execute(
            'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")')
        print("==============step2")
        # Four hand-crafted rows covering the state transitions checked below.
        tdSql.execute(
            "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")
        for i in range(self.rowNum):
            # Fixed: removed the stray trailing comma that made the SQL
            # malformed ("values(%d, %d,)").
            tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d)" % (self.ts + i, i + 1))

        # One state window per run of equal values: t1 is 1,1,10,1 -> 3 windows.
        tdSql.query("select count(ts) from dev_001 state_window(t1)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(ts) from dev_001 state_window(t3)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 2)
        tdSql.query("select count(ts) from dev_001 state_window(t7)")
        tdSql.checkRows(3)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t8)")
        tdSql.checkRows(3)
        tdSql.checkData(2, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t11)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 3)
        tdSql.query("select count(ts) from dev_001 state_window(t12)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t13)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t14)")
        tdSql.checkRows(3)
        tdSql.checkData(1, 0, 2)
        # Every row in dev_002 has a distinct t1 -> one window per row.
        tdSql.query("select count(ts) from dev_002 state_window(t1)")
        tdSql.checkRows(100000)

        # All aggregate functions combined with a state window on t9 (bool).
        tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);")
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, 2)
        tdSql.checkData(1, 1, 10)
        tdSql.checkData(0, 2, 1)
        # tdSql.checkData(0, 3, 1)
        # stddev must agree with numpy over the same two values.
        tdSql.checkData(0, 4, np.std([1,5]))
        # tdSql.checkData(0, 5, 1)
        tdSql.checkData(0, 6, 1)
        tdSql.checkData(0, 7, 5)
        tdSql.checkData(0, 8, 4)
        tdSql.checkData(0, 9, 4.6)
        tdSql.checkData(0, 10, 'True')

        # state_window combined with a WHERE filter on the same column.
        tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);")
        tdSql.checkData(0, 0, 7)
        tdSql.checkData(0, 1, 'True')

        # Unsupported column types and tags must be rejected.
        tdSql.error("select count(*) from dev_001 state_window(t2)")
        tdSql.error("select count(*) from st state_window(t3)")
        tdSql.error("select count(*) from dev_001 state_window(t4)")
        tdSql.error("select count(*) from dev_001 state_window(t5)")
        tdSql.error("select count(*) from dev_001 state_window(t6)")
        tdSql.error("select count(*) from dev_001 state_window(t10)")
        tdSql.error("select count(*) from dev_001 state_window(tag2)")

    def stop(self):
        # Tear down: release the SQL connection and log that the case passed.
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both Windows and Linux runs.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -46,6 +46,8 @@ class TDTestCase:
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
os.system("rm /tmp/*.sql")
os.system("taosdump --databases db -o /tmp")
tdSql.execute("drop database db")
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    # taosdump round-trip test: export a populated database, drop it,
    # restore it from the dump, and verify schema and row count survive.

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # Base timestamp in milliseconds for the generated rows.
        self.ts = 1601481600000
        self.numberOfTables = 1
        self.numberOfRecords = 15000

    def run(self):
        tdSql.prepare()
        tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
        tdSql.execute("create table t1 using st tags(0)")
        currts = self.ts
        finish = 0
        # Batched insert: grow a single INSERT statement until it nears the
        # 1 MB SQL length cap (with a 16 KB safety margin), execute it, then
        # resume from the last row appended (`finish` tracks progress).
        while(finish < self.numberOfRecords):
            sql = "insert into t1 values"
            for i in range(finish, self.numberOfRecords):
                sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
                finish = i + 1
                if (1048576 - len(sql)) < 16384:
                    break
            tdSql.execute(sql)
        # Export with taosdump (-B rows per batch, -L max SQL length),
        # then drop the database so the restore starts from nothing.
        os.system("rm /tmp/*.sql")
        os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576")
        tdSql.execute("drop database db")
        tdSql.query("show databases")
        tdSql.checkRows(0)
        # Restore from the dump directory.
        os.system("taosdump -i /tmp")
        # Verify database, super table, and full row count were restored.
        tdSql.query("show databases")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'db')
        tdSql.execute("use db")
        tdSql.query("show stables")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'st')
        tdSql.query("select count(*) from t1")
        tdSql.checkData(0, 0, self.numberOfRecords)

    def stop(self):
        # Tear down: release the SQL connection and log that the case passed.
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both Windows and Linux runs.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import datetime
import inspect
import psutil
import shutil
import json
from util.log import *
from multiprocessing import cpu_count
# TODO: fully test these functions and handle exceptions.
# TODO: handle JSON formats that taosdemo does not accept.
class TDTaosdemoCfg:
    """Builder for taosdemo JSON configuration files.

    Holds one dict per taosdemo config section (insert / query / subscribe
    plus their sub-sections) and provides import / alter / append / get
    helpers to assemble them before dumping the final JSON to disk via the
    generate_* methods.
    """

    def __init__(self):
        # Top-level config for "filetype": "insert".
        self.insert_cfg = {
            "filetype": "insert",
            "cfgdir": "/etc/taos",
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
            "password": "taosdata",
            "thread_count": cpu_count(),
            "thread_count_create_tbl": cpu_count(),
            "result_file": "./insert_res.txt",
            "confirm_parameter_prompt": "no",
            "insert_interval": 0,
            "num_of_records_per_req": 32766,
            "max_sql_len": 32766,
            "databases": None
        }
        # Database creation parameters ("dbinfo" section of insert_cfg).
        self.db = {
            "name": 'db',
            "drop": 'yes',
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 6,
            "precision": "ms",
            "keep": 3650,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        }
        # Top-level config for "filetype": "query".
        self.query_cfg = {
            "filetype": "query",
            "cfgdir": "/etc/taos",
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
            "password": "taosdata",
            "confirm_parameter_prompt": "no",
            "databases": "db",
            "query_times": 2,
            "query_mode": "taosc",
            "specified_table_query": None,
            "super_table_query": None
        }
        # "specified_table_query" section of query_cfg.
        self.table_query = {
            "query_interval": 1,
            "concurrent": 3,
            "sqls": None
        }
        # "super_table_query" section of query_cfg.
        self.stable_query = {
            "stblname": "stb",
            "query_interval": 1,
            "threads": 3,
            "sqls": None
        }
        # Top-level config for "filetype": "subscribe".
        self.sub_cfg = {
            "filetype": "subscribe",
            "cfgdir": "/etc/taos",
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
            "password": "taosdata",
            "databases": "db",
            "confirm_parameter_prompt": "no",
            "specified_table_query": None,
            "super_table_query": None
        }
        # "specified_table_query" section of sub_cfg.
        self.table_sub = {
            "concurrent": 1,
            "mode": "sync",
            "interval": 10000,
            "restart": "yes",
            "keepProgress": "yes",
            "sqls": None
        }
        # "super_table_query" section of sub_cfg.
        self.stable_sub = {
            "stblname": "stb",
            "threads": 1,
            "mode": "sync",
            "interval": 10000,
            "restart": "yes",
            "keepProgress": "yes",
            "sqls": None
        }
        # Super-table descriptors ("super_tables" of the insert config).
        self.stbs = []
        self.stb_template = {
            "name": "stb",
            "child_table_exists": "no",
            "childtable_count": 100,
            "childtable_prefix": "stb_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 5,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 100,
            "childtable_limit": 10,
            "childtable_offset": 0,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 32766,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT", "count": 1}],
            "tags": [{"type": "BIGINT", "count": 1}]
        }
        # Per-section sql lists plus a default template for each.
        self.tb_query_sql = []
        self.tb_query_sql_template = {
            "sql": "select last_row(*) from stb_0 ",
            "result": "temp/query_res0.txt"
        }
        self.stb_query_sql = []
        self.stb_query_sql_template = {
            "sql": "select last_row(ts) from xxxx",
            "result": "temp/query_res2.txt"
        }
        self.tb_sub_sql = []
        self.tb_sub_sql_template = {
            "sql": "select * from stb_0 ;",
            "result": "temp/subscribe_res0.txt"
        }
        self.stb_sub_sql = []
        self.stb_sub_sql_template = {
            "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
            "result": "temp/subscribe_res1.txt"
        }

    # ---- import helpers -------------------------------------------------
    # Each replaces the target dict/list wholesale with the caller-supplied
    # object (except import_sql, which routes by `mode`).

    def import_insert_cfg(self, dict_in):
        self.insert_cfg = dict_in

    def import_db(self, dict_in):
        self.db = dict_in

    def import_stbs(self, dict_in):
        self.stbs = dict_in

    def import_query_cfg(self, dict_in):
        self.query_cfg = dict_in

    def import_table_query(self, dict_in):
        self.table_query = dict_in

    def import_stable_query(self, dict_in):
        self.stable_query = dict_in

    def import_sub_cfg(self, dict_in):
        self.sub_cfg = dict_in

    def import_table_sub(self, dict_in):
        self.table_sub = dict_in

    def import_stable_sub(self, dict_in):
        self.stable_sub = dict_in

    def import_sql(self, Sql_in, mode):
        """Store a prebuilt sql list for later use.

        Args:
            Sql_in (list): the imported sql list
            mode (str): destination, formatted 'fileType_tableType' with
                fileType in {query, sub} and tableType in {table, stable}
        """
        if mode == 'query_table':
            self.tb_query_sql = Sql_in
        elif mode == 'query_stable':
            self.stb_query_sql = Sql_in
        elif mode == 'sub_table':
            self.tb_sub_sql = Sql_in
        elif mode == 'sub_stable':
            self.stb_sub_sql = Sql_in

    # ---- alter helpers --------------------------------------------------
    # Set a single key of the target dict. For the structural keys
    # ('databases', "specified_table_query", "super_table_query", "sqls")
    # the stored sub-dict/list is wired in and `value` is ignored.

    def alter_insert_cfg(self, key, value):
        if key == 'databases':
            # Assemble the "databases" section from the current db/stbs state.
            self.insert_cfg[key] = [
                {
                    'dbinfo': self.db,
                    'super_tables': self.stbs
                }
            ]
        else:
            self.insert_cfg[key] = value

    def alter_db(self, key, value):
        self.db[key] = value

    def alter_query_tb(self, key, value):
        if key == "sqls":
            self.table_query[key] = self.tb_query_sql
        else:
            self.table_query[key] = value

    def alter_query_stb(self, key, value):
        if key == "sqls":
            self.stable_query[key] = self.stb_query_sql
        else:
            self.stable_query[key] = value

    def alter_query_cfg(self, key, value):
        if key == "specified_table_query":
            self.query_cfg["specified_table_query"] = self.table_query
        elif key == "super_table_query":
            self.query_cfg["super_table_query"] = self.stable_query
        else:
            # Bug fix: previously wrote to self.table_query, silently
            # dropping any other query_cfg setting.
            self.query_cfg[key] = value

    def alter_sub_cfg(self, key, value):
        if key == "specified_table_query":
            self.sub_cfg["specified_table_query"] = self.table_sub
        elif key == "super_table_query":
            self.sub_cfg["super_table_query"] = self.stable_sub
        else:
            # Bug fix: previously wrote to self.table_query instead of
            # self.sub_cfg, silently dropping the setting.
            self.sub_cfg[key] = value

    def alter_sub_stb(self, key, value):
        if key == "sqls":
            self.stable_sub[key] = self.stb_sub_sql
        else:
            self.stable_sub[key] = value

    def alter_sub_tb(self, key, value):
        if key == "sqls":
            self.table_sub[key] = self.tb_sub_sql
        else:
            self.table_sub[key] = value

    # ---- sql-list helpers -----------------------------------------------

    def append_sql_stb(self, target, value):
        """Append a sql dict onto the chosen sql list.

        Args:
            target (str): the target list, formatted 'fileType_tableType'
                (fileType in {query, sub}; tableType in {table, stable});
                unique value 'insert_stbs' appends to the super-table list
            value (dict): the sql dict to append
        """
        if target == 'insert_stbs':
            self.stbs.append(value)
        elif target == 'query_table':
            self.tb_query_sql.append(value)
        elif target == 'query_stable':
            self.stb_query_sql.append(value)
        elif target == 'sub_table':
            self.tb_sub_sql.append(value)
        elif target == 'sub_stable':
            self.stb_sub_sql.append(value)

    def pop_sql_stb(self, target, index):
        """Remove the sql dict at `index` from the chosen sql list.

        Args:
            target (str): same addressing scheme as append_sql_stb
            index (int): index of the sql dict to pop
        """
        if target == 'insert_stbs':
            self.stbs.pop(index)
        elif target == 'query_table':
            self.tb_query_sql.pop(index)
        elif target == 'query_stable':
            self.stb_query_sql.pop(index)
        elif target == 'sub_table':
            self.tb_sub_sql.pop(index)
        elif target == 'sub_stable':
            self.stb_sub_sql.pop(index)

    # ---- getters --------------------------------------------------------

    def get_db(self):
        return self.db

    def get_stb(self):
        return self.stbs

    def get_insert_cfg(self):
        return self.insert_cfg

    def get_query_cfg(self):
        return self.query_cfg

    def get_tb_query(self):
        return self.table_query

    def get_stb_query(self):
        return self.stable_query

    def get_sub_cfg(self):
        return self.sub_cfg

    def get_tb_sub(self):
        return self.table_sub

    def get_stb_sub(self):
        return self.stable_sub

    def get_sql(self, target):
        """Return the requested sql list (see append_sql_stb for targets).

        Returns None for an unrecognized target.
        """
        if target == 'query_table':
            return self.tb_query_sql
        elif target == 'query_stable':
            return self.stb_query_sql
        elif target == 'sub_table':
            return self.tb_sub_sql
        elif target == 'sub_stable':
            return self.stb_sub_sql

    def get_template(self, target):
        """Return the default sql template for `target`
        (see append_sql_stb for the addressing scheme)."""
        if target == 'insert_stbs':
            return self.stb_template
        elif target == 'query_table':
            return self.tb_query_sql_template
        elif target == 'query_stable':
            return self.stb_query_sql_template
        elif target == 'sub_table':
            return self.tb_sub_sql_template
        elif target == 'sub_stable':
            return self.stb_sub_sql_template
        else:
            print(f'did not find {target}')

    # ---- config-file generation -----------------------------------------
    # Each generator wires the stored sub-sections into the top-level dict,
    # dumps it as JSON under pathName, and returns the file path
    # '[pathName]/[filetype]_[fileName].json'.

    def generate_insert_cfg(self, pathName, fileName):
        cfgFileName = f'{pathName}/insert_{fileName}.json'
        self.alter_insert_cfg('databases', None)
        with open(cfgFileName, 'w') as file:
            json.dump(self.insert_cfg, file)
        return cfgFileName

    def generate_query_cfg(self, pathName, fileName):
        cfgFileName = f'{pathName}/query_{fileName}.json'
        self.alter_query_tb('sqls', None)
        self.alter_query_stb('sqls', None)
        self.alter_query_cfg('specified_table_query', None)
        self.alter_query_cfg('super_table_query', None)
        with open(cfgFileName, 'w') as file:
            json.dump(self.query_cfg, file)
        return cfgFileName

    def generate_subscribe_cfg(self, pathName, fileName):
        cfgFileName = f'{pathName}/subscribe_{fileName}.json'
        self.alter_sub_tb('sqls', None)
        self.alter_sub_stb('sqls', None)
        self.alter_sub_cfg('specified_table_query', None)
        self.alter_sub_cfg('super_table_query', None)
        with open(cfgFileName, 'w') as file:
            json.dump(self.sub_cfg, file)
        return cfgFileName

    def drop_cfg_file(self, fileName):
        # Delete a previously generated config file.
        os.remove(f'{fileName}')
taosdemoCfg = TDTaosdemoCfg()
......@@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
tstrncpy(configDir, argv[++i], 128);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册