diff --git a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
index 43e281f43769f59c2384fed43d00868c10a05342..b0e25f7f25d04e65aab69a5e861361ae98a34682 100644
--- a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
+++ b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py
@@ -11,28 +11,34 @@
 
 # -*- coding: utf-8 -*-
 
-from basic import *
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+from util.dockerNodes import *
 
 class TDTestCase:
-
-    def init(self):
-        # tdLog.debug("start to execute %s" % __file__)
-
-        self.numOfNodes = 5
-        self.dockerDir = "/data"
-        cluster.init(self.numOfNodes, self.dockerDir)
-        cluster.prepardBuild()
-        for i in range(self.numOfNodes):
-            if i == 0:
-                cluster.cfg("role", "1", i + 1)
-            else:
-                cluster.cfg("role", "2", i + 1)
-        cluster.run()
-
-td = TDTestCase()
-td.init()
-
-
-## usage: python3 OneMnodeMultipleVnodesTest.py
-
-
+
+    updatecfgDict = {'numOfNodes': 3, '1':{'role': 1}, '2':{'role': 2}, '3':{'role': 2}}
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+        tdLog.sleep(2)
+
+        tdSql.query("show dnodes")
+        tdSql.checkData(0, 5, 'mnode')
+        tdSql.checkData(1, 5, 'vnode')
+        tdSql.checkData(2, 5, 'vnode')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/dockerCluster/insert.json b/tests/pytest/dockerCluster/insert.json
deleted file mode 100644
index 60def7be5e28f5167f168735666a08db1e25ccf0..0000000000000000000000000000000000000000
--- a/tests/pytest/dockerCluster/insert.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-    "filetype": "insert",
-    "cfgdir": "/etc/taos",
-    "host": "127.0.0.1",
-    "port": 6030,
-    "user": "root",
-    "password": "taosdata",
-    "thread_count": 4,
-    "thread_count_create_tbl": 1,
-    "result_file": "./insert_res.txt",
-    "confirm_parameter_prompt": "no",
-    "databases": [{
-        "dbinfo": {
-            "name": "db",
-            "drop": "no",
-            "replica": 1,
-            "days": 2,
-            "cache": 16,
-            "blocks": 8,
-            "precision": "ms",
-            "keep": 36500,
-            "minRows": 100,
-            "maxRows": 4096,
-            "comp":2,
-            "walLevel":1,
-            "quorum":1,
-            "fsync":3000,
-            "update": 0
-        },
-        "super_tables": [{
-            "name": "stb",
-            "child_table_exists":"no",
-            "childtable_count": 1,
-            "childtable_prefix": "stb_",
-            "auto_create_table": "no",
-            "data_source": "rand",
-            "insert_mode": "taosc",
-            "insert_rate": 0,
-            "insert_rows": 100000,
-            "interlace_rows": 100,
-            "max_sql_len": 1024000,
-            "disorder_ratio": 0,
-            "disorder_range": 1000,
-            "timestamp_step": 10,
-            "start_timestamp": "2020-10-01 00:00:00.000",
-            "sample_format": "csv",
-            "sample_file": "./sample.csv",
-            "tags_file": "",
-            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
-            "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
-        }]
-    }]
-}
diff --git a/tests/pytest/dockerCluster/taosdemoWrapper.py b/tests/pytest/dockerCluster/taosdemoWrapper.py
deleted file mode 100644
index f2bd7bbc2307e2676a083c77f573609720c52450..0000000000000000000000000000000000000000
--- a/tests/pytest/dockerCluster/taosdemoWrapper.py
+++ /dev/null
@@ -1,142 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import os
-import random
-import argparse
-
-class taosdemoWrapper:
-
-    def __init__(self, host, metadata, database, tables, threads, configDir, replica,
-            columnType, columnsPerTable, rowsPerTable, disorderRatio, disorderRange, charTypeLen):
-        self.host = host
-        self.metadata = metadata
-        self.database = database
-        self.tables = tables
-        self.threads = threads
-        self.configDir = configDir
-        self.replica = replica
-        self.columnType = columnType
-        self.columnsPerTable = columnsPerTable
-        self.rowsPerTable = rowsPerTable
-        self.disorderRatio = disorderRatio
-        self.disorderRange = disorderRange
-        self.charTypeLen = charTypeLen
-
-    def run(self):
-        if self.metadata is None:
-            os.system("%staosBenchmark -h %s -d %s -t %d -T %d -c %s -a %d -b %s -n %d -t %d -O %d -R %d -w %d -x -y"
-                % (self.host, self.database, self.tables, self.threads, self.configDir, self.replica, self.columnType,
-                    self.rowsPerTable, self.disorderRatio, self.disorderRange, self.charTypeLen))
-        else:
-            os.system("%staosBenchmark -f %s" % self.metadata)
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    '-H',
-    '--host-name',
-    action='store',
-    default='tdnode1',
-    type=str,
-    help='host name to be connected (default: tdnode1)')
-parser.add_argument(
-    '-f',
-    '--metadata',
-    action='store',
-    default=None,
-    type=str,
-    help='The meta data to execution procedure, if use -f, all other options invalid, Default is NULL')
-parser.add_argument(
-    '-d',
-    '--db-name',
-    action='store',
-    default='test',
-    type=str,
-    help='Database name to be created (default: test)')
-parser.add_argument(
-    '-t',
-    '--num-of-tables',
-    action='store',
-    default=10,
-    type=int,
-    help='Number of tables (default: 10000)')
-parser.add_argument(
-    '-T',
-    '--num-of-threads',
-    action='store',
-    default=10,
-    type=int,
-    help='Number of rest threads (default: 10)')
-parser.add_argument(
-    '-c',
-    '--config-dir',
-    action='store',
-    default='/etc/taos/',
-    type=str,
-    help='Configuration directory. (Default is /etc/taos/)')
-parser.add_argument(
-    '-a',
-    '--replica',
-    action='store',
-    default=100,
-    type=int,
-    help='Set the replica parameters of the database (default: 1, min: 1, max: 3)')
-parser.add_argument(
-    '-b',
-    '--column-type',
-    action='store',
-    default='int',
-    type=str,
-    help='the data_type of columns (default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP)')
-parser.add_argument(
-    '-l',
-    '--num-of-cols',
-    action='store',
-    default=10,
-    type=int,
-    help='The number of columns per record (default: 10)')
-parser.add_argument(
-    '-n',
-    '--num-of-rows',
-    action='store',
-    default=1000,
-    type=int,
-    help='Number of subtales per stable (default: 1000)')
-parser.add_argument(
-    '-O',
-    '--disorder-ratio',
-    action='store',
-    default=0,
-    type=int,
-    help=' (0: in order, > 0: disorder ratio, default: 0)')
-parser.add_argument(
-    '-R',
-    '--disorder-range',
-    action='store',
-    default=0,
-    type=int,
-    help='Out of order datas range, ms (default: 1000)')
-parser.add_argument(
-    '-w',
-    '--char-type-length',
-    action='store',
-    default=16,
-    type=int,
-    help='Out of order datas range, ms (default: 16)')
-
-args = parser.parse_args()
-taosdemo = taosdemoWrapper(args.host_name, args.metadata, args.db_name, args.num_of_tables,
-    args.num_of_threads, args.config_dir, args.replica, args.column_type, args.num_of_cols,
-    args.num_of_rows, args.disorder_ratio, args.disorder_range, args.char_type_length)
-taosdemo.run()
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index a96ac21496431b811f26fa82091c92f6ae8ecb9a..b15c31dd23fdcf916dc9d42b015e0b2d95f2595c 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -23,6 +23,7 @@ from fabric2 import Connection
 from util.log import *
 from util.dnodes import *
 from util.cases import *
+from util.dockerNodes import *
 
 import taos
 
@@ -36,14 +37,17 @@ if __name__ == "__main__":
     logSql = True
     stop = 0
     restart = False
+    docker = False
+    dataDir = "/data"
     windows = 0
-    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [
-        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows'])
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:d:p:m:l:scghrw', [
+        'file=', 'docker=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows'])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(
                 'A collection of test cases written using Python')
             tdLog.printNoPrefix('-f Name of test case file written by Python')
+            tdLog.printNoPrefix('-d docker cluster test')
             tdLog.printNoPrefix('-p Deploy Path for Simulator')
             tdLog.printNoPrefix('-m Master Ip for Simulator')
             tdLog.printNoPrefix('-l logSql Flag')
@@ -60,6 +64,10 @@ if __name__ == "__main__":
         if key in ['-f', '--file']:
             fileName = value
 
+        if key in ['-d', '--docker']:
+            fileName = os.path.normpath(value)
+            docker = True
+
         if key in ['-p', '--path']:
             deployPath = value
 
@@ -123,7 +131,40 @@ if __name__ == "__main__":
         host = masterIp
 
     tdLog.info("Procedures for tdengine deployed in %s" % (host))
-    if windows:
+    if docker:
+        tdCases.logSql(logSql)
+        tdLog.info("Procedures for testing self-deployment")
+        is_test_framework = 0
+        key_word = 'tdCases.addLinux'
+        try:
+            if key_word in open(fileName).read():
+                is_test_framework = 1
+        except BaseException:
+            pass
+        if is_test_framework:
+            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+            uModule = importlib.import_module(moduleName)
+            try:
+                ucase = uModule.TDTestCase()
+                numOfNodes = ucase.updatecfgDict.get('numOfNodes')
+                cluster.init(numOfNodes, dataDir)
+                cluster.prepardBuild()
+
+                for i in range(numOfNodes):
+                    if ucase.updatecfgDict.get('%d' % (i + 1)) != None:
+                        config = dict (ucase.updatecfgDict.get('%d' % (i + 1)))
+                        print(config)
+                        for key, value in config.items():
+                            print(key, value, i + 1)
+                            cluster.cfg(key, value, i + 1)
+                cluster.run()
+                conn = cluster.conn
+            except Exception as e:
+                print(e.args)
+                print(str(e))
+                exit(1)
+        tdCases.runOneLinux(conn, fileName)
+    elif windows:
         tdCases.logSql(logSql)
         tdLog.info("Procedures for testing self-deployment")
         td_clinet = TDSimClient("C:\\TDengine")
diff --git a/tests/pytest/dockerCluster/Dockerfile b/tests/pytest/util/Dockerfile
similarity index 91%
rename from tests/pytest/dockerCluster/Dockerfile
rename to tests/pytest/util/Dockerfile
index 437dbc65e6430deb20faa16fc78ddc07005c15ac..c9c4d79be981e45609e040bf5835e275fc446260 100644
--- a/tests/pytest/dockerCluster/Dockerfile
+++ b/tests/pytest/util/Dockerfile
@@ -28,8 +28,6 @@ RUN ulimit -c unlimited
 
 COPY --from=builder /root/bin/taosd /usr/bin
 COPY --from=builder /root/bin/tarbitrator /usr/bin
-COPY --from=builder /root/bin/taosdemo /usr/bin
-COPY --from=builder /root/bin/taosdump /usr/bin
 COPY --from=builder /root/bin/taos /usr/bin
 COPY --from=builder /root/cfg/taos.cfg /etc/taos/
 COPY --from=builder /root/lib/libtaos.so.* /usr/lib/libtaos.so.1
diff --git a/tests/pytest/dockerCluster/buildClusterEnv.sh b/tests/pytest/util/buildClusterEnv.sh
similarity index 81%
rename from tests/pytest/dockerCluster/buildClusterEnv.sh
rename to tests/pytest/util/buildClusterEnv.sh
index 7bd92cad72c4180d5405364ebe2fbd81a8a20386..f31fafa19b4af1618d499dd8f03c133c3444a307 100755
--- a/tests/pytest/dockerCluster/buildClusterEnv.sh
+++ b/tests/pytest/util/buildClusterEnv.sh
@@ -34,12 +34,12 @@ done
 
 function prepareBuild {
 
-  if [ -d $CURR_DIR/../../../release ]; then
+  if [ -d $CURR_DIR/../../release ]; then
     echo release exists
-    rm -rf $CURR_DIR/../../../release/*
+    rm -rf $CURR_DIR/../../release/*
   fi
 
-  cd $CURR_DIR/../../../packaging
+  cd $CURR_DIR/../../packaging
 
   if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
    if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
@@ -47,17 +47,17 @@ function prepareBuild {
       echo "generating TDeninge enterprise packages"
       ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
 
-      if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
+      if [ ! -e $CURR_DIR/../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
        echo "no TDengine install package found"
        exit 1
       fi
 
-      if [ ! -e $CURR_DIR/../../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
+      if [ ! -e $CURR_DIR/../../release/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
        echo "no arbitrator install package found"
        exit 1
       fi
 
-      cd $CURR_DIR/../../../release
+      cd $CURR_DIR/../../release
       mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
       mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
     fi
@@ -67,33 +67,27 @@ function prepareBuild {
       echo "generating TDeninge community packages"
       ./release.sh -v edge -n $VERSION >> /dev/null 2>&1
 
-      if [ ! -e $CURR_DIR/../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
+      if [ ! -e $CURR_DIR/../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
        echo "no TDengine install package found"
        exit 1
       fi
 
-      if [ ! -e $CURR_DIR/../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
+      if [ ! -e $CURR_DIR/../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
        echo "no arbitrator install package found"
        exit 1
       fi
 
-      cd $CURR_DIR/../../../release
+      cd $CURR_DIR/../../release
       mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
       mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
     fi
   fi
-
-  rm -rf $DOCKER_DIR/*.yml
-  cd $CURR_DIR
-
-  cp *.yml $DOCKER_DIR
-  cp Dockerfile $DOCKER_DIR
 }
 
 function clusterUp {
   echo "docker compose start"
-  cd $DOCKER_DIR
+  cd $DOCKER_DIR
 
   if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
     docker_run="PACKAGE=TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-enterprise-server-$VERSION DIR2=TDengine-enterprise-arbitrator-$VERSION VERSION=$VERSION DATADIR=$DOCKER_DIR docker-compose -f docker-compose.yml "
diff --git a/tests/pytest/dockerCluster/docker-compose.yml b/tests/pytest/util/docker-compose.yml
similarity index 100%
rename from tests/pytest/dockerCluster/docker-compose.yml
rename to tests/pytest/util/docker-compose.yml
diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/util/dockerNodes.py
similarity index 79%
rename from tests/pytest/dockerCluster/basic.py
rename to tests/pytest/util/dockerNodes.py
index 871d69790d328f3dcea9fdfdac27a6abc3bb14bd..634acd48960b6920b2b9f3ec5161912d4d6c0c2a 100644
--- a/tests/pytest/dockerCluster/basic.py
+++ b/tests/pytest/util/dockerNodes.py
@@ -16,7 +16,7 @@ import taos
 
 class BuildDockerCluser:
 
-    def init(self, numOfNodes, dockerDir):
+    def init(self, numOfNodes = 3, dockerDir = "/data"):
         self.numOfNodes = numOfNodes
         self.dockerDir = dockerDir
 
@@ -45,21 +45,25 @@ class BuildDockerCluser:
             "qdebugFlag":"135",
             "maxSQLLength":"1048576"
         }
-        cmd = "mkdir -p %s" % self.dockerDir
-        self.execCmd(cmd)
+        os.makedirs(self.dockerDir, exist_ok=True) # like "mkdir -p"
+
+        real_path = os.path.realpath(__file__)
+        self.current_dir = os.path.dirname(real_path)
 
-        cmd = "cp *.yml %s" % self.dockerDir
+        cmd = "cp %s/node3.yml %s" % (self.current_dir, self.dockerDir)
         self.execCmd(cmd)
 
-        cmd = "cp Dockerfile %s" % self.dockerDir
+        cmd = "cp %s/Dockerfile %s" % (self.current_dir, self.dockerDir)
+        self.execCmd(cmd)
+
+        cmd = "cp %s/docker-compose.yml %s" % (self.current_dir, self.dockerDir)
         self.execCmd(cmd)
 
     # execute command, and return the output
     # ref: https://blog.csdn.net/wowocpp/article/details/80775650
     def execCmdAndGetOutput(self, cmd):
         r = os.popen(cmd)
-        text = r.read()
+        text = r.read()
         r.close()
         return text
@@ -85,10 +89,11 @@ class BuildDockerCluser:
             host = self.hostName,
             user = self.user,
             password = self.password,
-            config = self.configDir)
+            config = self.configDir)
 
-    def removeFile(self, rootDir, index, dir):
+    def removeFile(self, rootDir, index, dir):
         cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir)
+        print(cmd)
         self.execCmd(cmd)
 
     def clearEnv(self):
@@ -100,8 +105,7 @@ class BuildDockerCluser:
             self.removeFile(self.dockerDir, i, self.dirs[2])
 
     def createDir(self, rootDir, index, dir):
-        cmd = "mkdir -p %s/node%d/%s" % (rootDir, index, dir)
-        self.execCmd(cmd)
+        os.makedirs("%s/node%d/%s" % (rootDir, index, dir), exist_ok=True) # like "mkdir -p"
 
     def createDirs(self):
         for i in range(1, self.numOfNodes + 1):
@@ -114,31 +118,28 @@ class BuildDockerCluser:
     def cfg(self, option, value, nodeIndex):
         cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex)
         cmd = "echo '%s %s' >> %s" % (option, value, cfgPath)
+        print(cmd)
         self.execCmd(cmd)
 
     def updateLocalhosts(self):
-        cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts | sed 's: ::g'"
-        result = self.execCmdAndGetOutput(cmd)
-        print(result)
-        if result is None or result.isspace():
-            print("==========")
-            cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
-            display = "echo %s" % cmd
-            self.execCmd(display)
-            self.execCmd(cmd)
+        hosts = open('/etc/hosts', 'r')
+        for line in hosts:
+            # print(line.split())
+            if line.split()[1:] == 'tdNode2':
+                print("*******")
 
     def deploy(self):
         self.clearEnv()
         self.createDirs()
         for i in range(1, self.numOfNodes + 1):
             self.cfg("firstEp", "tdnode1:6030", i)
-
             for key, value in self.cfgDict.items():
                 self.cfg(key, value, i)
 
     def createDondes(self):
-        self.cursor = self.conn.cursor()
-        for i in range(2, self.numOfNodes + 1):
+        self.cursor = self.conn.cursor()
+        for i in range(2, self.numOfNodes + 1):
+            print("create dnode tdnode%d" % i)
             self.cursor.execute("create dnode tdnode%d" % i)
 
     def startArbitrator(self):
@@ -155,10 +156,10 @@ class BuildDockerCluser:
         self.deploy()
 
     def run(self):
-        cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
+        cmd = "%s/buildClusterEnv.sh -n %d -v %s -d %s" % (self.current_dir, self.numOfNodes, self.getTaosdVersion(), self.dockerDir)
         display = "echo %s" % cmd
         self.execCmd(display)
-        self.execCmd(cmd)
+        self.execCmd(cmd)
         self.getConnection()
         self.createDondes()
 
diff --git a/tests/pytest/dockerCluster/node3.yml b/tests/pytest/util/node3.yml
similarity index 100%
rename from tests/pytest/dockerCluster/node3.yml
rename to tests/pytest/util/node3.yml
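
Usage note: the patch drops the old "## usage: python3 OneMnodeMultipleVnodesTest.py" comment without replacing it, so the docker-cluster case is now expected to be launched through test.py's new -d option, which normalizes the given path, builds the cluster from the case's updatecfgDict (numOfNodes plus the per-node role entries), and then runs the case via tdCases.runOneLinux. A minimal invocation sketch, assuming the case path is passed relative to tests/pytest and that the default /data docker directory is writable by the current user:

    # build the 3-node docker cluster described by updatecfgDict, then run the case against it
    cd tests/pytest
    python3 test.py -d dockerCluster/OneMnodeMultipleVnodesTest.py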