Unverified commit 6416277a, authored by sangshuduo, committed by GitHub

[TD-13408]<test>: move tests back for master (#10599)

* restore .gitmodules for master

* Revert "[TD-13408]<test>: move tests out"

This reverts commit 16a385a8336e66c4d247ac0bc6aa07e7e1e39dc8.

* immigrate file change from stand-alone repo to TDengine

for master

* remove tests repository checkout

* remove examples in CMakeLists.txt
Co-authored-by: tangfangzhi <fztang@taosdata.com>
Parent ccacb186

Too many changes to show: to preserve performance, only 1000 of 1000+ files are displayed.
.gitmodules:

@@ -13,10 +13,3 @@
 [submodule "deps/TSZ"]
 	path = deps/TSZ
 	url = https://github.com/taosdata/TSZ.git
-[submodule "tests"]
-	path = tests
-	url = https://github.com/taosdata/tests
-	branch = master
-[submodule "examples/rust"]
-	path = examples/rust
-	url = https://github.com/songtianyi/tdengine-rust-bindings.git
Jenkinsfile:

@@ -11,7 +11,11 @@ def sync_source() {
     sh '''
     cd ${WKC}
     [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
-    git reset --hard >/dev/null
+    git reset --hard
+    git fetch
+    cd ${WK}
+    git reset --hard
+    git fetch
     '''
     script {
         if (env.CHANGE_TARGET == 'master') {
@@ -37,64 +41,65 @@ def sync_source() {
         }
     }
     sh '''
-    export TZ=Asia/Harbin
     cd ${WKC}
-    git reset --hard
     git remote prune origin
     [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
     git pull >/dev/null
-    git fetch origin +refs/pull/${CHANGE_ID}/merge
-    git checkout -qf FETCH_HEAD
-    git reset --hard
     git clean -dfx
-    git submodule update --init --recursive --remote
-    cd ${WK}
-    git reset --hard
-    '''
-    sh '''
-    cd ${WKCT}
-    git reset --hard
+    git submodule update --init --recursive
     '''
     script {
         if (env.CHANGE_TARGET == 'master') {
             sh '''
             cd ${WK}
             git checkout master
-            cd ${WKCT}
-            git checkout master
             '''
         } else if (env.CHANGE_TARGET == '2.0') {
             sh '''
             cd ${WK}
             git checkout 2.0
-            cd ${WKCT}
-            git checkout 2.0
             '''
         } else if (env.CHANGE_TARGET == '2.4') {
             sh '''
             cd ${WK}
             git checkout 2.4
-            cd ${WKCT}
-            git checkout 2.4
             '''
         } else {
             sh '''
             cd ${WK}
             git checkout develop
-            cd ${WKCT}
-            git checkout develop
             '''
         }
     }
     sh '''
+    export TZ=Asia/Harbin
     cd ${WK}
     git pull >/dev/null
     git clean -dfx
-    cd ${WKCT}
-    git pull >/dev/null
-    git clean -dfx
-    date
+    '''
+    script {
+        if (env.CHANGE_URL =~ /\/TDengine\//) {
+            sh '''
+            echo "match /TDengine/ repository"
+            cd ${WKC}
+            git fetch origin +refs/pull/${CHANGE_ID}/merge
+            git checkout -qf FETCH_HEAD
+            '''
+        } else if (env.CHANGE_URL =~ /\/TDinternal\//) {
+            sh '''
+            echo "match /TDinternal/ repository"
+            cd ${WK}
+            git fetch origin +refs/pull/${CHANGE_ID}/merge
+            git checkout -qf FETCH_HEAD
+            '''
+        } else {
+            sh '''
+            echo "unmatched reposiotry ${CHANGE_URL}"
+            '''
+        }
+    }
+    sh '''
+    cd ${WKC}
+    git submodule update --init --recursive
     '''
 }
 def pre_test() {
@@ -129,7 +134,6 @@ pipeline {
     environment{
         WK = '/var/data/jenkins/workspace/TDinternal'
         WKC = '/var/data/jenkins/workspace/TDinternal/community'
-        WKCT = '/var/data/jenkins/workspace/TDinternal/community/tests'
         LOGDIR = '/var/data/jenkins/workspace/log'
     }
     stages {
@@ -248,7 +252,7 @@ pipeline {
         }
         parallel {
             stage ('build worker08_arm32') {
-                agent {label " worker08_arm32 "}
+                agent {label " worker08_arm32"}
                 steps {
                     timeout(time: 20, unit: 'MINUTES') {
                         pre_test()
...
Subproject commit 1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
Subproject commit 3f295e991ef7ba7c62e686cd9e88a3744c7edf41
# generate debug version:
# mkdir debug; cd debug; cmake -DCMAKE_BUILD_TYPE=Debug ..
# generate release version:
# mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release ..
CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
SET(CMAKE_C_STANDARD 11)
SET(CMAKE_VERBOSE_MAKEFILE ON)
ADD_SUBDIRECTORY(tsim)
ADD_SUBDIRECTORY(test/c)
ADD_SUBDIRECTORY(comparisonTest/tdengine)
### Prepare development environment

1. sudo apt install
   build-essential cmake net-tools python-pip python-setuptools python3-pip
   python3-setuptools valgrind psmisc curl
2. git clone <https://github.com/taosdata/TDengine>; cd TDengine
3. mkdir debug; cd debug; cmake ..; make; sudo make install
4. pip install ../src/connector/python; pip3 install ../src/connector/python
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
> Note: Both Python 2 and Python 3 are currently supported by the Python test
> framework. Since Python 2 has not been officially supported by the Python
> Software Foundation since January 1, 2020, subsequent test case development
> should be guaranteed to run correctly on Python 3. For Python 2, please
> keep compatibility where it is appropriate and adds no extra burden.
>
> If you use a newer Linux distribution such as Ubuntu 20.04, which no longer
> includes Python 2, please do not install Python 2 related packages.
>
> <https://nakedsecurity.sophos.com/2020/01/03/python-is-dead-long-live-python/>
### How to run Python test suite

1. cd \<TDengine\>/tests/pytest
2. ./smoketest.sh  # for smoke test
3. ./smoketest.sh -g  # for memory leak detection test with valgrind
4. ./fulltest.sh  # for full test
> Note 1: The TDengine daemon's configuration and data files are stored in
> the \<TDengine\>/sim directory. For historical reasons, this is the same
> location used by the TSIM scripts, so once a TSIM script has been run with
> sudo privileges the directory is owned by root, and the Python scripts
> cannot write to it as a normal user. You need to remove the directory
> completely before running the Python test cases. We should consider using
> two different locations for TSIM and the Python scripts.

> Note 2: If you need to debug a crash with a core dump, manually edit
> smoketest.sh or fulltest.sh to add "ulimit -c unlimited" before the script
> line. You can then find the core file in \<TDengine\>/tests/pytest after the
> program crashes.
### How to add a new test case

**1. TSIM test cases:**

TSIM is the testing framework that has been used internally. It is still
used, as a legacy system, to run the test cases we developed in the past;
we are switching to Python for new test cases and gradually phasing TSIM out.
**2. Python test cases:**

**2.1 Please refer to \<TDengine\>/tests/pytest/insert/basic.py to add a new
test case.** The new test case must implement three functions: self.init()
and self.stop() can simply be copied from insert/basic.py, while the test
logic is implemented in self.run(). You can refer to the code in the util
directory for more information.
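
A minimal sketch of such a case, assuming the init/stop boilerplate and the
util helper interfaces match insert/basic.py (the table schema and the final
tdCases registration line are illustrative, following the pattern of existing
cases):

    import sys
    from util.log import tdLog
    from util.cases import tdCases
    from util.sql import tdSql

    class TDTestCase:
        def init(self, conn, logSql):
            # boilerplate copied from insert/basic.py: bind the connection
            tdLog.debug("start to execute %s" % __file__)
            tdSql.init(conn.cursor(), logSql)

        def run(self):
            # the actual test logic lives here
            tdSql.prepare()
            tdSql.execute("create table t (ts timestamp, v int)")
            tdSql.execute("insert into t values (now, 1)")
            tdSql.query("select * from t")
            tdSql.checkRows(1)

        def stop(self):
            # boilerplate copied from insert/basic.py: release resources
            tdSql.close()
            tdLog.success("%s successfully executed" % __file__)

    tdCases.addLinux(__file__, TDTestCase())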
**2.2 Edit smoketest.sh to add the path and filename of the new test case.**

Note: The Python test framework may continue to be improved in the future,
hopefully to provide more functionality and make test cases easier to write,
so the way of writing test cases described above may change as well.
**2.3 What test.py does in detail:**

test.py is the entry program for test case execution and monitoring. It
supports the following options:

- -f --file, specifies the test case file to be executed
- -p --path, specifies the deployment path
- -m --master, specifies the master server IP for cluster deployment
- -c --cluster, tests cluster functionality
- -s --stop, terminates all running nodes
- -g --valgrind, loads valgrind for the memory leak detection test
- -h --help, displays help
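
For example, smoketest.sh invokes this entry point for each case; a single
case can typically be run by hand with `python3 ./test.py -f insert/basic.py`,
adding `-g` to run it under valgrind, per the option list above.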
**2.4 What util/log.py does in detail:**

log.py is quite simple; the main thing is that it can print output in
different colors as needed. success() should be called on successful test
case execution and prints green text; exit() prints red text and terminates
the program, and should be called on test failure.

**util/log.py**
...
    def info(self, info):
        print("%s %s" % (datetime.datetime.now(), info))

    def sleep(self, sec):
        print("%s sleep %d seconds" % (datetime.datetime.now(), sec))
        time.sleep(sec)

    def debug(self, err):
        print("\033[1;36m%s %s\033[0m" % (datetime.datetime.now(), err))

    def success(self, info):
        print("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))

    def notice(self, err):
        print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))

    def exit(self, err):
        print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
        sys.exit(1)

    def printNoPrefix(self, info):
        print("\033[1;36m%s\033[0m" % info)
...
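
For example, inside a test case these helpers would typically be used like
this (a sketch, assuming tdLog is the shared logger instance exported by
util/log.py):

    from util.log import tdLog

    tdLog.info("preparing data")    # plain timestamped output
    tdLog.sleep(1)                  # logs, then sleeps one second
    tdLog.debug("inspecting rows")  # cyan text
    tdLog.success("case passed")    # green text; call on success
    # tdLog.exit("case failed")     # red text, then sys.exit(1); call on failure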
**2.5 What util/sql.py does in detail:**

sql.py is mainly used to execute SQL statements that manipulate the
database; the code is extracted and commented as follows:

**util/sql.py**

    # prepare() is mainly used to set up the environment of test tables and
    # data; it creates the database db used for testing. Do not call
    # prepare() if you need to test the database operation commands
    # themselves.
    def prepare(self):
        tdLog.info("prepare database:db")
        self.cursor.execute('reset query cache')
        self.cursor.execute('drop database if exists db')
        self.cursor.execute('create database db')
        self.cursor.execute('use db')
    ...
    # query() is mainly used to execute select statements with valid syntax
    def query(self, sql):
    ...
    # error() is mainly used to execute select statements with invalid
    # syntax; the resulting error is caught and treated as the expected
    # behavior, and the test fails if no error is raised
    def error(self):
    ...
    # checkRows() checks the number of rows returned by the preceding
    # query(select ...)
    def checkRows(self, expectRows):
    ...
    # checkData() checks the result data returned by the preceding
    # query(select ...); the test fails if the data does not meet the
    # expectation
    def checkData(self, row, col, data):
    ...
    # getData() returns the result data of the preceding query(select ...)
    def getData(self, row, col):
    ...
    # execute() executes an SQL statement and returns the number of
    # affected rows
    def execute(self, sql):
    ...
    # executeTimes() executes the same SQL statement multiple times
    def executeTimes(self, sql, times):
    ...
    # checkAffectedRows() checks whether the number of affected rows is as
    # expected
    def checkAffectedRows(self, expectAffectedRows):
    ...
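
Putting these helpers together, the body of a typical run() might look like
this (a sketch; tdSql is assumed to be the shared instance exported by
util/sql.py, and the table schema is illustrative):

    def run(self):
        tdSql.prepare()  # recreate and use database db

        tdSql.execute("create table cars (ts timestamp, speed int)")
        tdSql.execute("insert into cars values (now, 60)")

        tdSql.query("select * from cars")
        tdSql.checkRows(1)         # exactly one row expected
        tdSql.checkData(0, 1, 60)  # row 0, column 1 should be 60

        tdSql.error("select speed from no_such_table")  # must raise an error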
### CI submission acceptance principles

- Every commit / PR must compile; warnings are currently treated as errors,
  so warnings must be resolved as well.
- Test cases that already exist must pass.
- Because CI is critical to the build and automated test procedure, a test
  case must be tested manually before it is added, with as many iterations
  as possible, to ensure that it provides stable and reliable results once
  added.

> Note: In the future, stress testing, performance testing, code style
> checks, and other features will be added on top of functional testing,
> according to requirements and test development progress.
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python
'''
return 1
}
pipeline {
agent none
environment{
WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'slad1'}
steps {
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
stage('test_b1') {
agent{label 'slad2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
}
}
stage('test_crash_gen') {
agent{label "slad3"}
steps {
pre_test()
sh '''
cd ${WKC}/tests/pytest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh'''
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package >/dev/null
java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
cd ${JENKINS_HOME}/workspace/nodejs
node nodejsChecker.js host=localhost
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
sh '''
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
stage('test_valgrind') {
agent{label "slad4"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
nohup taosd >/dev/null &
sleep 10
python3 concurrent_inquiry.py -c 1
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">Build Information</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>Build name >> branch: ${env.BRANCH_NAME}</li>
<li>Build result: <span style="color:green"> Successful </span></li>
<li>Build number: ${BUILD_NUMBER}</li>
<li>Triggered by: ${env.CHANGE_AUTHOR}</li>
<li>Commit message: ${env.CHANGE_TITLE}</li>
<li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table>
</body>
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">Build Information</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>Build name >> branch: ${env.BRANCH_NAME}</li>
<li>Build result: <span style="color:red"> Failure </span></li>
<li>Build number: ${BUILD_NUMBER}</li>
<li>Triggered by: ${env.CHANGE_AUTHOR}</li>
<li>Commit message: ${env.CHANGE_TITLE}</li>
<li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div>
</ul>
</td>
</tr>
</table>
</body>
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}
\ No newline at end of file
datastax-java-driver {
basic.request {
timeout = 200000 seconds
}
}
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.cassandra.test</groupId>
<artifactId>cassandratest</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>jar</packaging>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-plugins</artifactId>
<version>30</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.1.0</version>
<configuration>
<archive>
<manifest>
<mainClass>CassandraTest</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
<name>cassandratest</name>
<!-- FIXME change it to the project's website -->
<url>http://www.example.com</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-core</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-query-builder</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-mapper-runtime</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>2.7</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.7</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.5</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.5</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-1.2-api</artifactId>
<version>2.8.2</version>
</dependency>
</dependencies>
</project>
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.session.*;
import com.datastax.oss.driver.api.core.config.*;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
//import com.datastax.driver.core.Cluster;
//import com.datastax.driver.core.Cluster;
import java.io.BufferedWriter;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.FileReader;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Random;
import java.math.*;
import java.lang.reflect.Method;
public class CassandraTest{
public static void main(String args[]) {
// begin to parse argument
String datadir = "/home/ubuntu/testdata";
String sqlfile = "/home/ubuntu/fang/cassandra/q1.txt";
String cfgfile = "/home/ubuntu/fang/cassandra/application.conf";
boolean q4flag = false;
int numOfRows = 1000000;
int numOfFiles =0;
int numOfClients =0;
int rowsPerRequest =0;
for (int i = 0; i < args.length; ++i) {
if (args[i].equalsIgnoreCase("-dataDir")) {
if (i < args.length - 1) {
datadir = args[++i];
}
} else if (args[i].equalsIgnoreCase("-numofFiles")) {
if (i < args.length - 1) {
numOfFiles = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-rowsPerRequest")) {
if (i < args.length - 1) {
rowsPerRequest = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-writeClients")) {
if (i < args.length - 1) {
numOfClients = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-sql")) {
sqlfile = args[++i];
} else if (args[i].equalsIgnoreCase("-timetest")) {
q4flag = true;
} else if (args[i].equalsIgnoreCase("-conf")) {
cfgfile = args[++i];
}
}
// the configuration file below is loaded so that requests do not hit a timeout error
File confile = new File(cfgfile);
System.out.println("parameters\n");
if (numOfFiles >0) {
// write data
System.out.printf("----dataDir:%s\n", datadir);
System.out.printf("----numOfFiles:%d\n", numOfFiles);
System.out.printf("----numOfClients:%d\n", numOfClients);
System.out.printf("----rowsPerRequest:%d\n", rowsPerRequest);
// connect to cassandra server
System.out.printf("----connecting to cassandra server\n");
try {
CqlSession session = CqlSession.builder()
.withConfigLoader(DriverConfigLoader.fromFile(confile))
.build();
session.execute("drop keyspace if exists cassandra");
session.execute("CREATE KEYSPACE if not exists cassandra WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}");
if (q4flag) {
session.execute("create table if not exists cassandra.test (devid int, devname text, devgroup int, ts bigint, minute bigint, temperature int, humidity float ,primary key (minute,ts,devgroup,devid,devname))");
} else {
session.execute("create table if not exists cassandra.test (devid int, devname text, devgroup int, ts bigint, temperature int, humidity float ,primary key (devgroup,devid,devname,ts))");
}
session.close();
System.out.printf("----created keyspace cassandra and table test\n");
// begin to insert data
System.out.printf("----begin to insert data\n");
long startTime = System.currentTimeMillis();
int a = numOfFiles/numOfClients;
int b = numOfFiles%numOfClients;
int last = 0;
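// distribute numOfFiles files across numOfClients threads: the first b threads write a+1 files each, the rest write a files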
WriteThread[] writethreads = new WriteThread[numOfClients];
int[] wargs = new int[2]; // data file start, end
wargs[0] = numOfRows; //rows to be read from each file
wargs[1] = rowsPerRequest;
int fstart =0;
int fend =0;
for (int i = 0; i<numOfClients; ++i) {
if (i<b) {
fstart = last;
fend = last+a;
last = last+a+1;
writethreads[i] = new WriteThread(fstart,fend,wargs,datadir,q4flag);
System.out.printf("----Thread %d begin to write\n",i);
writethreads[i].start();
} else {
fstart = last;
fend = last+a-1;
last = last+a;
writethreads[i] = new WriteThread(fstart,fend,wargs,datadir,q4flag);
System.out.printf("----Thread %d begin to write\n",i);
writethreads[i].start();
}
}
for (int i =0; i<numOfClients; ++i) {
try {
writethreads[i].join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
long stopTime = System.currentTimeMillis();
float elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
float speeds = numOfRows*numOfFiles/elapseTime;
System.out.printf("---- insertation speed: %f Rows/Second\n",speeds);
} catch (Exception ex) {
ex.printStackTrace();
System.exit(1);
} finally {
System.out.printf("---- insertion end\n");
}
// above:write part; below: read part;
} else {
// query data begin
System.out.printf("----sql command file:%s\n", sqlfile);
// connect to cassandra server
try {
CqlSession session = CqlSession.builder()
.withConfigLoader(DriverConfigLoader.fromFile(confile))
.build();
//session.execute("use cassandra;");
BufferedReader br = null;
String line = "";
try {
br = new BufferedReader(new FileReader(sqlfile));
while ((line = br.readLine()) != null && line.length()>10) {
long startTime = System.currentTimeMillis();
// begin to query one line command //
// end querying one line command
try {
ResultSet results = session.execute(line);
long icounter = 0;
for (Row row : results) {
icounter++;
}
long stopTime = System.currentTimeMillis();
float elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("----spend %f seconds to query: %s\n", elapseTime, line);
} catch (Exception ex) {
ex.printStackTrace();
System.out.printf("---- query failed!\n");
System.exit(1);
}
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
session.close();
}
} catch (Exception ex) {
ex.printStackTrace();
} finally {
System.out.println("query end:----\n");
}
} // end write or query
System.exit(0);
}// end main
}// end class
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.math.*;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.session.*;
import com.datastax.oss.driver.api.core.config.*;
public class WriteThread extends Thread {
private int[] wargs; // fstart, fend, rows to be read, rows perrequest
private String fdir;
private int fstart;
private int fend;
private boolean q4flag;
public WriteThread (int fstart, int fend,int[] wargs, String fdir, boolean q4flag) {
this.fstart = fstart;
this.fend = fend;
this.fdir = fdir;
this.wargs = wargs;
this.q4flag = q4flag;
}
// begin to insert in this thread
public void run() {
/*
// this configuration file makes sure no timeout error
File confile = new File("/home/ubuntu/fang/cassandra/application.conf");
*/
// connect to server
try {
CqlSession session = CqlSession.builder()
//.withConfigLoader(DriverConfigLoader.fromFile(confile))
.build();
//session.execute("use cassandra");
int tominute = 6000;
for (int i=fstart; i<=fend; i++) {
String csvfile;
csvfile = fdir + "/testdata"+ Integer.toString(i)+".csv";
BufferedReader br = null;
String line = "";
String cvsSplitBy = " ";
try {
br = new BufferedReader(new FileReader(csvfile));
System.out.println("---- begin to read file " +csvfile+"\n");
for (int itotalrow =0; itotalrow<wargs[0]; itotalrow=itotalrow+wargs[1]) {
String cqlstr = "BEGIN BATCH ";
for (int irow =0; irow<wargs[1]; ++irow) {
line = br.readLine();
if (line !=null) {
String[] meter = line.split(cvsSplitBy);
BigInteger tminute = new BigInteger(meter[3]);
tminute = tminute.divide(BigInteger.valueOf(tominute));
if (q4flag) {
cqlstr = cqlstr + "insert into cassandra.test (devid,devname,devgroup,ts, minute,temperature,humidity) values ";
cqlstr = cqlstr +"("+meter[0] +"," +"'" +meter[1] +"'" +"," +meter[2] +"," + meter[3] +",";
cqlstr = cqlstr +tminute.toString() +"," +meter[4] +"," +meter[5] +");";
} else {
cqlstr = cqlstr + "insert into cassandra.test (devid,devname,devgroup,ts,temperature,humidity) values ";
cqlstr = cqlstr +"("+meter[0] +"," +"'" +meter[1] +"'" +"," +meter[2] +"," + meter[3] +",";
cqlstr = cqlstr +meter[4] +"," +meter[5] +");";
}
} // if this line is not null
}//end row iteration in one batch
cqlstr = cqlstr+" APPLY BATCH;";
try {
//System.out.println(cqlstr+"----\n");
session.execute(cqlstr);
} catch (Exception ex) {
ex.printStackTrace();
}
}// end one file reading
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}//end file iteration
session.close();
} catch (Exception ex) {
ex.printStackTrace();
}
}//end run
}//end class
select * from cassandra.test where devgroup=0 allow filtering;
select * from cassandra.test where devgroup=10 allow filtering;
select * from cassandra.test where devgroup=20 allow filtering;
select * from cassandra.test where devgroup=30 allow filtering;
select * from cassandra.test where devgroup=40 allow filtering;
select * from cassandra.test where devgroup=50 allow filtering;
select * from cassandra.test where devgroup=60 allow filtering;
select * from cassandra.test where devgroup=70 allow filtering;
select * from cassandra.test where devgroup=80 allow filtering;
select * from cassandra.test where devgroup=90 allow filtering;
select count(*) from cassandra.test where devgroup<10 allow filtering;
select count(*) from cassandra.test where devgroup<20 allow filtering;
select count(*) from cassandra.test where devgroup<30 allow filtering;
select count(*) from cassandra.test where devgroup<40 allow filtering;
select count(*) from cassandra.test where devgroup<50 allow filtering;
select count(*) from cassandra.test where devgroup<60 allow filtering;
select count(*) from cassandra.test where devgroup<70 allow filtering;
select count(*) from cassandra.test where devgroup<80 allow filtering;
select count(*) from cassandra.test where devgroup<90 allow filtering;
select count(*) from cassandra.test allow filtering;
select avg(temperature) from cassandra.test where devgroup<10 allow filtering;
select avg(temperature) from cassandra.test where devgroup<20 allow filtering;
select avg(temperature) from cassandra.test where devgroup<30 allow filtering;
select avg(temperature) from cassandra.test where devgroup<40 allow filtering;
select avg(temperature) from cassandra.test where devgroup<50 allow filtering;
select avg(temperature) from cassandra.test where devgroup<60 allow filtering;
select avg(temperature) from cassandra.test where devgroup<70 allow filtering;
select avg(temperature) from cassandra.test where devgroup<80 allow filtering;
select avg(temperature) from cassandra.test where devgroup<90 allow filtering;
select avg(temperature) from cassandra.test allow filtering;
select sum(temperature) from cassandra.test where devgroup<10 allow filtering;
select sum(temperature) from cassandra.test where devgroup<20 allow filtering;
select sum(temperature) from cassandra.test where devgroup<30 allow filtering;
select sum(temperature) from cassandra.test where devgroup<40 allow filtering;
select sum(temperature) from cassandra.test where devgroup<50 allow filtering;
select sum(temperature) from cassandra.test where devgroup<60 allow filtering;
select sum(temperature) from cassandra.test where devgroup<70 allow filtering;
select sum(temperature) from cassandra.test where devgroup<80 allow filtering;
select sum(temperature) from cassandra.test where devgroup<90 allow filtering;
select sum(temperature) from cassandra.test allow filtering;
select max(temperature) from cassandra.test where devgroup<10 allow filtering;
select max(temperature) from cassandra.test where devgroup<20 allow filtering;
select max(temperature) from cassandra.test where devgroup<30 allow filtering;
select max(temperature) from cassandra.test where devgroup<40 allow filtering;
select max(temperature) from cassandra.test where devgroup<50 allow filtering;
select max(temperature) from cassandra.test where devgroup<60 allow filtering;
select max(temperature) from cassandra.test where devgroup<70 allow filtering;
select max(temperature) from cassandra.test where devgroup<80 allow filtering;
select max(temperature) from cassandra.test where devgroup<90 allow filtering;
select max(temperature) from cassandra.test allow filtering;
select min(temperature) from cassandra.test where devgroup<10 allow filtering;
select min(temperature) from cassandra.test where devgroup<20 allow filtering;
select min(temperature) from cassandra.test where devgroup<30 allow filtering;
select min(temperature) from cassandra.test where devgroup<40 allow filtering;
select min(temperature) from cassandra.test where devgroup<50 allow filtering;
select min(temperature) from cassandra.test where devgroup<60 allow filtering;
select min(temperature) from cassandra.test where devgroup<70 allow filtering;
select min(temperature) from cassandra.test where devgroup<80 allow filtering;
select min(temperature) from cassandra.test where devgroup<90 allow filtering;
select min(temperature) from cassandra.test allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<10 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<20 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<30 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<40 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<50 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<60 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<70 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<80 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<90 group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test group by devgroup allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<10 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<20 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<30 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<40 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<50 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<60 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<70 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<80 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test where devgroup<90 group by minute allow filtering;
select count(temperature), sum(temperature), avg(temperature) from cassandra.test group by minute;
package com.taosdata.generator;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Random;
public class DataGenerator {
/*
 * ValueGen simulates how a sensor reading such as humidity fluctuates.
 * The valid range of humidity is [0, 100].
 */
public static class ValueGen {
int center;
int range;
Random rand;
public ValueGen(int center, int range) {
this.center = center;
this.range = range;
this.rand = new Random();
}
double next() {
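// draw a standard normal sample, clamp it to [-3, 3], then scale so the result stays within [center - range, center + range]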
double v = this.rand.nextGaussian();
if (v < -3) {
v = -3;
}
if (v > 3) {
v = 3;
}
return (this.range / 3.00) * v + center;
}
}
// data scale
private static int timestep = 1000; // sample time interval in milliseconds
private static long dataStartTime = 1563249700000L;
private static int deviceId = 0;
private static String tagPrefix = "dev_";
// MachineNum RowsPerMachine MachinesInOneFile
public static void main(String args[]) {
int numOfDevice = 10000;
int numOfFiles = 100;
int rowsPerDevice = 10000;
String directory = "~/";
for (int i = 0; i < args.length; i++) {
if (args[i].equalsIgnoreCase("-numOfDevices")) {
if (i < args.length - 1) {
numOfDevice = Integer.parseInt(args[++i]);
} else {
System.out.println("'-numOfDevices' requires a parameter, default is 10000");
}
} else if (args[i].equalsIgnoreCase("-numOfFiles")) {
if (i < args.length - 1) {
numOfFiles = Integer.parseInt(args[++i]);
} else {
System.out.println("'-numOfFiles' requires a parameter, default is 100");
}
} else if (args[i].equalsIgnoreCase("-rowsPerDevice")) {
if (i < args.length - 1) {
rowsPerDevice = Integer.parseInt(args[++i]);
} else {
System.out.println("'-rowsPerDevice' requires a parameter, default is 10000");
}
} else if (args[i].equalsIgnoreCase("-dataDir")) {
if (i < args.length - 1) {
directory = args[++i];
} else {
System.out.println("'-dataDir' requires a parameter, default is ~/testdata");
}
}
}
System.out.println("parameters");
System.out.printf("----dataDir:%s\n", directory);
System.out.printf("----numOfFiles:%d\n", numOfFiles);
System.out.printf("----numOfDevice:%d\n", numOfDevice);
System.out.printf("----rowsPerDevice:%d\n", rowsPerDevice);
int numOfDevPerFile = numOfDevice / numOfFiles;
long ts = dataStartTime;
// deviceId, time stamp, humid(int), temp(double), tagString(dev_deviceid)
int humidityDistRadius = 35;
int tempDistRadius = 17;
for (int i = 0; i < numOfFiles; ++i) { // prepare the data file
dataStartTime = ts;
// generate file name
String path = directory;
try {
path += "/testdata" + String.valueOf(i) + ".csv";
getDataInOneFile(path, rowsPerDevice, numOfDevPerFile, humidityDistRadius, tempDistRadius);
} catch (IOException e) {
e.printStackTrace();
}
}
}
private static void getDataInOneFile(String path, int rowsPerDevice, int num, int humidityDistRadius, int tempDistRadius) throws IOException {
DecimalFormat df = new DecimalFormat("0.0000");
long startTime = dataStartTime;
FileWriter fw = new FileWriter(new File(path));
BufferedWriter bw = new BufferedWriter(fw);
for (int i = 0; i < num; ++i) {
deviceId += 1;
Random rand = new Random();
double centralVal = Math.abs(rand.nextInt(100));
if (centralVal < humidityDistRadius) {
centralVal = humidityDistRadius;
}
if (centralVal + humidityDistRadius > 100) {
centralVal = 100 - humidityDistRadius;
}
DataGenerator.ValueGen humidityDataGen = new DataGenerator.ValueGen((int) centralVal, humidityDistRadius);
dataStartTime = startTime;
centralVal = Math.abs(rand.nextInt(22));
DataGenerator.ValueGen tempDataGen = new DataGenerator.ValueGen((int) centralVal, tempDistRadius);
for (int j = 0; j < rowsPerDevice; ++j) {
int humidity = (int) humidityDataGen.next();
double temp = tempDataGen.next();
int deviceGroup = deviceId % 100;
StringBuffer sb = new StringBuffer();
sb.append(deviceId).append(" ").append(tagPrefix).append(deviceId).append(" ").append(deviceGroup)
.append(" ").append(dataStartTime).append(" ").append(humidity).append(" ")
.append(df.format(temp));
bw.write(sb.toString());
bw.write("\n");
dataStartTime += timestep;
}
}
bw.close();
fw.close();
System.out.printf("file:%s generated\n", path);
}
}
package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb1-client/v2"
)
type ProArgs struct {
host string
username string
password string
db string
sql string
dataDir string
filesNum int
writeClients int
rowsPerRequest int
}
type WriteInfo struct {
threadId int
sID int
eID int
}
type StatisInfo struct {
totalRows int64
}
var statis StatisInfo
func main() {
// Configuration
var arguments ProArgs
// Parse options
flag.StringVar(&(arguments.host), "host", "http://localhost:8086", "Server host to connect")
flag.StringVar(&(arguments.db), "db", "db", "DB to insert data")
flag.StringVar(&(arguments.username), "user", "", "Username used to connect to server")
flag.StringVar(&(arguments.password), "pass", "", "Password used to connect to server")
flag.StringVar(&(arguments.sql), "sql", "./sqlCmd.txt", "File name of SQL commands")
flag.StringVar(&(arguments.dataDir), "dataDir", "./testdata", "Raw csv data")
flag.IntVar(&(arguments.filesNum), "numOfFiles", 10, "Number of files in dataDir")
flag.IntVar(&(arguments.writeClients), "writeClients", 0, "Number of write clients")
flag.IntVar(&(arguments.rowsPerRequest), "rowsPerRequest", 100, "Number of rows per request")
flag.Parse()
statis.totalRows = 0
if arguments.writeClients > 0 {
writeData(&arguments)
} else {
readData(&arguments)
}
}
func writeData(arguments *ProArgs) {
log.Println("write data")
log.Println("---- writeClients:", arguments.writeClients)
log.Println("---- dataDir:", arguments.dataDir)
log.Println("---- numOfFiles:", arguments.filesNum)
log.Println("---- rowsPerRequest:", arguments.rowsPerRequest)
var wg sync.WaitGroup
wg.Add(arguments.writeClients)
st := time.Now()
a := arguments.filesNum / arguments.writeClients
b := arguments.filesNum % arguments.writeClients
last := 0
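// distribute filesNum files across writeClients goroutines: the first b clients get a+1 files each, the rest get a files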
for i := 0; i < arguments.writeClients; i++ {
var wInfo WriteInfo
wInfo.threadId = i + 1
wInfo.sID = last
if i < b {
wInfo.eID = last + a
} else {
wInfo.eID = last + a - 1
}
last = wInfo.eID + 1
go writeDataImp(&wInfo, &wg, arguments)
}
wg.Wait()
elapsed := time.Since(st)
seconds := float64(elapsed) / float64(time.Second)
log.Println("---- Spent", seconds, "seconds to insert", statis.totalRows, "records, speed:", float64(statis.totalRows)/seconds, "Rows/Second")
}
func writeDataImp(wInfo *WriteInfo, wg *sync.WaitGroup, arguments *ProArgs) {
defer wg.Done()
log.Println("Thread", wInfo.threadId, "writing sID", wInfo.sID, "eID", wInfo.eID)
// Connect to the server
conn, err := client.NewHTTPClient(client.HTTPConfig{
Addr: arguments.host,
Username: arguments.username,
Password: arguments.password,
Timeout: 300 * time.Second,
})
if err != nil {
log.Fatal(err)
}
defer conn.Close()
// Create database
_, err = queryDB(conn, fmt.Sprintf("create database %s", arguments.db), arguments.db)
if err != nil {
log.Fatal(err)
}
// Write data
counter := 0
totalRecords := 0
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: arguments.db,
Precision: "ms",
})
if err != nil {
log.Fatal(err)
}
for j := wInfo.sID; j <= wInfo.eID; j++ {
fileName := fmt.Sprintf("%s/testdata%d.csv", arguments.dataDir, j)
fs, err := os.Open(fileName)
if err != nil {
log.Printf("failed to open file %s", fileName)
log.Fatal(err)
}
log.Printf("open file %s success", fileName)
bfRd := bufio.NewReader(fs)
for {
sline, err := bfRd.ReadString('\n')
if err != nil {
break
}
sline = strings.TrimSuffix(sline, "\n")
s := strings.Split(sline, " ")
if len(s) != 6 {
continue
}
// Create a point and add to batch
tags := map[string]string{
"devid": s[0],
"devname": s[1],
"devgroup": s[2],
}
timestamp, _ := strconv.ParseInt(s[3], 10, 64)
temperature, _ := strconv.ParseInt(s[4], 10, 32)
humidity, _ := strconv.ParseFloat(s[5], 64)
fields := map[string]interface{}{
"temperature": temperature,
"humidity": humidity,
}
pt, err := client.NewPoint("devices", tags, fields, time.Unix(0, timestamp * int64(time.Millisecond)))
if err != nil {
log.Fatalln("Error: ", err)
}
bp.AddPoint(pt)
counter++
if counter >= arguments.rowsPerRequest {
if err := conn.Write(bp); err != nil {
log.Fatal(err)
}
totalRecords += counter
counter = 0
bp, err = client.NewBatchPoints(client.BatchPointsConfig{
Database: arguments.db,
Precision: "ms",
})
if err != nil {
log.Fatal(err)
}
}
}
fs.Close()
}
totalRecords += counter
if counter > 0 {
if err := conn.Write(bp); err != nil {
log.Fatal(err)
}
}
atomic.AddInt64(&statis.totalRows, int64(totalRecords))
}
func readData(arguments *ProArgs) {
log.Println("read data")
log.Println("---- sql:", arguments.sql)
conn, err := client.NewHTTPClient(client.HTTPConfig{
Addr: arguments.host,
Username: arguments.username,
Password: arguments.password,
Timeout: 300 * time.Second,
})
if err != nil {
log.Fatal(err)
}
defer conn.Close()
fs, err := os.Open(arguments.sql)
if err != nil {
log.Printf("failed to open file %s", arguments.sql)
log.Fatal(err)
}
log.Printf("open file %s success", arguments.sql)
bfRd := bufio.NewReader(fs)
for {
sline, err := bfRd.ReadString('\n')
if err != nil {
break
}
sline = strings.TrimSuffix(sline, "\n")
st := time.Now()
_, err = queryDB(conn, sline, arguments.db)
if err != nil {
log.Fatal(err)
}
elapsed := time.Since(st)
seconds := float64(elapsed) / float64(time.Second)
log.Println("---- Spent", seconds, "seconds to query ", sline)
}
}
func queryDB(conn client.Client, cmd string, db string) (res []client.Result, err error) {
query := client.Query{
Command: cmd,
Database: db,
}
response, err := conn.Query(query)
if err == nil {
if response.Error() != nil {
return res, response.Error()
}
res = response.Results
} else {
return res, err
}
return res, nil
}
select * from devices where devgroup='0';
select * from devices where devgroup='10';
select * from devices where devgroup='20';
select * from devices where devgroup='30';
select * from devices where devgroup='40';
select * from devices where devgroup='50';
select * from devices where devgroup='60';
select * from devices where devgroup='70';
select * from devices where devgroup='80';
select * from devices where devgroup='90';
select count(temperature) from devices where devgroup=~/[1-1][0-9]/;
select count(temperature) from devices where devgroup=~/[1-2][0-9]/;
select count(temperature) from devices where devgroup=~/[1-3][0-9]/;
select count(temperature) from devices where devgroup=~/[1-4][0-9]/;
select count(temperature) from devices where devgroup=~/[1-5][0-9]/;
select count(temperature) from devices where devgroup=~/[1-6][0-9]/;
select count(temperature) from devices where devgroup=~/[1-7][0-9]/;
select count(temperature) from devices where devgroup=~/[1-8][0-9]/;
select count(temperature) from devices where devgroup=~/[1-9][0-9]/;
select count(temperature) from devices;
select mean(temperature) from devices where devgroup=~/[1-1][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-2][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-3][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-4][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-5][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-6][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-7][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-8][0-9]/;
select mean(temperature) from devices where devgroup=~/[1-9][0-9]/;
select mean(temperature) from devices;
select sum(temperature) from devices where devgroup=~/[1-1][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-2][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-3][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-4][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-5][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-6][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-7][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-8][0-9]/;
select sum(temperature) from devices where devgroup=~/[1-9][0-9]/;
select sum(temperature) from devices;
select max(temperature) from devices where devgroup=~/[1-1][0-9]/;
select max(temperature) from devices where devgroup=~/[1-2][0-9]/;
select max(temperature) from devices where devgroup=~/[1-3][0-9]/;
select max(temperature) from devices where devgroup=~/[1-4][0-9]/;
select max(temperature) from devices where devgroup=~/[1-5][0-9]/;
select max(temperature) from devices where devgroup=~/[1-6][0-9]/;
select max(temperature) from devices where devgroup=~/[1-7][0-9]/;
select max(temperature) from devices where devgroup=~/[1-8][0-9]/;
select max(temperature) from devices where devgroup=~/[1-9][0-9]/;
select max(temperature) from devices;
select min(temperature) from devices where devgroup=~/[1-1][0-9]/;
select min(temperature) from devices where devgroup=~/[1-2][0-9]/;
select min(temperature) from devices where devgroup=~/[1-3][0-9]/;
select min(temperature) from devices where devgroup=~/[1-4][0-9]/;
select min(temperature) from devices where devgroup=~/[1-5][0-9]/;
select min(temperature) from devices where devgroup=~/[1-6][0-9]/;
select min(temperature) from devices where devgroup=~/[1-7][0-9]/;
select min(temperature) from devices where devgroup=~/[1-8][0-9]/;
select min(temperature) from devices where devgroup=~/[1-9][0-9]/;
select min(temperature) from devices;
select spread(temperature) from devices where devgroup=~/[1-1][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-2][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-3][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-4][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-5][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-6][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-7][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-8][0-9]/;
select spread(temperature) from devices where devgroup=~/[1-9][0-9]/;
select spread(temperature) from devices;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-1][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-2][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-3][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-4][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-5][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-6][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-7][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-8][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-9][0-9]/ group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices group by devgroup;
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-1][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-2][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-3][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-4][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-5][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-6][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-7][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-8][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices where devgroup=~/[1-9][0-9]/ group by time(1m);
select count(temperature), sum(temperature), mean(temperature) from devices group by time(1m);
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.opentsdb.test</groupId>
<artifactId>opentsdbtest</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>jar</packaging>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-plugins</artifactId>
<version>30</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.1.0</version>
<configuration>
<archive>
<manifest>
<mainClass>OpentsdbTest</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
<name>opentsdbtest</name>
<!-- FIXME change it to the project's website -->
<url>http://www.example.com</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>net.opentsdb</groupId>
<artifactId>opentsdb_gwt_theme</artifactId>
<version>1.0.0</version>
</dependency>
<dependency>
<groupId>net.opentsdb</groupId>
<artifactId>opentsdb</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.0-jre</version>
</dependency>
<dependency>
<groupId>com.google.gwt</groupId>
<artifactId>gwt-user</artifactId>
<version>2.6.0</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>2.9.10</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>2.9.10</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>2.10.0.pr1</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<version>3.10.6.Final</version>
</dependency>
<dependency>
<groupId>com.stumbleupon</groupId>
<artifactId>async</artifactId>
<version>1.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.4.1</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-jexl</artifactId>
<version>2.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.7</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>4.3</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpasyncclient</artifactId>
<version>4.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.13</version>
</dependency>
<dependency>
<groupId>org.jgrapht</groupId>
<artifactId>jgrapht-core</artifactId>
<version>0.9.1</version>
</dependency>
<dependency>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
<version>2.21.1</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-core</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-query-builder</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-mapper-runtime</artifactId>
<version>4.1.0</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>2.7</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-1.2-api</artifactId>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.0</version>
</dependency>
<dependency>
<groupId>com.github.eulery</groupId>
<artifactId>opentsdb-java-sdk</artifactId>
<version>1.1.4</version>
</dependency>
</dependencies>
<profiles>
<profile>
<id>hbase</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.hbase</groupId>
<artifactId>asynchbase</artifactId>
<version>1.8.2</version>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.14</version>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>jline</groupId>
<artifactId>jline</artifactId>
</exclusion>
<exclusion>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;
import net.opentsdb.uid.NoSuchUniqueName;
import net.opentsdb.uid.UniqueId.UniqueIdType;
import net.opentsdb.utils.Config;
import java.net.URL;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.ResponseHandler;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import java.io.BufferedWriter;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.Charset;
import java.text.DecimalFormat;
import java.util.Random;
import java.util.ArrayList;
import java.util.List;
import java.util.LinkedHashMap;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.*;
import java.math.*;
import java.lang.reflect.Method;
import org.apache.log4j.Logger;
import org.apache.log4j.LogManager;
import org.apache.log4j.Level;
public class OpentsdbTest{
//static { System.setProperty("logback.configurationFile", "/home/ubuntu/fang/opentsdb/opentsdbtest/logback.xml");}
static { System.setProperty("logback.configurationFile", "/etc/opentsdb/logback.xml");}
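// Command-line flags parsed in main() below:
//   -dataDir <path>        directory holding testdata<N>.csv files (default: /home/ubuntu/testdata)
//   -numofFiles <n>        number of files to write; 0 switches to query mode
//   -writeClients <n>      number of concurrent writer threads
//   -rowsPerRequest <n>    rows batched into each HTTP put
//   -sql q1|q2|q3|q4       query workload to run in query mode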
public static void main(String args[]) {
Logger logger = LogManager.getLogger(OpentsdbTest.class);
logger.setLevel(Level.OFF);
// begin to parse argument
String datadir = "/home/ubuntu/testdata";
String sqlchoice = "q1";
int numOfRows = 1000000;
int numOfFiles = 0;
int numOfClients = 1;
int rowsPerRequest = 1;
for (int i = 0; i < args.length; ++i) {
if (args[i].equalsIgnoreCase("-dataDir")) {
if (i < args.length - 1) {
datadir = args[++i];
}
} else if (args[i].equalsIgnoreCase("-numofFiles")) {
if (i < args.length - 1) {
numOfFiles = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-rowsPerRequest")) {
if (i < args.length - 1) {
rowsPerRequest = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-writeClients")) {
if (i < args.length - 1) {
numOfClients = Integer.parseInt(args[++i]);
}
} else if (args[i].equalsIgnoreCase("-sql")) {
sqlchoice = args[++i];
}
}
System.out.println("parameters:\n");
if (numOfFiles >0) {
// write data
System.out.printf("----dataDir:%s\n", datadir);
System.out.printf("----numOfFiles:%d\n", numOfFiles);
System.out.printf("----numOfClients:%d\n", numOfClients);
System.out.printf("----rowsPerRequest:%d\n", rowsPerRequest);
try {
// begin to insert data
System.out.printf("----begin to insert data\n");
long startTime = System.currentTimeMillis();
int a = numOfFiles/numOfClients;
int b = numOfFiles%numOfClients;
int last = 0;
WriteThread[] writethreads = new WriteThread[numOfClients];
int[] wargs = new int[2]; // data file start, end
wargs[0] = numOfRows; //rows to be read from each file
wargs[1] = rowsPerRequest;
int fstart =0;
int fend =0;
for (int i = 0; i<numOfClients; ++i) {
if (i<b) {
fstart = last;
fend = last+a;
last = last+a+1;
writethreads[i] = new WriteThread(fstart,fend,wargs,datadir);
System.out.printf("----Thread %d begin to write\n",i);
writethreads[i].start();
} else {
fstart = last;
fend = last+a-1;
last = last+a;
writethreads[i] = new WriteThread(fstart,fend,wargs,datadir);
System.out.printf("----Thread %d begin to write\n",i);
writethreads[i].start();
}
}
for (int i =0; i<numOfClients; ++i) {
try {
writethreads[i].join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
long stopTime = System.currentTimeMillis();
float elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
float speeds = numOfRows*numOfFiles/elapseTime;
System.out.printf("---- insertation speed: %f Rows/Second\n",speeds);
} catch (Exception ex) {
ex.printStackTrace();
System.exit(1);
} finally {
System.out.printf("---- insertion end\n");
}
// above:write part; below: read part;
} else {
try (CloseableHttpClient httpclient = HttpClients.createDefault()) {
String filter_reg;
String get_url;
long startTime;
long stopTime;
float elapseTime;
CloseableHttpResponse responseBody;
StringEntity stringEntity;
HttpPost httpPost;
String qjson;
for (int ig = 10; ig <110; ig = ig+10) {
if (ig == 10) {
filter_reg = "\\b[0-9]\\b";
} else {
filter_reg = "\\b" + "([0-9]|"
+ "[" + "1" + "-"
+ Integer.toString(ig/10-1) + "][0-9])" +"\\b";
}
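// filter_reg matches the devgroup values 0..ig-1; e.g. for ig = 30 it becomes
// \b([0-9]|[1-2][0-9])\b, so every iteration widens the filter by ten device groups.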
switch (sqlchoice) {
case "q1":
get_url = "http://127.0.0.1:4242/api/query?";
/*
get_url = get_url + "start=1563249700&m=none:temperature{devgroup=";
get_url = get_url + String.valueOf(ig-10) +"}";
*/
startTime = System.currentTimeMillis();
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"none\",\n" +
" \"metric\": \"temperature\",\n" +
" \"tags\": {\n" +
" \"devgroup\": " + "\"" + Integer.toString(ig-10) + "\"" + "\n" +
" }\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
/*
System.out.println(responseBody.getStatusLine());
System.out.println(qjson);
*/
responseBody.close();
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to get data when devgroup = %d\n",elapseTime, ig-10);
break;
case "q2":
//count
startTime = System.currentTimeMillis();
get_url = "http://127.0.0.1:4242/api/query?";
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"count\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupby\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to count data when devgroup < %d\n",elapseTime, ig);
responseBody.close();
//avg
startTime = System.currentTimeMillis();
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"avg\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupby\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to avg data when devgroup < %d\n",elapseTime, ig);
responseBody.close();
//sum
startTime = System.currentTimeMillis();
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"sum\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" +",\n" +
" \"groupby\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to sum data when devgroup < %d\n",elapseTime, ig);
responseBody.close();
//max
startTime = System.currentTimeMillis();
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"max\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupby\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to max data when devgroup < %d\n",elapseTime, ig);
responseBody.close();
//min
startTime = System.currentTimeMillis();
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"min\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupby\": false\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spent %f seconds to min data when devgroup < %d\n",elapseTime, ig);
responseBody.close();
break;
case "q3":
startTime = System.currentTimeMillis();
get_url = "http://127.0.0.1:4242/api/query?";
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"count\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupBy\": true\n" +
" }\n" +
" ]\n" +
" },\n" +
" {\n" +
" \"aggregator\": \"sum\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupBy\": true\n" +
" }\n" +
" ]\n" +
" },\n" +
" {\n" +
" \"aggregator\": \"avg\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupBy\": true\n" +
" }\n" +
" ]\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
/*
System.out.println(responseBody.getStatusLine());
System.out.println(qjson);
*/
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to group data by devgroup when devgroup < %d\n",elapseTime, ig);
responseBody.close();
break;
case "q4":
startTime = System.currentTimeMillis();
get_url = "http://127.0.0.1:4242/api/query?";
httpPost = new HttpPost(get_url);
qjson = " {\n" +
" \"start\": 1563249700,\n" +
" \"queries\": [\n" +
" {\n" +
" \"aggregator\": \"none\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupBy\": false\n" +
" }\n" +
" ],\n" +
" \"downsample\": \"1m-sum\"\n" +
" },\n" +
" {\n" +
" \"aggregator\": \"none\",\n" +
" \"metric\": \"temperature\",\n" +
" \"filters\": [\n"+
" {\n" +
" \"type\": \"regexp\",\n" +
" \"tagk\": \"devgroup\",\n" +
" \"filter\": " +"\"" + filter_reg +"\"" + ",\n" +
" \"groupBy\": false\n" +
" }\n" +
" ],\n" +
" \"downsample\": \"1m-avg\"\n" +
" }\n" +
" ]\n" +
" }";
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
stringEntity = new StringEntity(qjson);
httpPost.setEntity(stringEntity);
responseBody = httpclient.execute(httpPost);
/*
System.out.println(responseBody.getStatusLine());
System.out.println(qjson);
*/
stopTime = System.currentTimeMillis();
elapseTime = stopTime - startTime;
elapseTime = elapseTime/1000;
System.out.printf("Spend %f seconds to group data by time when devgroup < %d\n",elapseTime, ig);
responseBody.close();
break;
}
}
httpclient.close();
} catch (Exception e) {
e.printStackTrace();
}
System.out.println("query end:----\n");
} // end write or query
System.exit(0);
}// end main
}// end class
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;
import net.opentsdb.uid.NoSuchUniqueName;
import net.opentsdb.uid.UniqueId.UniqueIdType;
import net.opentsdb.utils.Config;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.ResponseHandler;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.http.client.methods.*;
import java.io.BufferedWriter;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.Charset;
import java.text.DecimalFormat;
import java.util.Random;
import java.util.ArrayList;
import java.util.List;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.math.*;
import java.lang.reflect.Method;
public class WriteThread extends Thread {
private int[] wargs; // fstart, fend, rows to be read, rows perrequest
private String fdir;
private int fstart;
private int fend;
public WriteThread (int fstart, int fend,int[] wargs, String fdir) {
this.fstart = fstart;
this.fend = fend;
this.fdir = fdir;
this.wargs = wargs;
}
// begin to insert in this thread
public void run() {
StringEntity stringEntity;
String port = "4242";
String put_url = "http://127.0.0.1:"+port+"/api/put?summary";
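// Each batch is POSTed to OpenTSDB's HTTP write endpoint; the "summary" query
// parameter asks the server to return success/failure counts for the request.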
try (CloseableHttpClient httpclient = HttpClients.createDefault()) {
/*
httpclient.getHttpConnectionManager().getParams()
.setConnectionTimeout(1000);
httpclient.getHttpConnectionManager().getParams()
.setSoTimeout(5000);
*/
for (int i=fstart; i<=fend; i++) {
String csvfile;
csvfile = fdir + "/testdata"+ Integer.toString(i)+".csv";
BufferedReader br = null;
String line = "";
String cvsSplitBy = " ";
try {
br = new BufferedReader(new FileReader(csvfile));
System.out.println("---- begin to read file " +csvfile+"\n");
for (int itotalrow =0; itotalrow<wargs[0]; itotalrow=itotalrow+wargs[1]) {
HttpPost httpPost = new HttpPost(put_url);
httpPost.setHeader("Accept", "application/json");
httpPost.setHeader("Content-type", "application/json");
String totaljson = "[\n";
for (int irow =0; irow<wargs[1]; ++irow) {
line = br.readLine();
if (line !=null) {
String[] meter = line.split(cvsSplitBy);
// devid, devname,devgroup,ts,temperature,humidity
BigInteger timestamp = new BigInteger(meter[3]);
timestamp = timestamp.divide(BigInteger.valueOf(1000));
long ts = timestamp.longValue();
int temperature = Integer.parseInt(meter[4]);
float humidity = Float.parseFloat(meter[5]);
String onejson = " {\n" +
" \"metric\": \"temperature\",\n" +
" \"timestamp\": " + String.valueOf(ts) + ",\n" +
" \"value\": " + String.valueOf(temperature) + ",\n" +
" \"tags\" : {\n" +
" \"devid\":" +" \"" + meter[0] + "\",\n" +
" \"devname\":" +" \"" + meter[1] + "\",\n" +
" \"devgroup\":" +" \"" + meter[2] + "\"\n" +
" }\n" +
" },\n" +
" {\n" +
" \"metric\": \"humidity\",\n" +
" \"timestamp\": " + String.valueOf(ts) + ",\n" +
" \"value\": " + String.valueOf(humidity) + ",\n" +
" \"tags\" : {\n" +
" \"devid\":" +" \"" + meter[0] + "\",\n" +
" \"devname\":" +" \"" + meter[1] + "\",\n" +
" \"devgroup\":" +" \"" + meter[2] + "\"\n" +
" }\n";
if (irow == 0) {
totaljson = totaljson + onejson;
} else if (irow < wargs[1]) {
totaljson = totaljson + " },\n" + onejson;
}
} //end one line reading
} //end on batch put
totaljson = totaljson + " }\n]";
stringEntity = new StringEntity(totaljson);
httpPost.setEntity(stringEntity);
CloseableHttpResponse responseBody = httpclient.execute(httpPost);
/*
System.out.println(responseBody.getStatusLine());
System.out.println(totaljson);
*/
responseBody.close();
}// end one file reading
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}//end file iteration
httpclient.close();
} catch (Exception e) {
e.printStackTrace();
System.out.println("failed to connect");
}
}//end run
}//end class
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- Log path: the mobileLog folder under tomcat's logs directory; logback creates
the folder automatically, so log files are written once this is set:
<substitutionProperty name="logbase" value="${catalina.base}/logs/mobileLog/"
/> -->
<substitutionProperty name="logbase" value="${user.dir}/logs/" />
<!-- Configure the log output file -->
<jmxConfigurator />
<appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
<layout class="ch.qos.logback.classic.PatternLayout">
<pattern>%date [%thread] %-5level %logger{80} - %msg%n</pattern>
</layout>
</appender>
<!-- File appender (size-based rolling: the log file is rolled over once it exceeds the configured maximum size) -->
<appender name="logfile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<Encoding>UTF-8</Encoding>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<File>${logbase}%d{yyyy-MM-dd}.log.html</File>
<FileNamePattern>${logbase}.%d{yyyy-MM-dd}.log.html.zip
</FileNamePattern>
</rollingPolicy>
<triggeringPolicy
class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
<MaxFileSize>2MB</MaxFileSize>
</triggeringPolicy>
<layout class="ch.qos.logback.classic.html.HTMLLayout">
<pattern>%date%level%thread%10logger%file%line%msg</pattern>
</layout>
</appender>
<!-- Output by Email -->
<!--
<appender name="Email" class="ch.qos.logback.classic.net.SMTPAppender">
<SMTPHost>stmp host name</SMTPHost>
<To>Email Address</To>
<To>Email Address</To>
<From>Email Address</From>
<Subject>TESTING Email Function: %logger{20} - %m</Subject>
<layout class="ch.qos.logback.classic.html.HTMLLayout">
<pattern>%date%level%thread%10logger%file%line%msg</pattern>
</layout>
</appender> -->
<!-- Output to Database -->
<!--
<appender name="DB" class="ch.qos.logback.classic.db.DBAppender">
<connectionSource class="ch.qos.logback.core.db.DriverManagerConnectionSource">
<driverClass>com.mysql.jdbc.Driver</driverClass>
<url>jdbc:mysql://localhost:3306/test</url>
<user>root</user>
<password>trend_dev</password>
</connectionSource>
</appender> -->
<root>
<level value="debug" />
<appender-ref ref="logfile" />
<appender-ref ref="logfile" />
</root>
</configuration>
CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
IF (TD_LINUX)
add_executable(tdengineTest tdengineTest.c)
target_link_libraries(tdengineTest taos_static tutil common pthread)
ENDIF()
IF (TD_DARWIN)
add_executable(tdengineTest tdengineTest.c)
target_link_libraries(tdengineTest taos_static tutil common pthread)
ENDIF()
ROOT=./
TARGET=exe
LFLAGS = '-Wl,-rpath,/usr/lib' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99
all: $(TARGET)
exe:
gcc $(CFLAGS) ./tdengineTest.c -o $(ROOT)/tdengineTest $(LFLAGS)
clean:
rm -f $(ROOT)/tdengineTest
select * from db.devices where devgroup=0;
select * from db.devices where devgroup=10;
select * from db.devices where devgroup=20;
select * from db.devices where devgroup=30;
select * from db.devices where devgroup=40;
select * from db.devices where devgroup=50;
select * from db.devices where devgroup=60;
select * from db.devices where devgroup=70;
select * from db.devices where devgroup=80;
select * from db.devices where devgroup=90;
select count(*) from db.devices where devgroup<10;
select count(*) from db.devices where devgroup<20;
select count(*) from db.devices where devgroup<30;
select count(*) from db.devices where devgroup<40;
select count(*) from db.devices where devgroup<50;
select count(*) from db.devices where devgroup<60;
select count(*) from db.devices where devgroup<70;
select count(*) from db.devices where devgroup<80;
select count(*) from db.devices where devgroup<90;
select count(*) from db.devices;
select avg(temperature) from db.devices where devgroup<10;
select avg(temperature) from db.devices where devgroup<20;
select avg(temperature) from db.devices where devgroup<30;
select avg(temperature) from db.devices where devgroup<40;
select avg(temperature) from db.devices where devgroup<50;
select avg(temperature) from db.devices where devgroup<60;
select avg(temperature) from db.devices where devgroup<70;
select avg(temperature) from db.devices where devgroup<80;
select avg(temperature) from db.devices where devgroup<90;
select avg(temperature) from db.devices;
select sum(temperature) from db.devices where devgroup<10;
select sum(temperature) from db.devices where devgroup<20;
select sum(temperature) from db.devices where devgroup<30;
select sum(temperature) from db.devices where devgroup<40;
select sum(temperature) from db.devices where devgroup<50;
select sum(temperature) from db.devices where devgroup<60;
select sum(temperature) from db.devices where devgroup<70;
select sum(temperature) from db.devices where devgroup<80;
select sum(temperature) from db.devices where devgroup<90;
select sum(temperature) from db.devices;
select max(temperature) from db.devices where devgroup<10;
select max(temperature) from db.devices where devgroup<20;
select max(temperature) from db.devices where devgroup<30;
select max(temperature) from db.devices where devgroup<40;
select max(temperature) from db.devices where devgroup<50;
select max(temperature) from db.devices where devgroup<60;
select max(temperature) from db.devices where devgroup<70;
select max(temperature) from db.devices where devgroup<80;
select max(temperature) from db.devices where devgroup<90;
select max(temperature) from db.devices;
select min(temperature) from db.devices where devgroup<10;
select min(temperature) from db.devices where devgroup<20;
select min(temperature) from db.devices where devgroup<30;
select min(temperature) from db.devices where devgroup<40;
select min(temperature) from db.devices where devgroup<50;
select min(temperature) from db.devices where devgroup<60;
select min(temperature) from db.devices where devgroup<70;
select min(temperature) from db.devices where devgroup<80;
select min(temperature) from db.devices where devgroup<90;
select min(temperature) from db.devices;
select spread(temperature) from db.devices where devgroup<10;
select spread(temperature) from db.devices where devgroup<20;
select spread(temperature) from db.devices where devgroup<30;
select spread(temperature) from db.devices where devgroup<40;
select spread(temperature) from db.devices where devgroup<50;
select spread(temperature) from db.devices where devgroup<60;
select spread(temperature) from db.devices where devgroup<70;
select spread(temperature) from db.devices where devgroup<80;
select spread(temperature) from db.devices where devgroup<90;
select spread(temperature) from db.devices;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<10 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<20 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<30 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<40 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<50 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<60 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<70 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<80 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<90 group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices group by devgroup;
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<10 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<20 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<30 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<40 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<50 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<60 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<70 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<80 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices where devgroup<90 interval(1m);
select count(temperature), sum(temperature), avg(temperature) from db.devices interval(1m);
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <taos.h>
#include <time.h>
#include <pthread.h>
#include <sys/time.h>
#include <inttypes.h>
typedef struct {
char sql[256];
char dataDir[256];
int filesNum;
int clients;
int rowsPerRequest;
int write;
} ProArgs;
typedef struct {
int64_t totalRows;
} StatisInfo;
typedef struct {
pthread_t pid;
int threadId;
int sID;
int eID;
} ThreadObj;
static StatisInfo statis;
static ProArgs arguments;
void parseArg(int argc, char *argv[]);
void writeData();
void readData();
int main(int argc, char *argv[]) {
statis.totalRows = 0;
parseArg(argc, argv);
if (arguments.write) {
writeData();
} else {
readData();
}
}
void parseArg(int argc, char *argv[]) {
strcpy(arguments.sql, "./sqlCmd.txt");
strcpy(arguments.dataDir, "./testdata");
arguments.filesNum = 2;
arguments.clients = 1;
arguments.rowsPerRequest = 100;
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-sql") == 0) {
if (i < argc - 1) {
strcpy(arguments.sql, argv[++i]);
}
else {
fprintf(stderr, "'-sql' requires a parameter, default:%s\n", arguments.sql);
exit(EXIT_FAILURE);
}
}
else if (strcmp(argv[i], "-dataDir") == 0) {
if (i < argc - 1) {
strcpy(arguments.dataDir, argv[++i]);
}
else {
fprintf(stderr, "'-dataDir' requires a parameter, default:%s\n", arguments.dataDir);
exit(EXIT_FAILURE);
}
}
else if (strcmp(argv[i], "-numOfFiles") == 0) {
if (i < argc - 1) {
arguments.filesNum = atoi(argv[++i]);
}
else {
fprintf(stderr, "'-numOfFiles' requires a parameter, default:%d\n", arguments.filesNum);
exit(EXIT_FAILURE);
}
}
else if (strcmp(argv[i], "-clients") == 0) {
if (i < argc - 1) {
arguments.clients = atoi(argv[++i]);
}
else {
fprintf(stderr, "'-clients' requires a parameter, default:%d\n", arguments.clients);
exit(EXIT_FAILURE);
}
}
else if (strcmp(argv[i], "-rowsPerRequest") == 0) {
if (i < argc - 1) {
arguments.rowsPerRequest = atoi(argv[++i]);
}
else {
fprintf(stderr, "'-rowsPerRequest' requires a parameter, default:%d\n", arguments.rowsPerRequest);
exit(EXIT_FAILURE);
}
}
else if (strcmp(argv[i], "-w") == 0) {
arguments.write = 1;
}
}
}
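/* Example invocations (hypothetical paths, for illustration only):
 *   write: ./tdengineTest -w -dataDir ./testdata -numOfFiles 10 -clients 4 -rowsPerRequest 100
 *   read:  ./tdengineTest -sql ./sqlCmd.txt -clients 4
 */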
static void taos_error(TAOS_RES *tres, TAOS *conn) {
printf("TDengine error: %s\n", tres?taos_errstr(tres):"null result");
taos_close(conn);
exit(1);
}
int64_t getTimeStampMs() {
struct timeval systemTime;
gettimeofday(&systemTime, NULL);
return (int64_t)systemTime.tv_sec * 1000L + (int64_t)systemTime.tv_usec / 1000;
}
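/* Writer thread body: each thread opens its own connection, reads CSV files
 * sID..eID (columns: devid devname devgroup ts temperature humidity) and
 * batches rowsPerRequest rows into one multi-row INSERT before issuing it. */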
void writeDataImp(void *param) {
ThreadObj *pThread = (ThreadObj *)param;
printf("Thread %d, writing sID %d, eID %d\n", pThread->threadId, pThread->sID, pThread->eID);
void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
if (taos == NULL) {
// where to find errstr?
// taos_error(NULL, taos);
printf("TDengine error: %s\n", "failed to connect");
exit(1);
}
TAOS_RES* result = taos_query(taos, "use db");
int32_t code = taos_errno(result);
if (code != 0) {
taos_error(result, taos);
}
taos_free_result(result);
char *sql = calloc(1, 8*1024*1024);
int sqlLen = 0;
int lastMachineid = 0;
int counter = 0;
int totalRecords = 0;
for (int j = pThread->sID; j <= pThread->eID; j++) {
char fileName[300];
sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, j);
FILE *fp = fopen(fileName, "r");
if (fp == NULL) {
printf("failed to open file %s\n", fileName);
exit(1);
}
printf("open file %s success\n", fileName);
char *line = NULL;
size_t len = 0;
while (!feof(fp)) {
free(line);
line = NULL;
len = 0;
getline(&line, &len, fp);
if (line == NULL) break;
if (strlen(line) < 10) continue;
int machineid;
char machinename[16];
int machinegroup;
int64_t timestamp;
int temperature;
float humidity;
sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, &timestamp, &temperature, &humidity);
if (counter == 0) {
sqlLen = sprintf(sql, "insert into");
}
if (lastMachineid != machineid) {
lastMachineid = machineid;
sqlLen += sprintf(sql + sqlLen, " dev%d values",
machineid);
}
sqlLen += sprintf(sql + sqlLen, "(%" PRId64 ",%d,%f)", timestamp, temperature, humidity);
counter++;
if (counter >= arguments.rowsPerRequest) {
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("insert into dev%d values (%" PRId64 ",%d,%f)\n",machineid, timestamp, temperature, humidity);
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(result));
}
taos_free_result(result);
totalRecords += counter;
counter = 0;
lastMachineid = -1;
sqlLen = 0;
}
}
fclose(fp);
}
if (counter > 0) {
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
// printf("insert into dev%d using devices tags(%d,'%s',%d) values (%" PRId64 ",%d,%f)",machineid, machineid, machinename, machinegroup, timestamp, temperature, humidity);
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}
taos_free_result(result);
totalRecords += counter;
}
__sync_fetch_and_add(&statis.totalRows, totalRecords);
free(sql);
}
void writeData() {
printf("write data\n");
printf("---- clients: %d\n", arguments.clients);
printf("---- dataDir: %s\n", arguments.dataDir);
printf("---- numOfFiles: %d\n", arguments.filesNum);
printf("---- rowsPerRequest: %d\n", arguments.rowsPerRequest);
taos_init();
void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
if (taos == NULL) {
// where to find errstr?
// taos_error(NULL, taos);
printf("TDengine error: %s\n", "failed to connect");
exit(1);
}
TAOS_RES *result = taos_query(taos, "create database if not exists db");
int32_t code = taos_errno(result);
if (code != 0) {
taos_error(result, taos);
}
taos_free_result(result);
result = taos_query(taos,
"create stable if not exists db.devices(ts timestamp, temperature int, humidity float) "
"tags(devid int, devname binary(16), devgroup int)");
code = taos_errno(result);
if (code != 0) {
taos_error(result, taos);
}
taos_free_result(result);
//create tables before insert the data
result = taos_query(taos, "use db");
code = taos_errno(result);
if (code != 0) {
taos_error(result, taos);
}
taos_free_result(result);
char *sql = calloc(1, 8*1024*1024);
int sqlLen = 0;
int lastMachineid = 0;
int counter = 0;
int totalRecords = 0;
for (int i = 0; i < arguments.filesNum; i++) {
char fileName[300];
sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, i);
FILE *fp = fopen(fileName, "r");
if (fp == NULL) {
printf("failed to open file %s\n", fileName);
exit(1);
}
printf("open file %s success\n", fileName);
char *line = NULL;
size_t len = 0;
while (!feof(fp)) {
free(line);
line = NULL;
len = 0;
getline(&line, &len, fp);
if (line == NULL) break;
if (strlen(line) < 10) continue;
int machineid;
char machinename[16];
int machinegroup;
int64_t timestamp;
int temperature;
float humidity;
sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, &timestamp, &temperature, &humidity);
if (counter == 0) {
sqlLen = sprintf(sql, "create table if not exists");
}
if (lastMachineid != machineid) {
lastMachineid = machineid;
sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d)", machineid, machineid, machinename, machinegroup);
}
counter++;
if (counter >= arguments.rowsPerRequest) {
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("create table error:%d reason:%s\n", code, taos_errstr(result));
}
taos_free_result(result);
totalRecords += counter;
counter = 0;
lastMachineid = -1;
sqlLen = 0;
}
}
fclose(fp);
}
int64_t st = getTimeStampMs();
int a = arguments.filesNum / arguments.clients;
int b = arguments.filesNum % arguments.clients;
int last = 0;
ThreadObj *threads = calloc((size_t)arguments.clients, sizeof(ThreadObj));
for (int i = 0; i < arguments.clients; ++i) {
ThreadObj *pthread = threads + i;
pthread_attr_t thattr;
pthread->threadId = i + 1;
pthread->sID = last;
if (i < b) {
pthread->eID = last + a;
} else {
pthread->eID = last + a - 1;
}
last = pthread->eID + 1;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
pthread_create(&pthread->pid, &thattr, (void *(*)(void *))writeDataImp, pthread);
}
for (int i = 0; i < arguments.clients; i++) {
pthread_join(threads[i].pid, NULL);
}
int64_t elapsed = getTimeStampMs() - st;
float seconds = (float)elapsed / 1000;
float rs = (float)statis.totalRows / seconds;
free(threads);
printf("---- Spent %f seconds to insert %" PRId64 " records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs);
}
void readDataImp(void *param)
{
ThreadObj *pThread = (ThreadObj *)param;
printf("Thread %d\n", pThread->threadId);
FILE *fp = fopen(arguments.sql, "r");
if (fp == NULL) {
printf("failed to open file %s\n", arguments.sql);
exit(1);
}
printf("open file %s success\n", arguments.sql);
void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
if (taos == NULL) {
// where to find errstr?
// taos_error(NULL, taos);
printf("TDengine error: %s\n", "failed to connect");
exit(1);
}
char *line = NULL;
size_t len = 0;
while (!feof(fp)) {
free(line);
line = NULL;
len = 0;
getline(&line, &len, fp);
if (line == NULL) break;
if (strlen(line) < 10) continue;
int64_t st = getTimeStampMs();
TAOS_RES *result = taos_query(taos, line);
int32_t code = taos_errno(result);
if (code != 0) {
taos_error(result, taos);
}
TAOS_ROW row;
int rows = 0;
//int num_fields = taos_field_count(taos);
//TAOS_FIELD *fields = taos_fetch_fields(result);
while ((row = taos_fetch_row(result))) {
rows++;
//char temp[256];
//taos_print_row(temp, row, fields, num_fields);
//printf("%s\n", temp);
}
taos_free_result(result);
int64_t elapsed = getTimeStampMs() - st;
float seconds = (float)elapsed / 1000;
printf("---- Spent %f seconds to retrieve %d records, Thread:%d query: %s\n", seconds, rows, pThread->threadId, line);
}
fclose(fp);
}
void readData() {
printf("read data\n");
printf("---- sql: %s\n", arguments.sql);
printf("---- clients: %d\n", arguments.clients);
void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
if (taos == NULL) {
// where to find errstr?
// taos_error(NULL, taos);
printf("TDengine error: %s\n", "failed to connect");
exit(1);
}
ThreadObj *threads = calloc((size_t)arguments.clients, sizeof(ThreadObj));
for (int i = 0; i < arguments.clients; ++i) {
ThreadObj *pthread = threads + i;
pthread_attr_t thattr;
pthread->threadId = i + 1;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
pthread_create(&pthread->pid, &thattr, (void *(*)(void *))readDataImp, pthread);
}
for (int i = 0; i < arguments.clients; i++) {
pthread_join(threads[i].pid, NULL);
}
free(threads);
}
#include "qSqlparser.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size){
char *new_str = (char *)malloc(size+1);
if (new_str == NULL){
return 0;
}
memcpy(new_str, data, size);
new_str[size] = '\0';
qSqlParse(new_str);
free(new_str);
return 0;
}
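/* Build sketch (an assumption -- build flags are not part of this repo): compile
 * with clang's libFuzzer, e.g. clang -g -fsanitize=fuzzer <this file> plus the
 * TDengine parser sources/libraries, then run the binary on a seed corpus. */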
@echo off
echo ==== start Go connector test cases test ====
cd /d %~dp0
set severIp=%1
set serverPort=%2
if "%severIp%"=="" (set severIp=127.0.0.1)
if "%serverPort%"=="" (set serverPort=6030)
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
cd case001
case001.bat %severIp% %serverPort%
rem cd case002
rem case002.bat
:: cd case002
:: case002.bat
rem cd nanosupport
rem nanoCase.bat
:: cd nanosupport
:: nanoCase.bat
#!/bin/bash
echo "==== start Go connector test cases test ===="
severIp=$1
serverPort=$2
if [ ! -n "$severIp" ]; then
severIp=127.0.0.1
fi
if [ ! -n "$serverPort" ]; then
serverPort=6030
fi
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
bash ./case001/case001.sh $severIp $serverPort
bash ./case002/case002.sh $severIp $serverPort
#bash ./case003/case003.sh $severIp $serverPort
bash ./nanosupport/nanoCase.sh $severIp $serverPort
@echo off
echo ==== start run cases001.go
del go.*
go mod init demotest
go build
demotest.exe -h %1 -p %2
cd ..
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"database/sql"
"flag"
"fmt"
_ "github.com/taosdata/driver-go/v2/taosSql"
"log"
"strconv"
"time"
)
type config struct {
hostName string
serverPort int
user string
password string
}
var configPara config
var url string
func init() {
flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.")
flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.")
flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
flag.Parse()
}
func printAllArgs() {
fmt.Printf("\n============= args parse result: =============\n")
fmt.Printf("hostName: %v\n", configPara.hostName)
fmt.Printf("serverPort: %v\n", configPara.serverPort)
fmt.Printf("usr: %v\n", configPara.user)
fmt.Printf("password: %v\n", configPara.password)
fmt.Printf("================================================\n")
}
func main() {
printAllArgs()
taosDriverName := "taosSql"
demodb := "demodb"
demot := "demot"
fmt.Printf("\n======== start demo test ========\n")
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/"
// open connect to taos server
fmt.Printf("url:%s",url)
db, err := sql.Open(taosDriverName, url)
if err != nil {
log.Fatalf("Open database error: %s\n", err)
}
defer db.Close()
drop_database(db, demodb)
create_database(db, demodb)
use_database(db, demodb)
create_table(db, demot)
insert_data(db, demot)
select_data(db, demot)
fmt.Printf("\n======== start stmt mode test ========\n")
demodbStmt := "demodbStmt"
demotStmt := "demotStmt"
drop_database_stmt(db, demodbStmt)
create_database_stmt(db, demodbStmt)
use_database_stmt(db, demodbStmt)
create_table_stmt(db, demotStmt)
insert_data_stmt(db, demotStmt)
select_data_stmt(db, demotStmt)
fmt.Printf("\n======== end demo test ========\n")
}
func drop_database(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
res, err := db.Exec("drop database if exists " + demodb)
checkErr(err, "drop database if exists "+demodb)
affectd, err := res.RowsAffected()
checkErr(err, "drop db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
// sleep 50 milliseconds
time.Sleep(time.Duration(50)* time.Millisecond)
}
func create_database(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
// create database
res, err := db.Exec("create database " + demodb)
checkErr(err, "create db, db.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "create db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func use_database(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
// use database
res, err := db.Exec("use " + demodb) // notes: must no quote to db name
checkErr(err, "use db db.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "use db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func create_table(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
// create table
res, err := db.Exec("create table " + demot + " (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)")
checkErr(err, "create table db.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "create table res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func insert_data(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
// insert data
res, err := db.Exec("insert into " + demot +
" values (now, 100, 'beijing', 10, true, 'one', 123.456, 123.456)" +
" (now+1s, 101, 'shanghai', 11, true, 'two', 789.123, 789.123)" +
" (now+2s, 102, 'shenzhen', 12, false, 'three', 456.789, 456.789)")
checkErr(err, "insert data, db.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "insert data res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func select_data(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
fmt.Println(demot)
rows, err := db.Query("select * from ? ", demot) // go text mode
fmt.Println("end query",err)
checkErr(err, "select db.Query")
fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
var affectd int
//decoder := mahonia.NewDecoder("gbk") // decode characters from the original ANSI-format text file using GBK
fmt.Println("start next")
for rows.Next() {
var ts time.Time
var name string
var id int
var len int8
var flag bool
var notes string
var fv float32
var dv float64
err = rows.Scan(&ts, &id, &name, &len, &flag, &notes, &fv, &dv)
fmt.Println("rows:",err)
checkErr(err, "select rows.Scan")
fmt.Printf("%s|\t", ts)
fmt.Printf("%d|\t", id)
fmt.Printf("%10s|\t", name)
fmt.Printf("%d|\t", len)
fmt.Printf("%t|\t", flag)
fmt.Printf("%s|\t", notes)
fmt.Printf("%06.3f|\t", fv)
fmt.Printf("%09.6f|\n\n", dv)
affectd++
}
et := time.Now().Nanosecond()
fmt.Printf("select data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
//fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9)
}
func drop_database_stmt(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
// drop test db
res, err := db.Exec("drop database if exists " + demodb)
checkErr(err, "drop database "+demodb)
affectd, err := res.RowsAffected()
checkErr(err, "drop db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func create_database_stmt(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
// create database
//var stmt interface{}
stmt, err := db.Prepare("create database ?")
checkErr(err, "create db, db.Prepare")
//var res driver.Result
res, err := stmt.Exec(demodb)
checkErr(err, "create db, stmt.Exec")
//fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected())
affectd, err := res.RowsAffected()
checkErr(err, "create db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func use_database_stmt(db *sql.DB, demodb string) {
st := time.Now().Nanosecond()
// create database
//var stmt interface{}
stmt, err := db.Prepare("use " + demodb)
checkErr(err, "use db, db.Prepare")
res, err := stmt.Exec()
checkErr(err, "use db, stmt.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "use db, res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func create_table_stmt(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
// create table
// (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)
stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(10), ? tinyint, ? bool, ? binary(8), ? float, ? double)")
checkErr(err, "create table db.Prepare")
res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv")
checkErr(err, "create table stmt.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "create table res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func insert_data_stmt(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
// insert data into table
stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?)")
checkErr(err, "insert db.Prepare")
res, err := stmt.Exec(demot, "now", 1000, "'haidian'", 6, true, "'AI world'", 6987.654, 321.987,
"now+1s", 1001, "'changyang'", 7, false, "'DeepMode'", 12356.456, 128634.456,
"now+2s", 1002, "'chuangping'", 8, true, "'database'", 3879.456, 65433478.456)
checkErr(err, "insert data, stmt.Exec")
affectd, err := res.RowsAffected()
checkErr(err, "res.RowsAffected")
et := time.Now().Nanosecond()
fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func select_data_stmt(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? from ?") // go binary mode
checkErr(err, "db.Prepare")
rows, err := stmt.Query("ts", "id", "name", "len", "flag", "notes", "fv", "dv", demot)
checkErr(err, "stmt.Query")
fmt.Printf("%10s%s%8s %5s %8s%s %s %10s%s %7s%s %8s%s %11s%s %14s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
var affectd int
for rows.Next() {
var ts string
var name string
var id int
var len int8
var flag bool
var notes string
var fv float32
var dv float64
err = rows.Scan(&ts, &id, &name, &len, &flag, &notes, &fv, &dv)
//fmt.Println("start scan fields from row.rs, &fv:", &fv)
//err = rows.Scan(&fv)
checkErr(err, "rows.Scan")
fmt.Printf("%s|\t", ts)
fmt.Printf("%d|\t", id)
fmt.Printf("%10s|\t", name)
fmt.Printf("%d|\t", len)
fmt.Printf("%t|\t", flag)
fmt.Printf("%s|\t", notes)
fmt.Printf("%06.3f|\t", fv)
fmt.Printf("%09.6f|\n", dv)
affectd++
}
et := time.Now().Nanosecond()
fmt.Printf("select data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1e9)
}
func checkErr(err error, prompt string) {
if err != nil {
fmt.Printf("%s\n", prompt)
panic(err)
}
}
#!/bin/bash
echo "==== start run cases001.go"
set +e
#set -x
script_dir="$(dirname $(readlink -f $0))"
#echo "pwd: $script_dir, para0: $0"
#execName=$0
#execName=`echo ${execName##*/}`
#goName=`echo ${execName%.*}`
###### step 3: start build
cd $script_dir
rm -f go.*
go mod init demotest
go build
sleep 1s
./demotest -h $1 -p $2
@echo off
echo ==== start run cases002.go
del go.*
go mod init demotest
go build
demotest.exe -h %1 -p %2
cd ..
package main
import (
"database/sql/driver"
"fmt"
"io"
"os"
"time"
taos "github.com/taosdata/driver-go/v2/af"
)
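// Subscribe_check drains one Consume() call and counts the rows received;
// it returns true when the count differs from the expected value (failure)
// and false when the counts agree.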
func Subscribe_check(topic taos.Subscriber, check int) bool {
count := 0
rows, err := topic.Consume()
if err != nil {
fmt.Println(err)
os.Exit(3)
}
defer func() { rows.Close(); time.Sleep(time.Second) }()
for {
values := make([]driver.Value, 2)
err := rows.Next(values)
if err == io.EOF {
break
} else if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(4)
}
count++
}
if count == check {
return false
} else {
return true
}
}
func main() {
ts := 1630461600000
db, err := taos.Open("127.0.0.1", "", "", "", 0)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
defer db.Close()
db.Exec("drop database if exists test")
db.Exec("create database if not exists test ")
db.Exec("use test")
db.Exec("create table test (ts timestamp ,level int)")
for i := 0; i < 10; i++ {
sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i)
db.Exec(sqlcmd)
}
fmt.Println("consumption 01.")
topic, err := db.Subscribe(false, "test", "select ts, level from test", time.Second)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if Subscribe_check(topic, 10) {
os.Exit(3)
}
fmt.Println("consumption 02: no new rows inserted")
if Subscribe_check(topic, 0) {
os.Exit(3)
}
fmt.Println("consumption 03: after one new rows inserted")
sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+10, 10)
db.Exec(sqlcmd)
if Subscribe_check(topic, 1) {
os.Exit(3)
}
fmt.Println("consumption 04: keep progress and continue previous subscription")
topic.Unsubscribe(true)
topic, err = db.Subscribe(false, "test", "select ts, level from test", time.Second)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if Subscribe_check(topic, 0) {
os.Exit(3)
}
}
#!/bin/bash
echo "==== start run cases002.go"
set +e
#set -x
script_dir="$(dirname $(readlink -f $0))"
#echo "pwd: $script_dir, para0: $0"
#execName=$0
#execName=`echo ${execName##*/}`
#goName=`echo ${execName%.*}`
###### step 3: start build
cd $script_dir
rm -f go.*
go mod init demotest > /dev/null 2>&1
go mod tidy > /dev/null 2>&1
go build > /dev/null 2>&1
sleep 1s
./demotest -h $1 -p $2
package connector
import (
"context"
"fmt"
"reflect"
"time"
"github.com/taosdata/go-utils/log"
"github.com/taosdata/go-utils/tdengine/config"
"github.com/taosdata/go-utils/tdengine/connector"
tdengineExecutor "github.com/taosdata/go-utils/tdengine/executor"
)
type Executor struct {
executor *tdengineExecutor.Executor
ctx context.Context
}
var Logger = log.NewLogger("taos test")
func NewExecutor(conf *config.TDengineGo, db string, showSql bool) (*Executor, error) {
tdengineConnector, err := connector.NewTDengineConnector("go", conf)
if err != nil {
return nil, err
}
executor := tdengineExecutor.NewExecutor(tdengineConnector, db, showSql, Logger)
return &Executor{
executor: executor,
ctx: context.Background(),
}, nil
}
func (e *Executor) Execute(sql string) (int64, error) {
return e.executor.DoExec(e.ctx, sql)
}
func (e *Executor) Query(sql string) (*connector.Data, error) {
fmt.Println("query :", sql)
return e.executor.DoQuery(e.ctx, sql)
}
func (e *Executor) CheckData(row, col int, value interface{}, data *connector.Data) (bool, error) {
if data == nil {
return false, fmt.Errorf("data is nil")
}
if col >= len(data.Head) {
return false, fmt.Errorf("col out of data")
}
if row >= len(data.Data) {
return false, fmt.Errorf("row out of data")
}
dataValue := data.Data[row][col]
if dataValue == nil && value != nil {
return false, fmt.Errorf("dataValue is nil but value is not nil")
}
if dataValue == nil && value == nil {
return true, nil
}
if reflect.TypeOf(dataValue) != reflect.TypeOf(value) {
return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue))
}
switch value.(type) {
case time.Time:
t, _ := dataValue.(time.Time)
if value.(time.Time).Nanosecond() != t.Nanosecond() {
return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond())
}
case string:
if value.(string) != dataValue.(string) {
return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string))
}
case int8:
if value.(int8) != dataValue.(int8) {
return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8))
}
case int16:
if value.(int16) != dataValue.(int16) {
return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16))
}
case int32:
if value.(int32) != dataValue.(int32) {
return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32))
}
case int64:
if value.(int64) != dataValue.(int64) {
return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64))
}
case float32:
if value.(float32) != dataValue.(float32) {
return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32))
}
case float64:
if value.(float64) != dataValue.(float64) {
return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32))
}
case bool:
if value.(bool) != dataValue.(bool) {
return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool))
}
default:
return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value))
}
return true, nil
}
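// CheckData2 is the print-only variant of CheckData: it logs the expected and
// actual values and reports a mismatch instead of returning an error.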
func (e *Executor) CheckData2(row, col int, value interface{}, data *connector.Data) {
match, err := e.CheckData(row, col, value, data)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("expect data is :", value)
fmt.Println("go got data is :", data.Data[row][col])
if !match {
fmt.Println("data not match")
}
}
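// CheckRow verifies that the result set contains exactly count rows,
// printing a diagnostic on mismatch.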
func (e *Executor) CheckRow(count int, data *connector.Data) {
if len(data.Data) != count {
fmt.Printf("check failed! expect %d rows got %d\n", count, len(data.Data))
}
}
@echo off
echo ==== start run nanosupport.go
del go.*
go mod init nano
go mod tidy
go build
nano.exe -h %1 -p %2
cd ..
#!/bin/bash
echo "==== start run nanosupport.go "
set +e
#set -x
script_dir="$(dirname $(readlink -f $0))"
#echo "pwd: $script_dir, para0: $0"
#execName=$0
#execName=`echo ${execName##*/}`
#goName=`echo ${execName%.*}`
###### step 3: start build
cd $script_dir
rm -f go.*
go mod init nano
go mod tidy
go build
sleep 10s
./nano -h $1 -p $2
package main
import (
"fmt"
"log"
"nano/connector"
"time"
"github.com/taosdata/go-utils/tdengine/config"
)
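// main connects to a local taosd, seeds a nanosecond-precision database (see
// prepareData) and exercises nanosecond timestamp comparisons, intervals and
// cross-precision (ns/us/ms) timezone handling.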
func main() {
e, err := connector.NewExecutor(&config.TDengineGo{
Address: "root:taosdata@/tcp(127.0.0.1:6030)/",
MaxIdle: 20,
MaxOpen: 30,
MaxLifetime: 30,
}, "db", false)
if err != nil {
panic(err)
}
prepareData(e)
data, err := e.Query("select * from tb")
if err != nil {
panic(err)
}
layout := "2006-01-02 15:04:05.999999999"
t0, _ := time.Parse(layout, "2021-06-10 00:00:00.100000001")
t1, _ := time.Parse(layout, "2021-06-10 00:00:00.150000000")
t2, _ := time.Parse(layout, "2021-06-10 00:00:00.299999999")
t3, _ := time.Parse(layout, "2021-06-10 00:00:00.300000000")
t4, _ := time.Parse(layout, "2021-06-10 00:00:00.300000001")
t5, _ := time.Parse(layout, "2021-06-10 00:00:00.999999999")
e.CheckData2(0, 0, t0, data)
e.CheckData2(1, 0, t1, data)
e.CheckData2(2, 0, t2, data)
e.CheckData2(3, 0, t3, data)
e.CheckData2(4, 0, t4, data)
e.CheckData2(5, 0, t5, data)
e.CheckData2(3, 1, int32(3), data)
e.CheckData2(4, 1, int32(5), data)
e.CheckData2(5, 1, int32(7), data)
fmt.Println(" start check nano support!")
data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000001\" and ts < \"2021-06-10 0:00:00.160000000\";")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000000\" and ts < \"2021-06-10 0:00:00.150000000\";")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts > 1623254400400000000;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts < \"2021-06-10 00:00:00.400000000\";")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb where ts < now + 400000000b;")
e.CheckData2(0, 0, int64(6), data)
data, _ = e.Query("select count(*) from tb where ts >= \"2021-06-10 0:00:00.100000001\";")
e.CheckData2(0, 0, int64(6), data)
data, _ = e.Query("select count(*) from tb where ts <= 1623254400300000000;")
e.CheckData2(0, 0, int64(4), data)
data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.000000000\";")
data, _ = e.Query("select count(*) from tb where ts = 1623254400150000000;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.100000001\";")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb where ts between \"2021-06-10 0:00:00.299999999\" and \"2021-06-10 0:00:00.300000001\";")
e.CheckData2(0, 0, int64(3), data)
data, _ = e.Query("select avg(speed) from tb interval(5000000000b);")
e.CheckRow(1, data)
data, _ = e.Query("select avg(speed) from tb interval(100000000b)")
e.CheckRow(4, data)
data, _ = e.Query("select avg(speed) from tb interval(1000b);")
e.CheckRow(5, data)
data, _ = e.Query("select avg(speed) from tb interval(1u);")
e.CheckRow(5, data)
data, _ = e.Query("select avg(speed) from tb interval(100000000b) sliding (100000000b);")
e.CheckRow(4, data)
data, _ = e.Query("select last(*) from tb")
tt, _ := time.Parse(layout, "2021-06-10 0:00:00.999999999")
e.CheckData2(0, 0, tt, data)
data, _ = e.Query("select first(*) from tb")
tt1, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001")
e.CheckData2(0, 0, tt1, data)
e.Execute("insert into tb values(now + 500000000b, 6);")
data, _ = e.Query("select * from tb;")
e.CheckRow(7, data)
e.Execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);")
e.Execute("insert into tb2 values(\"2021-06-10 0:00:00.100000001\", 1, \"2021-06-11 0:00:00.100000001\");")
e.Execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);")
e.Execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);")
e.Execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);")
e.Execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);")
e.Execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);")
data, _ = e.Query("select * from tb2;")
tt2, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001")
tt3, _ := time.Parse(layout, "2021-06-10 0:00:00.150000000")
e.CheckData2(0, 0, tt2, data)
e.CheckData2(1, 0, tt3, data)
e.CheckData2(2, 1, int32(4), data)
e.CheckData2(3, 1, int32(3), data)
tt4, _ := time.Parse(layout, "2021-06-11 00:00:00.300000001")
e.CheckData2(4, 2, tt4, data)
e.CheckRow(6, data)
data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb2 where ts2 > \"2021-06-11 0:00:00.100000000\" and ts2 < \"2021-06-11 0:00:00.100000002\";")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800500000000;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb2 where ts2 < \"2021-06-11 0:00:00.400000000\";")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 < now + 400000000b;")
e.CheckData2(0, 0, int64(6), data)
data, _ = e.Query("select count(*) from tb2 where ts2 >= \"2021-06-11 0:00:00.100000001\";")
e.CheckData2(0, 0, int64(6), data)
data, _ = e.Query("select count(*) from tb2 where ts2 <= 1623340800400000000;")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.000000000\";")
data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.300000001\";")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb2 where ts2 = 1623340800300000001;")
e.CheckData2(0, 0, int64(1), data)
data, _ = e.Query("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 between \"2021-06-11 0:00:00.299999999\" and \"2021-06-11 0:00:00.300000001\";")
e.CheckData2(0, 0, int64(3), data)
data, _ = e.Query("select count(*) from tb2 where ts2 <> 1623513600999999999;")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000001\";")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000000\";")
e.CheckData2(0, 0, int64(6), data)
data, _ = e.Query("select count(*) from tb2 where ts2 != 1623513600999999999;")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000001\";")
e.CheckData2(0, 0, int64(5), data)
data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000000\";")
e.CheckData2(0, 0, int64(6), data)
e.Execute("insert into tb2 values(now + 500000000b, 6, now +2d);")
data, _ = e.Query("select * from tb2;")
e.CheckRow(7, data)
e.Execute("create table tb3 (ts timestamp, speed int);")
_, err = e.Execute("insert into tb3 values(16232544001500000, 2);")
if err != nil {
fmt.Println("check pass! ")
}
e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456\", 2);")
data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456000\";")
e.CheckRow(1, data)
e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456789000\", 2);")
data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456789\";")
e.CheckRow(1, data)
// check timezone support
e.Execute("drop database if exists nsdb;")
e.Execute("create database nsdb precision 'ns';")
e.Execute("use nsdb;")
e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);")
data, _ = e.Query("select first(*) from tb1;")
ttt, _ := time.Parse(layout, "2021-06-10 01:00:00.123456789")
e.CheckData2(0, 0, ttt, data)
e.Execute("create database usdb precision 'us';")
e.Execute("use usdb;")
e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);")
data, _ = e.Query("select first(*) from tb1;")
ttt2, _ := time.Parse(layout, "2021-06-10 01:00:00.123456")
e.CheckData2(0, 0, ttt2, data)
e.Execute("drop database if exists msdb;")
e.Execute("create database msdb precision 'ms';")
e.Execute("use msdb;")
e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);")
data, _ = e.Query("select first(*) from tb1;")
ttt3, _ := time.Parse(layout, "2021-06-10 01:00:00.123")
e.CheckData2(0, 0, ttt3, data)
fmt.Println("all test done!")
}
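// prepareData (re)creates database db with nanosecond precision and seeds
// table tb with six rows around 2021-06-10 00:00:00, one nanosecond apart at
// the boundaries, using both insert and out-of-order import statements.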
func prepareData(e *connector.Executor) {
sqlList := []string{
"reset query cache;",
"drop database if exists db;",
"create database db;",
"use db;",
"reset query cache;",
"drop database if exists db;",
"create database db precision 'ns';",
"show databases;",
"use db;",
"create table tb (ts timestamp, speed int);",
"insert into tb values('2021-06-10 0:00:00.100000001', 1);",
"insert into tb values(1623254400150000000, 2);",
"import into tb values(1623254400300000000, 3);",
"import into tb values(1623254400299999999, 4);",
"insert into tb values(1623254400300000001, 5);",
"insert into tb values(1623254400999999999, 7);",
}
for _, sql := range sqlList {
err := executeSql(e, sql)
if err != nil {
log.Fatalf("prepare data error:%v, sql:%s", err, sql)
}
}
}
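// executeSql runs a single statement through the executor and surfaces any error.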
func executeSql(e *connector.Executor, sql string) error {
_, err := e.Execute(sql)
if err != nil {
return err
}
return nil
}
#!/bin/bash
##################################################
#
# Do go test
#
##################################################
set +e
#set -x
FILE_NAME=
RELEASE=0
while getopts "f:" arg
do
case $arg in
f)
FILE_NAME=$OPTARG
echo "input file: $FILE_NAME"
;;
?)
echo "unknow argument"
;;
esac
done
# start one taosd
bash ../script/sh/stop_dnodes.sh
bash ../script/sh/deploy.sh -n dnode1 -i 1
bash ../script/sh/cfg.sh -n dnode1 -c walLevel -v 0
bash ../script/sh/exec.sh -n dnode1 -s start
# start build test go file
caseDir=`echo ${FILE_NAME%/*}`
echo "caseDir: $caseDir"
cd $caseDir
rm -f go.*
go mod init $caseDir
go build
sleep 1s
./$caseDir
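// pre_test: uninstall any existing TDengine, sync ${WKC} and ${WK} to
// $BRANCH_NAME, rebuild into ${WK}/debug and install the Python connector.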
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
pipeline {
agent none
environment{
WK = '/data/lib/jenkins/workspace/TDinternal'
WKC= '/data/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'slam1'}
steps {
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
stage('test_b1') {
agent{label 'slam2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
}
}
stage('test_crash_gen') {
agent{label "slam3"}
steps {
pre_test()
sh '''
cd ${WKC}/tests/pytest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh '''
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single -DskipTests >/dev/null
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true >/dev/null
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
cd ${JENKINS_HOME}/workspace/nodejs
node nodejsChecker.js host=localhost
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
sh '''
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
stage('test_valgrind') {
agent{label "slam4"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
nohup taosd >/dev/null &
sleep 10
python3 concurrent_inquiry.py -c 1
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}
\ No newline at end of file
#!/bin/bash
taos -n fqdn
[{
"host":"192.168.0.210",
"username":"root",
"workdir":"/var/data/jenkins/workspace",
"thread":25
},
{
"host":"192.168.0.211",
"username":"root",
"workdir":"/var/data/jenkins/workspace",
"thread":25
},
{
"host":"192.168.0.212",
"username":"root",
"workdir":"/var/data/jenkins/workspace",
"thread":25
},
{
"host":"192.168.0.213",
"username":"root",
"workdir":"/var/data/jenkins/workspace",
"thread":25
},
{
"host":"192.168.0.214",
"username":"root",
"workdir":"/var/data/jenkins/workspace",
"thread":25
}]
#!/bin/bash
CONTAINER_TESTDIR=/home/community
# CONTAINER_TESTDIR=/root/tang/repository/TDengine
# export PATH=$PATH:$CONTAINER_TESTDIR/debug/build/bin
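# run_case.sh runs inside the test container: it prepares the runtime
# environment (Go proxy, npm registry, libtaos symlink, sim directories) and
# then executes the given command from tests/<exec_dir>.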
function usage() {
echo "$0"
echo -e "\t -d execution dir"
echo -e "\t -c command"
echo -e "\t -h help"
}
while getopts "d:c:h" opt; do
case $opt in
d)
exec_dir=$OPTARG
;;
c)
cmd=$OPTARG
;;
h)
usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG"
usage
exit 0
;;
esac
done
if [ -z "$exec_dir" ]; then
usage
exit 0
fi
if [ -z "$cmd" ]; then
usage
exit 0
fi
go env -w GOPROXY=https://goproxy.cn
echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config
ln -s /home/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
npm config -g set unsafe-perm
npm config -g set registry https://registry.npm.taobao.org
mkdir -p /home/sim/tsim
mkdir -p /var/lib/taos/subscribe
rm -rf ${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules
rm -rf ${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules
rm -rf ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules
# ln -s /home/node_modules ${CONTAINER_TESTDIR}/src/connector/nodejs/
# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/examples/nodejs/
# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/
# echo "$cmd"|grep -q "nodejs"
# if [ $? -eq 0 ]; then
# cd $CONTAINER_TESTDIR/src/connector/nodejs
# npm install node-gyp-build@4.3.0 --ignore-scripts
# fi
cd $CONTAINER_TESTDIR/tests/$exec_dir
ulimit -c unlimited
$cmd
RET=$?
if [ $RET -ne 0 ]; then
pwd
fi
exit $RET
#!/bin/bash
function usage() {
echo "$0"
echo -e "\t -w work dir"
echo -e "\t -d execution dir"
echo -e "\t -c command"
echo -e "\t -t thread number"
echo -e "\t -h help"
}
while getopts "w:d:c:t:h" opt; do
case $opt in
w)
WORKDIR=$OPTARG
;;
d)
exec_dir=$OPTARG
;;
c)
cmd=$OPTARG
;;
t)
thread_no=$OPTARG
;;
h)
usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG"
usage
exit 0
;;
esac
done
if [ -z "$WORKDIR" ]; then
usage
exit 1
fi
if [ -z "$exec_dir" ]; then
usage
exit 1
fi
if [ -z "$cmd" ]; then
usage
exit 1
fi
if [ -z "$thread_no" ]; then
usage
exit 1
fi
ulimit -c unlimited
INTERNAL_REPDIR=$WORKDIR/TDinternal
REPDIR=$INTERNAL_REPDIR/community
CONTAINER_TESTDIR=/home/community
TMP_DIR=$WORKDIR/tmp
MOUNT_DIR=""
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/sim/tsim
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/node_modules
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/coredump
rm -rf ${TMP_DIR}/thread_volume/$thread_no/coredump/*
if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then
subdir=`echo "$exec_dir"|cut -d/ -f1`
echo "cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/"
cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/
fi
MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir"
echo "$thread_no -> ${exec_dir}:$cmd"
echo "$cmd"|grep -q "nodejs"
if [ $? -eq 0 ]; then
MOUNT_NODE_MOD="-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules \
-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules \
-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules"
fi
if [ -f "$REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml" ]; then
TAOSADAPTER_TOML="-v $REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml:/etc/taos/taosadapter.toml:ro"
fi
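# Launch the test container: mount the community repo sources, the per-thread
# sim/ and node_modules volumes and the pre-built debug binaries (read-only);
# --ulimit core=-1 keeps core dumps for post-mortem debugging.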
docker run \
-v $REPDIR/tests:$CONTAINER_TESTDIR/tests \
-v $MOUNT_DIR \
-v "$TMP_DIR/thread_volume/$thread_no/sim:${CONTAINER_TESTDIR}/sim" \
-v ${TMP_DIR}/thread_volume/$thread_no/coredump:/home/coredump \
-v $INTERNAL_REPDIR/debug:/home/debug:ro \
-v $REPDIR/deps:$CONTAINER_TESTDIR/deps:ro \
-v $REPDIR/src:$CONTAINER_TESTDIR/src \
-v $REPDIR/src/inc/taos.h:/usr/include/taos.h:ro \
$TAOSADAPTER_TOML \
-v $REPDIR/examples:$CONTAINER_TESTDIR/tests/examples \
-v $REPDIR/snap:$CONTAINER_TESTDIR/snap:ro \
-v $REPDIR/alert:$CONTAINER_TESTDIR/alert:ro \
-v $REPDIR/packaging/cfg/taos.cfg:/etc/taos/taos.cfg:ro \
-v $REPDIR/packaging:$CONTAINER_TESTDIR/packaging:ro \
-v $REPDIR/README.md:$CONTAINER_TESTDIR/README.md:ro \
-v $REPDIR/src/connector/python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \
-e LD_LIBRARY_PATH=/home/debug/build/lib:/home/debug/build/lib64 \
-e PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/debug/build/bin:/usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin \
-e JAVA_HOME=/usr/local/jdk1.8.0_144 \
--rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd"
ret=$?
exit $ret
#!/usr/bin/gnuplot
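# Plot the performance-test timing series from <filename>.csv into
# <filename>.png. The 'filename' variable is assumed to be supplied on the
# command line, e.g.: gnuplot -e "filename='perftest-taosdemo-report'" <this script>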
reset
set terminal png
set title filename font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output filename . '.png'
set datafile separator ','
set key reverse Left outside
set grid
# plot 'perftest-influx-report.csv' using 1:2 title "InfluxDB Write", \
# "" using 1:3 title "InfluxDB Query case1", \
# "" using 1:4 title "InfluxDB Query case2", \
# "" using 1:5 title "InfluxDB Query case3", \
# "" using 1:6 title "InfluxDB Query case4"
#
plot filename . '.csv' using 1:2 title "TDengine Write", \
"" using 1:3 title "TDengine Query case1", \
"" using 1:4 title "TDengine Query case2", \
"" using 1:5 title "TDengine Query case3", \
"" using 1:6 title "TDengine Query case4"