Commit 2495dd6e authored by lichuang

Merge branch 'develop' into feature/TD-4034

version: 1.0.{build}
image:
  - Visual Studio 2015
  - macos

environment:
  matrix:
    - ARCH: amd64
    - ARCH: x86

matrix:
  exclude:
    - image: macos
      ARCH: x86

for:
  -
    matrix:
      only:
        - image: Visual Studio 2015

    clone_folder: c:\dev\TDengine
    clone_depth: 1
    init:
      - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
    before_build:
      - cd c:\dev\TDengine
      - md build
    build_script:
      - cd build
      - cmake -G "NMake Makefiles" ..
      - nmake install
  -
    matrix:
      only:
        - image: macos

    clone_depth: 1

    build_script:
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null

notifications:
  - provider: Email
    to:
      - sangshuduo@gmail.com
    on_build_success: true
    on_build_failure: true
    on_build_status_changed: true
---
kind: pipeline
name: test_amd64
platform:
  os: linux
  arch: amd64

steps:
  - name: smoke_test
    image: python:3.8
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential gcc
      - pip3 install psutil
      - pip3 install guppy3
      - pip3 install src/connector/python/linux/python3/
      - mkdir debug
      - cd debug
      - cmake ..
      - make
      - cd ../tests
      - ./test-all.sh smoke
    when:
      branch:
        - develop
        - master

  - name: crash_gen
    image: python:3.8
    commands:
      - pip3 install requests
      - pip3 install src/connector/python/linux/python3/
      - pip3 install psutil
      - pip3 install guppy3
      - cd tests/pytest
      - ./crash_gen.sh -a -p -t 4 -s 2000
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: test_arm64
platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: gcc
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: test_arm
platform:
  os: linux
  arch: arm

steps:
  - name: build
    image: arm32v7/ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch32 > /dev/null
      - make
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: build_trusty
platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:trusty
    commands:
      - apt-get update
      - apt-get install -y gcc cmake3 build-essential git binutils-2.26
      - mkdir debug
      - cd debug
      - cmake ..
      - make
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: build_xenial
platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:xenial
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: build_bionic
platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make
    when:
      branch:
        - develop
        - master
---
kind: pipeline
name: goodbye
platform:
  os: linux
  arch: amd64

steps:
  - name: 64-bit
    image: alpine
    commands:
      - echo 64-bit is good.
    when:
      branch:
        - develop
        - master

depends_on:
  - test_arm64
  - test_amd64
\ No newline at end of file
#
# Configuration
#
#
# Build Matrix
#
branches:
  only:
    - master
    - develop
    - coverity_scan
    - /^.*ci-.*$/
matrix:
  - os: linux
    dist: bionic
    language: c

    git:
      - depth: 1

    compiler: gcc
    env: DESC="linux/gcc build and test"

    addons:
      apt:
        packages:
          - build-essential
          - cmake
          - net-tools
          - python3.8
          - libc6-dbg
          - valgrind
          - psmisc
          - unixodbc
          - unixodbc-dev
          - mono-complete

    before_script:
      - export TZ=Asia/Harbin
      - date
      - curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python3.8 get-pip.py
      - python3.8 -m pip install --upgrade pip setuptools
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug

    script:
      - cmake .. > /dev/null
      - make > /dev/null
    after_success:
      - travis_wait 20
      - |-
        case $TRAVIS_OS_NAME in
          linux)
            cd ${TRAVIS_BUILD_DIR}/debug
            make install > /dev/null || travis_terminate $?

            py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
            pip3 install psutil
            pip3 install guppy3
            pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

            cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
            mcs -out:taosdemo *.cs || travis_terminate $?
            pkill -TERM -x taosd
            fuser -k -n tcp 6030
            sleep 1
            ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
            sleep 5
            mono taosdemo -Q DEFAULT -y || travis_terminate $?
            pkill -KILL -x taosd
            fuser -k -n tcp 6030
            sleep 1

            cd ${TRAVIS_BUILD_DIR}/tests
            ./test-all.sh smoke || travis_terminate $?
            sleep 1

            cd ${TRAVIS_BUILD_DIR}/tests/pytest
            pkill -TERM -x taosd
            fuser -k -n tcp 6030
            sleep 1
            ./crash_gen.sh -a -p -t 4 -s 2000 || travis_terminate $?
            sleep 1

            cd ${TRAVIS_BUILD_DIR}/tests/pytest
            ./valgrind-test.sh 2>&1 > mem-error-out.log
            sleep 1

            # Color setting
            RED='\033[0;31m'
            GREEN='\033[1;32m'
            GREEN_DARK='\033[0;32m'
            GREEN_UNDERLINE='\033[4;32m'
            NC='\033[0m'

            grep 'start to execute\|ERROR SUMMARY' mem-error-out.log | grep -v 'grep' | uniq | tee uniq-mem-error-out.log
            for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
            do
              if [ -n "$memError" ]; then
                if [ "$memError" -gt 12 ]; then
                  echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
                    More than our threshold! ## ${NC}"
                  travis_terminate $memError
                fi
              fi
            done

            grep 'start to execute\|definitely lost:' mem-error-out.log | grep -v 'grep' | uniq | tee uniq-definitely-lost-out.log
            for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
            do
              if [ -n "$defiMemError" ]; then
                if [ "$defiMemError" -gt 13 ]; then
                  echo -e "${RED} ## Memory errors number valgrind reports \
                    Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
                  travis_terminate $defiMemError
                fi
              fi
            done
            ;;
        esac
  - os: linux
    dist: bionic
    language: c
    compiler: gcc
    env: COVERITY_SCAN=true

    git:
      - depth: 1

    script:
      - echo "this job is for coverity scan"

    addons:
      coverity_scan:
        # GitHub project metadata
        # ** specific to your project **
        project:
          name: TDengine
          version: 2.x
          description: TDengine

        # Where email notification of build analysis results will be sent
        notification_email: sdsang@taosdata.com, slguan@taosdata.com

        # Commands to prepare for build_command
        # ** likely specific to your build **
        build_command_prepend: cmake . > /dev/null

        # The command that will be added as an argument to "cov-build" to compile your project for analysis,
        # ** likely specific to your build **
        build_command: make

        # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
        # Take care in resource usage, and consider the build frequency allowances per
        #   https://scan.coverity.com/faq#frequency
        branch_pattern: coverity_scan
  - os: linux
    dist: trusty
    language: c

    git:
      - depth: 1

    addons:
      apt:
        packages:
          - build-essential
          - cmake
          - binutils-2.26
          - unixodbc
          - unixodbc-dev
    env:
      - DESC="trusty/gcc-4.8/binutils-2.26 build"

    before_script:
      - export TZ=Asia/Harbin
      - date
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug

    script:
      - cmake .. > /dev/null
      - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
  - os: linux
    dist: bionic
    language: c
    compiler: clang
    env: DESC="linux/clang build"

    git:
      - depth: 1

    addons:
      apt:
        packages:
          - build-essential
          - cmake
          - unixodbc
          - unixodbc-dev

    before_script:
      - export TZ=Asia/Harbin
      - date
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug

    script:
      - cmake .. > /dev/null
      - make > /dev/null
  - os: linux
    arch: arm64
    dist: bionic
    language: c
    compiler: clang
    env: DESC="arm64 linux/clang build"

    git:
      - depth: 1

    addons:
      apt:
        packages:
          - build-essential
          - cmake

    before_script:
      - export TZ=Asia/Harbin
      - date
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug

    script:
      - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
          cmake .. -DCPUTYPE=aarch64 > /dev/null;
        else
          cmake .. > /dev/null;
        fi
      - make > /dev/null
  - os: linux
    arch: arm64
    dist: xenial
    language: c

    git:
      - depth: 1

    addons:
      apt:
        packages:
          - build-essential
          - cmake
          - unixodbc
          - unixodbc-dev
    env:
      - DESC="arm64 xenial build"

    before_script:
      - export TZ=Asia/Harbin
      - date
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug

    script:
      - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
          cmake .. -DCPUTYPE=aarch64 > /dev/null;
        else
          cmake .. > /dev/null;
        fi
      - make > /dev/null
  - os: osx
    osx_image: xcode11.4
    language: c
    compiler: clang
    env: DESC="mac/clang build"

    git:
      - depth: 1

    addons:
      homebrew:
        - cmake
        - unixodbc

    script:
      - cd ${TRAVIS_BUILD_DIR}
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null
......@@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}
......
......@@ -921,7 +921,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
cfg.maxValue = 10000000000.0f;
cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
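The two maxValue lines in the hunk are the before/after ceilings for this option. Since the option is registered with cfg.valType = TAOS_CFG_VTYPE_INT32, only the smaller ceiling is representable as a signed 32-bit value, which is presumably why it was lowered. A standalone check (illustrative, not part of the source):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* INT32_MAX is 2147483647: the larger ceiling overflows int32, the smaller fits. */
  long long old_max = 10000000000LL;
  long long new_max = 100000000LL;
  printf("%lld fits in int32? %s\n", old_max, old_max <= INT32_MAX ? "yes" : "no"); /* no  */
  printf("%lld fits in int32? %s\n", new_max, new_max <= INT32_MAX ? "yes" : "no"); /* yes */
  return 0;
}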
......
......@@ -1630,64 +1630,68 @@ static void printfQueryMeta() {
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
printf("\n");
printf("specified table query info: \n");
printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval);
printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent);
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
printf("specified table query info: \n");
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.sqlCount);
printf("specified tbl query times:\n");
printf(" \033[33m%"PRIu64"\033[0m\n",
if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
printf("specified tbl query times:\n");
printf(" \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.mode);
printf("interval: \033[33m%"PRIu64"\033[0m\n",
printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval);
printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent);
printf("mod: \033[33m%s\033[0m\n",
(g_queryInfo.specifiedQueryInfo.mode)?"async":"sync");
printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeRestart);
printf("keepProgress: \033[33m%d\033[0m\n",
printf("keepProgress: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
}
for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
i, g_queryInfo.specifiedQueryInfo.sql[i]);
}
printf("\n");
printf("super table query info:\n");
printf("query interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryInterval);
printf("threadCnt: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.threadCnt);
printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
g_queryInfo.superQueryInfo.sTblName);
printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.mode);
printf("interval: \033[33m%"PRIu64"\033[0m\n",
for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
i, g_queryInfo.specifiedQueryInfo.sql[i]);
}
printf("\n");
}
printf("super table query info:\n");
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
if (g_queryInfo.superQueryInfo.sqlCount > 0) {
printf("query interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryInterval);
printf("threadCnt: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.threadCnt);
printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
g_queryInfo.superQueryInfo.sTblName);
printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
printf("mod: \033[33m%s\033[0m\n",
(g_queryInfo.superQueryInfo.mode)?"async":"sync");
printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.subscribeRestart);
printf("keepProgress: \033[33m%d\033[0m\n",
printf("keepProgress: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
printf(" sql[%d]: \033[33m%s\033[0m\n",
i, g_queryInfo.superQueryInfo.sql[i]);
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
printf(" sql[%d]: \033[33m%s\033[0m\n",
i, g_queryInfo.superQueryInfo.sql[i]);
}
printf("\n");
}
}
printf("\n");
SHOW_PARSE_RESULT_END();
}
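The printf calls above bracket each value with \033[33m and \033[0m, the ANSI escape codes for a yellow foreground and a reset. A minimal standalone illustration (not from the source):

#include <stdio.h>

int main(void) {
  /* "\033[33m" = yellow foreground, "\033[0m" = reset, as used by printfQueryMeta(). */
  printf("sqlCount: \033[33m%d\033[0m\n", 2);
  return 0;
}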
......@@ -2847,7 +2851,7 @@ static void* createTable(void *sarg)
}
static int startMultiThreadCreateChildTable(
char* cols, int threads, int64_t startFrom, int64_t ntables,
char* cols, int threads, uint64_t startFrom, uint64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
......@@ -2862,13 +2866,13 @@ static int startMultiThreadCreateChildTable(
threads = 1;
}
int64_t a = ntables / threads;
uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
int64_t b = 0;
uint64_t b = 0;
b = ntables % threads;
for (int64_t i = 0; i < threads; i++) {
......@@ -4212,7 +4216,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
}
// sub_table_query
// super_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
g_queryInfo.superQueryInfo.threadCnt = 1;
......@@ -5679,13 +5683,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
taos_close(taos);
int a = ntables / threads;
uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
int b = 0;
uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
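The same quotient/remainder split recurs in these hunks: a = ntables / threads is the base share per worker and b = ntables % threads is the leftover handed to the first b workers. A standalone sketch of how such a split assigns table ranges (variable values and names here are illustrative, not the original code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t ntables = 10, threads = 4;
  uint64_t a = ntables / threads;  /* base tables per thread           */
  uint64_t b = ntables % threads;  /* first b threads take one extra   */
  uint64_t startFrom = 0;
  for (uint64_t i = 0; i < threads; i++) {
    uint64_t n = (i < b) ? a + 1 : a;
    printf("thread %" PRIu64 ": tables [%" PRIu64 ", %" PRIu64 ")\n",
           i, startFrom, startFrom + n);
    startFrom += n;   /* ranges cover all 10 tables: 3, 3, 2, 2 */
  }
  return 0;
}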
......@@ -6380,7 +6384,7 @@ static int queryTestProcess() {
b = ntables % threads;
}
int startFrom = 0;
uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
......@@ -6436,13 +6440,14 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c
}
getResult(res, (char*)param);
taos_free_result(res);
// tao_unscribe() will free result.
}
static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
static TAOS_SUB* subscribeImpl(
TAOS *taos, char *sql, char* topic, char* resultFileName) {
TAOS_SUB* tsub = NULL;
if (g_queryInfo.specifiedQueryInfo.mode) {
if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
tsub = taos_subscribe(taos,
g_queryInfo.specifiedQueryInfo.subscribeRestart,
topic, sql, subscribe_callback, (void*)resultFileName,
......@@ -6466,6 +6471,9 @@ static void *superSubscribe(void *sarg) {
char subSqlstr[1024];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
if (g_queryInfo.superQueryInfo.sqlCount == 0)
return NULL;
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
......@@ -6524,7 +6532,7 @@ static void *superSubscribe(void *sarg) {
TAOS_RES* res = NULL;
while(1) {
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
if (1 == g_queryInfo.superQueryInfo.mode) {
if (ASYNC_QUERY_MODE == g_queryInfo.superQueryInfo.mode) {
continue;
}
......@@ -6554,6 +6562,9 @@ static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
if (g_queryInfo.specifiedQueryInfo.sqlCount == 0)
return NULL;
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
......@@ -6591,7 +6602,7 @@ static void *specifiedSubscribe(void *sarg) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
sprintf(topic, "taosdemo-subscribe-%d", i);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
}
......@@ -6610,7 +6621,7 @@ static void *specifiedSubscribe(void *sarg) {
TAOS_RES* res = NULL;
while(1) {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
if (ASYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) {
continue;
}
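Both superSubscribe() and specifiedSubscribe() poll inside a while(1) loop; the hunks switch the skip condition from SYNC_QUERY_MODE to ASYNC_QUERY_MODE, so the manual poll is now skipped only when results already arrive through the callback registered in subscribeImpl(). A compilable sketch of that intent (stand-in types and data, not the TDengine API):

#include <stdio.h>

/* Stand-ins so the sketch compiles on its own; the real loop uses TAOS_SUB
 * handles and taos_consume() from taos.h. */
typedef enum { SYNC_QUERY_MODE = 0, ASYNC_QUERY_MODE = 1 } QueryMode;

static const char *consume_stub(int i) { return (i % 2) ? "a row" : NULL; }

int main(void) {
  QueryMode mode = SYNC_QUERY_MODE;  /* with ASYNC_QUERY_MODE nothing is polled */
  int sqlCount = 3;
  for (int i = 0; i < sqlCount; i++) {
    if (ASYNC_QUERY_MODE == mode) {
      continue;  /* async: the subscribe callback already delivers results */
    }
    const char *res = consume_stub(i);  /* sync: poll, like taos_consume(tsub[i]) */
    if (res != NULL) {
      printf("sql[%d]: %s\n", i, res);
    }
  }
  return 0;
}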
......@@ -6710,21 +6721,21 @@ static int subscribeTestProcess() {
exit(-1);
}
int ntables = g_queryInfo.superQueryInfo.childTblCount;
uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
int a = ntables / threads;
uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
int b = 0;
uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
int startFrom = 0;
uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
......
......@@ -171,7 +171,7 @@ typedef struct HttpThread {
EpollFd pollFd;
int32_t numOfContexts;
int32_t threadId;
char label[HTTP_LABEL_SIZE];
char label[HTTP_LABEL_SIZE << 1];
bool (*processData)(HttpContext *pContext);
} HttpThread;
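The hunk doubles the label array (HTTP_LABEL_SIZE << 1), presumably so a label composed of a base name plus a suffix no longer truncates. A standalone illustration; the size and the "httpSrv" name below are assumptions for the example, not values from httpInt.h:

#include <stdio.h>

#define HTTP_LABEL_SIZE 8   /* assumed here only for illustration */

int main(void) {
  char small[HTTP_LABEL_SIZE];
  char large[HTTP_LABEL_SIZE << 1];   /* doubled, as in the diff above */
  int need = snprintf(small, sizeof(small), "%s-%d", "httpSrv", 12);
  snprintf(large, sizeof(large), "%s-%d", "httpSrv", 12);
  /* need >= sizeof(small) signals truncation; the doubled buffer keeps the full label. */
  printf("wanted %d chars: small=\"%s\" large=\"%s\"\n", need, small, large);
  return 0;
}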
......
......@@ -21,6 +21,7 @@ def pre_test(){
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}
......