Commit 4c18fcd5, authored by zyyang

Merge branch 'develop' into feature/TD-2191

......@@ -3,6 +3,7 @@ os: Visual Studio 2015
environment:
matrix:
- ARCH: amd64
- ARCH: x86
clone_folder: c:\dev\TDengine
clone_depth: 1
......@@ -23,6 +24,7 @@ notifications:
- provider: Email
to:
- sangshuduo@gmail.com
on_build_success: true
on_build_failure: true
on_build_status_changed: true
......@@ -146,7 +146,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
dist: trusty
dist: xenial
language: c
git:
- depth: 1
......@@ -157,7 +157,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin
......@@ -227,7 +227,7 @@ matrix:
- os: linux
arch: arm64
dist: trusty
dist: xenial
language: c
git:
- depth: 1
......@@ -238,7 +238,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin
......
import hudson.model.Result
import jenkins.model.CauseOfInterruption
properties([pipelineTriggers([githubPush()])])
node {
git url: 'https://github.com/taosdata/TDengine.git'
}
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
def jobs = Jenkins.instance.getItemByFullName(currentJobName)
def builds = jobs.getBuilds()
for (build in builds) {
if (!build.isBuilding()) {
continue;
}
if (currentBuildNumber == build.getNumber().toInteger()) {
continue;
}
build.doKill() //doTerm(),doKill(),doTerm()
}
}
//abort previous build
abortPreviousBuilds()
def abort_previous(){
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
def pre_test(){
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
sudo rmtaos
'''
}
sh '''
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
cd ${WK}
git reset --hard HEAD~10
git checkout develop
git pull
cd ${WK}
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
'''
return 1
}
pipeline {
agent none
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
//only build pr
when {
changeRequest()
}
parallel {
stage('pytest') {
agent{label '184'}
stage('python_1') {
agent{label 'p1'}
steps {
pre_test()
sh '''
date
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
#./test-all.sh smoke
./test-all.sh pytest
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
}
stage('test_b1') {
agent{label 'master'}
stage('python_2') {
agent{label 'p2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p2
date'''
}
}
stage('test_b1') {
agent{label 'b1'}
steps {
pre_test()
sh '''
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests
#./test-all.sh smoke
./test-all.sh b1
./test-all.sh b1fq
date'''
}
}
stage('test_crash_gen') {
agent{label "185"}
agent{label "b2"}
steps {
sh '''
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests/pytest
'''
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
......@@ -102,200 +136,35 @@ pipeline {
sh '''
date
cd ${WKC}/tests
./test-all.sh b2
./test-all.sh b2fq
date
'''
}
}
stage('test_valgrind') {
agent{label "186"}
agent{label "b3"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh '''
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
./test-all.sh b3fq
date'''
}
}
stage('connector'){
agent{label "release"}
steps{
sh'''
cd ${WORKSPACE}
git checkout develop
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}
\ No newline at end of file
}
}
......@@ -5,7 +5,7 @@ go 1.14
require (
github.com/jmoiron/sqlx v1.2.0
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/taosdata/driver-go v0.0.0-20200727182616-1a3b1941c206
github.com/taosdata/driver-go v0.0.0-20201113094317-050667e5b4d0
go.uber.org/zap v1.14.1
google.golang.org/appengine v1.6.5 // indirect
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
......
......@@ -45,7 +45,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
......@@ -53,7 +53,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
......@@ -62,7 +62,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
......@@ -70,21 +70,21 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
......@@ -105,8 +105,8 @@ IF (TD_LINUX)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-O3 -Wno-error")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
......@@ -125,9 +125,9 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
ENDIF ()
IF (TD_WINDOWS)
......@@ -140,7 +140,7 @@ IF (TD_WINDOWS)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread)
......
......@@ -41,8 +41,10 @@ SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FL
# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
IF (${CMAKE_BUILD_TYPE} MATCHES "Debug")
SET(CMAKE_BUILD_TYPE "Debug")
MESSAGE(STATUS "Build Debug Version")
ELSEIF (${CMAKE_BUILD_TYPE} MATCHES "Release")
SET(CMAKE_BUILD_TYPE "Release")
MESSAGE(STATUS "Build Release Version")
ELSE ()
IF (TD_WINDOWS)
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.9.0")
SET(TD_VER_NUMBER "2.0.12.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
This diff has been collapsed.
/* include/curl/curlbuild.h. Generated from curlbuild.h.in by configure. */
#ifndef __CURL_CURLBUILD_H
#define __CURL_CURLBUILD_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* NOTES FOR CONFIGURE CAPABLE SYSTEMS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* If you think that something actually needs to be changed, adjusted
* or fixed in this file, then, report it on the libcurl development
* mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/
*
* This header file shall only export symbols which are 'curl' or 'CURL'
* prefixed, otherwise public name space would be polluted.
*
* NOTE 2:
* -------
*
* Right now you might be staring at file include/curl/curlbuild.h.in or
* at file include/curl/curlbuild.h, this is due to the following reason:
*
* On systems capable of running the configure script, the configure process
* will overwrite the distributed include/curl/curlbuild.h file with one that
* is suitable and specific to the library being configured and built, which
* is generated from the include/curl/curlbuild.h.in template file.
*
*/
/* ================================================================ */
/* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */
/* ================================================================ */
#ifdef CURL_SIZEOF_LONG
#error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_SOCKLEN_T
#error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_SOCKLEN_T
#error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_OFF_T
#error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_T
#error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_TU
#error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined
#endif
#ifdef CURL_FORMAT_OFF_T
#error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_OFF_T
#error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_T
#error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_TU
#error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined
#endif
/* ================================================================ */
/* EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY */
/* ================================================================ */
/* Configure process defines this to 1 when it finds out that system */
/* header file ws2tcpip.h must be included by the external interface. */
/* #undef CURL_PULL_WS2TCPIP_H */
#ifdef CURL_PULL_WS2TCPIP_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# include <winsock2.h>
# include <ws2tcpip.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/types.h must be included by the external interface. */
#define CURL_PULL_SYS_TYPES_H 1
#ifdef CURL_PULL_SYS_TYPES_H
# include <sys/types.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file stdint.h must be included by the external interface. */
/* #undef CURL_PULL_STDINT_H */
#ifdef CURL_PULL_STDINT_H
# include <stdint.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file inttypes.h must be included by the external interface. */
/* #undef CURL_PULL_INTTYPES_H */
#ifdef CURL_PULL_INTTYPES_H
# include <inttypes.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/socket.h must be included by the external interface. */
#define CURL_PULL_SYS_SOCKET_H 1
#ifdef CURL_PULL_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/poll.h must be included by the external interface. */
/* #undef CURL_PULL_SYS_POLL_H */
#ifdef CURL_PULL_SYS_POLL_H
# include <sys/poll.h>
#endif
/* The size of `long', as computed by sizeof. */
#define CURL_SIZEOF_LONG 8
/* Integral data type used for curl_socklen_t. */
#define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
/* The size of `curl_socklen_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_SOCKLEN_T 4
/* Data type definition of curl_socklen_t. */
typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t;
/* Signed integral data type used for curl_off_t. */
#define CURL_TYPEOF_CURL_OFF_T long
/* Data type definition of curl_off_t. */
typedef CURL_TYPEOF_CURL_OFF_T curl_off_t;
/* curl_off_t formatting string directive without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_T "ld"
/* unsigned curl_off_t formatting string without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_TU "lu"
/* curl_off_t formatting string directive with "%" conversion specifier. */
#define CURL_FORMAT_OFF_T "%ld"
/* The size of `curl_off_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_OFF_T 8
/* curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_T L
/* unsigned curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_TU UL
#endif /* __CURL_CURLBUILD_H */
#ifndef __CURL_CURLRULES_H
#define __CURL_CURLRULES_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* COMPILE TIME SANITY CHECKS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* All checks done in this file are intentionally placed in a public
* header file which is pulled by curl/curl.h when an application is
* being built using an already built libcurl library. Additionally
* this file is also included and used when building the library.
*
* If compilation fails on this file it is certainly sure that the
* problem is elsewhere. It could be a problem in the curlbuild.h
* header file, or simply that you are using different compilation
* settings than those used to build the library.
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* Do not deactivate any check, these are done to make sure that the
* library is properly built and used.
*
* You can find further help on the libcurl development mailing list:
* http://cool.haxx.se/mailman/listinfo/curl-library/
*
* NOTE 2
* ------
*
* Some of the following compile time checks are based on the fact
* that the dimension of a constant array can not be a negative one.
* In this way if the compile time verification fails, the compilation
* will fail issuing an error. The error description wording is compiler
* dependent but it will be quite similar to one of the following:
*
* "negative subscript or subscript is too large"
* "array must have at least one element"
* "-1 is an illegal array size"
* "size of array is negative"
*
* If you are building an application which tries to use an already
* built libcurl library and you are getting this kind of errors on
* this file, it is a clear indication that there is a mismatch between
* how the library was built and how you are trying to use it for your
* application. Your already compiled or binary library provider is the
* only one who can give you the details you need to properly use it.
*/
/*
* Verify that some macros are actually defined.
*/
#ifndef CURL_SIZEOF_LONG
# error "CURL_SIZEOF_LONG definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_SOCKLEN_T
# error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_SOCKLEN_T
# error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_OFF_T
# error "CURL_TYPEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_T
# error "CURL_FORMAT_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_TU
# error "CURL_FORMAT_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing
#endif
#ifndef CURL_FORMAT_OFF_T
# error "CURL_FORMAT_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_OFF_T
# error "CURL_SIZEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_T
# error "CURL_SUFFIX_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_TU
# error "CURL_SUFFIX_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing
#endif
/*
* Macros private to this header file.
*/
#define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1
#define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1
/*
* Verify that the size previously defined and expected for long
* is the same as the one reported by sizeof() at compile time.
*/
typedef char
__curl_rule_01__
[CurlchkszEQ(long, CURL_SIZEOF_LONG)];
/*
* Verify that the size previously defined and expected for
* curl_off_t is actually the the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_02__
[CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)];
/*
* Verify at compile time that the size of curl_off_t as reported
* by sizeof() is greater or equal than the one reported for long
* for the current compilation.
*/
typedef char
__curl_rule_03__
[CurlchkszGE(curl_off_t, long)];
/*
* Verify that the size previously defined and expected for
* curl_socklen_t is actually the the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_04__
[CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)];
/*
* Verify at compile time that the size of curl_socklen_t as reported
* by sizeof() is greater or equal than the one reported for int for
* the current compilation.
*/
typedef char
__curl_rule_05__
[CurlchkszGE(curl_socklen_t, int)];
/* ================================================================ */
/* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */
/* ================================================================ */
/*
* CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow
* these to be visible and exported by the external libcurl interface API,
* while also making them visible to the library internals, simply including
* curl_setup.h, without actually needing to include curl.h internally.
* If some day this section would grow big enough, all this should be moved
* to its own header file.
*/
/*
* Figure out if we can use the ## preprocessor operator, which is supported
* by ISO/ANSI C and C++. Some compilers support it without setting __STDC__
* or __cplusplus so we need to carefully check for them too.
*/
#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \
defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \
defined(__ILEC400__)
/* This compiler is believed to have an ISO compatible preprocessor */
#define CURL_ISOCPP
#else
/* This compiler is believed NOT to have an ISO compatible preprocessor */
#undef CURL_ISOCPP
#endif
/*
* Macros for minimum-width signed and unsigned curl_off_t integer constants.
*/
#if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551)
# define __CURL_OFF_T_C_HLPR2(x) x
# define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU)
#else
# ifdef CURL_ISOCPP
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix
# else
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix
# endif
# define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU)
#endif
/*
* Get rid of macros private to this header file.
*/
#undef CurlchkszEQ
#undef CurlchkszGE
/*
* Get rid of macros not intended to exist beyond this point.
*/
#undef CURL_PULL_WS2TCPIP_H
#undef CURL_PULL_SYS_TYPES_H
#undef CURL_PULL_SYS_SOCKET_H
#undef CURL_PULL_SYS_POLL_H
#undef CURL_PULL_STDINT_H
#undef CURL_PULL_INTTYPES_H
#undef CURL_TYPEOF_CURL_SOCKLEN_T
#undef CURL_TYPEOF_CURL_OFF_T
#ifdef CURL_NO_OLDIES
#undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */
#endif
#endif /* __CURL_CURLRULES_H */
#ifndef __CURL_CURLVER_H
#define __CURL_CURLVER_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* This header file contains nothing but libcurl version info, generated by
a script at release-time. This was made its own header file in 7.11.2 */
/* This is the global package copyright */
#define LIBCURL_COPYRIGHT "1996 - 2015 Daniel Stenberg, <daniel@haxx.se>."
/* This is the version number of the libcurl package from which this header
file origins: */
#define LIBCURL_VERSION "7.47.0"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 7
#define LIBCURL_VERSION_MINOR 47
#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparions by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal (using 8 bits each). All three numbers are always represented
using two digits. 1.2 would appear as "0x010200" while version 9.11.7
appears as "0x090b07".
This 6-digit (24 bits) hexadecimal number does not show pre-release number,
and it is always a greater number in a more recent release. It makes
comparisons with greater than and less than work.
Note: This define is the full hex number and _does not_ use the
CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
#define LIBCURL_VERSION_NUM 0x072f00
/*
* This is the date and time when the full source package was created. The
* timestamp is not stored in git, as the timestamp is properly set in the
* tarballs by the maketgz script.
*
* The format of the date should follow this template:
*
* "Mon Feb 12 11:35:33 UTC 2007"
*/
#define LIBCURL_TIMESTAMP "Wed Jan 27 07:32:44 UTC 2016"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|z)
#define CURL_AT_LEAST_VERSION(x,y,z) \
(LIBCURL_VERSION_NUM >= CURL_VERSION_BITS(x, y, z))
#endif /* __CURL_CURLVER_H */
#ifndef __CURL_EASY_H
#define __CURL_EASY_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN CURL *curl_easy_init(void);
CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
CURL_EXTERN void curl_easy_cleanup(CURL *curl);
/*
* NAME curl_easy_getinfo()
*
* DESCRIPTION
*
* Request internal information from the curl session with this function. The
* third argument MUST be a pointer to a long, a pointer to a char * or a
* pointer to a double (as the documentation describes elsewhere). The data
* pointed to will be filled in accordingly and can be relied upon only if the
* function returns CURLE_OK. This function is intended to get used *AFTER* a
* performed transfer, all results from this function are undefined until the
* transfer is completed.
*/
CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
/*
* NAME curl_easy_duphandle()
*
* DESCRIPTION
*
* Creates a new curl session handle with the same options set for the handle
* passed in. Duplicating a handle could only be a matter of cloning data and
* options, internal state info and things like persistent connections cannot
* be transferred. It is useful in multithreaded applications when you can run
* curl_easy_duphandle() for each new thread to avoid a series of identical
* curl_easy_setopt() invokes in every thread.
*/
CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl);
/*
* NAME curl_easy_reset()
*
* DESCRIPTION
*
* Re-initializes a CURL handle to the default values. This puts back the
* handle to the same state as it was in when it was just created.
*
* It does keep: live connections, the Session ID cache, the DNS cache and the
* cookies.
*/
CURL_EXTERN void curl_easy_reset(CURL *curl);
/*
* NAME curl_easy_recv()
*
* DESCRIPTION
*
* Receives data from the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen,
size_t *n);
/*
* NAME curl_easy_send()
*
* DESCRIPTION
*
* Sends data over the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer,
size_t buflen, size_t *n);
#ifdef __cplusplus
}
#endif
#endif
#ifndef __CURL_MPRINTF_H
#define __CURL_MPRINTF_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <stdarg.h>
#include <stdio.h> /* needed for FILE */
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN int curl_mprintf(const char *format, ...);
CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...);
CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...);
CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength,
const char *format, ...);
CURL_EXTERN int curl_mvprintf(const char *format, va_list args);
CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args);
CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args);
CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength,
const char *format, va_list args);
CURL_EXTERN char *curl_maprintf(const char *format, ...);
CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args);
#ifdef _MPRINTF_REPLACE
# undef printf
# undef fprintf
# undef sprintf
# undef vsprintf
# undef snprintf
# undef vprintf
# undef vfprintf
# undef vsnprintf
# undef aprintf
# undef vaprintf
# define printf curl_mprintf
# define fprintf curl_mfprintf
# define sprintf curl_msprintf
# define vsprintf curl_mvsprintf
# define snprintf curl_msnprintf
# define vprintf curl_mvprintf
# define vfprintf curl_mvfprintf
# define vsnprintf curl_mvsnprintf
# define aprintf curl_maprintf
# define vaprintf curl_mvaprintf
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CURL_MPRINTF_H */
#ifndef __CURL_MULTI_H
#define __CURL_MULTI_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
This is an "external" header file. Don't give away any internals here!
GOALS
o Enable a "pull" interface. The application that uses libcurl decides where
and when to ask libcurl to get/send data.
o Enable multiple simultaneous transfers in the same thread without making it
complicated for the application.
o Enable the application to select() on its own file descriptors and curl's
file descriptors simultaneous easily.
*/
/*
* This header file should not really need to include "curl.h" since curl.h
* itself includes this file and we expect user applications to do #include
* <curl/curl.h> without the need for especially including multi.h.
*
* For some reason we added this include here at one point, and rather than to
* break existing (wrongly written) libcurl applications, we leave it as-is
* but with this warning attached.
*/
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void CURLM;
typedef enum {
CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or
curl_multi_socket*() soon */
CURLM_OK,
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_BAD_SOCKET, /* the passed in socket argument did not match */
CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */
CURLM_ADDED_ALREADY, /* an easy handle already added to a multi handle was
attempted to get added - again */
CURLM_LAST
} CURLMcode;
/* just to make code nicer when using curl_multi_socket() you can now check
for CURLM_CALL_MULTI_SOCKET too in the same style it works for
curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */
#define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM
/* bitmask bits for CURLMOPT_PIPELINING */
#define CURLPIPE_NOTHING 0L
#define CURLPIPE_HTTP1 1L
#define CURLPIPE_MULTIPLEX 2L
typedef enum {
CURLMSG_NONE, /* first, not used */
CURLMSG_DONE, /* This easy handle has completed. 'result' contains
the CURLcode of the transfer */
CURLMSG_LAST /* last, not used */
} CURLMSG;
struct CURLMsg {
CURLMSG msg; /* what this message means */
CURL *easy_handle; /* the handle it concerns */
union {
void *whatever; /* message-specific data */
CURLcode result; /* return code for transfer */
} data;
};
typedef struct CURLMsg CURLMsg;
/* Based on poll(2) structure and values.
* We don't use pollfd and POLL* constants explicitly
* to cover platforms without poll(). */
#define CURL_WAIT_POLLIN 0x0001
#define CURL_WAIT_POLLPRI 0x0002
#define CURL_WAIT_POLLOUT 0x0004
struct curl_waitfd {
curl_socket_t fd;
short events;
short revents; /* not supported yet */
};
/*
* Name: curl_multi_init()
*
* Desc: inititalize multi-style curl usage
*
* Returns: a new CURLM handle to use in all 'curl_multi' functions.
*/
CURL_EXTERN CURLM *curl_multi_init(void);
/*
* Name: curl_multi_add_handle()
*
* Desc: add a standard curl handle to the multi stack
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_remove_handle()
*
* Desc: removes a curl handle from the multi stack again
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_fdset()
*
* Desc: Ask curl for its fd_set sets. The app can use these to select() or
* poll() on. We want curl_multi_perform() called as soon as one of
* them are ready.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle,
fd_set *read_fd_set,
fd_set *write_fd_set,
fd_set *exc_fd_set,
int *max_fd);
/*
* Name: curl_multi_wait()
*
* Desc: Poll on all fds within a CURLM set as well as any
* additional fds passed to the function.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle,
struct curl_waitfd extra_fds[],
unsigned int extra_nfds,
int timeout_ms,
int *ret);
/*
* Name: curl_multi_perform()
*
* Desc: When the app thinks there's data available for curl it calls this
* function to read/write whatever there is right now. This returns
* as soon as the reads and writes are done. This function does not
* require that there actually is data available for reading or that
* data can be written, it can be called just in case. It returns
* the number of handles that still transfer data in the second
* argument's integer-pointer.
*
* Returns: CURLMcode type, general multi error code. *NOTE* that this only
* returns errors etc regarding the whole multi stack. There might
* still have occurred problems on invidual transfers even when this
* returns OK.
*/
CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle,
int *running_handles);
/*
* Name: curl_multi_cleanup()
*
* Desc: Cleans up and removes a whole multi stack. It does not free or
* touch any individual easy handles in any way. We need to define
* in what state those handles will be if this function is called
* in the middle of a transfer.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
/*
* Name: curl_multi_info_read()
*
* Desc: Ask the multi handle if there's any messages/informationals from
* the individual transfers. Messages include informationals such as
* error code from the transfer or just the fact that a transfer is
* completed. More details on these should be written down as well.
*
* Repeated calls to this function will return a new struct each
* time, until a special "end of msgs" struct is returned as a signal
* that there is no more to get at this point.
*
* The data the returned pointer points to will not survive calling
* curl_multi_cleanup().
*
* The 'CURLMsg' struct is meant to be very simple and only contain
* very basic informations. If more involved information is wanted,
* we will provide the particular "transfer handle" in that struct
* and that should/could/would be used in subsequent
* curl_easy_getinfo() calls (or similar). The point being that we
* must never expose complex structs to applications, as then we'll
* undoubtably get backwards compatibility problems in the future.
*
* Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
* of structs. It also writes the number of messages left in the
* queue (after this read) in the integer the second argument points
* to.
*/
CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
int *msgs_in_queue);
/*
* Name: curl_multi_strerror()
*
* Desc: The curl_multi_strerror function may be used to turn a CURLMcode
* value into the equivalent human readable error string. This is
* useful for printing meaningful error messages.
*
* Returns: A pointer to a zero-terminated error message.
*/
CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
/*
* Name: curl_multi_socket() and
* curl_multi_socket_all()
*
* Desc: An alternative version of curl_multi_perform() that allows the
* application to pass in one of the file descriptors that have been
* detected to have "action" on them and let libcurl perform.
* See man page for details.
*/
#define CURL_POLL_NONE 0
#define CURL_POLL_IN 1
#define CURL_POLL_OUT 2
#define CURL_POLL_INOUT 3
#define CURL_POLL_REMOVE 4
#define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD
#define CURL_CSELECT_IN 0x01
#define CURL_CSELECT_OUT 0x02
#define CURL_CSELECT_ERR 0x04
typedef int (*curl_socket_callback)(CURL *easy, /* easy handle */
curl_socket_t s, /* socket */
int what, /* see above */
void *userp, /* private callback
pointer */
void *socketp); /* private socket
pointer */
/*
* Name: curl_multi_timer_callback
*
* Desc: Called by libcurl whenever the library detects a change in the
* maximum number of milliseconds the app is allowed to wait before
* curl_multi_socket() or curl_multi_perform() must be called
* (to allow libcurl's timed events to take place).
*
* Returns: The callback should return zero.
*/
typedef int (*curl_multi_timer_callback)(CURLM *multi, /* multi handle */
long timeout_ms, /* see above */
void *userp); /* private callback
pointer */
CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle,
curl_socket_t s,
int ev_bitmask,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle,
int *running_handles);
#ifndef CURL_ALLOW_OLD_MULTI_SOCKET
/* This macro below was added in 7.16.3 to push users who recompile to use
the new curl_multi_socket_action() instead of the old curl_multi_socket()
*/
#define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z)
#endif
/*
* Name: curl_multi_timeout()
*
* Desc: Returns the maximum number of milliseconds the app is allowed to
* wait before curl_multi_socket() or curl_multi_perform() must be
* called (to allow libcurl's timed events to take place).
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle,
long *milliseconds);
#undef CINIT /* re-using the same name as in curl.h */
#ifdef CURL_ISOCPP
#define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num
#else
/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
#define LONG CURLOPTTYPE_LONG
#define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT
#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT
#define OFF_T CURLOPTTYPE_OFF_T
#define CINIT(name,type,number) CURLMOPT_/**/name = type + number
#endif
typedef enum {
/* This is the socket callback function pointer */
CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1),
/* This is the argument passed to the socket callback */
CINIT(SOCKETDATA, OBJECTPOINT, 2),
/* set to 1 to enable pipelining for this multi handle */
CINIT(PIPELINING, LONG, 3),
/* This is the timer callback function pointer */
CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4),
/* This is the argument passed to the timer callback */
CINIT(TIMERDATA, OBJECTPOINT, 5),
/* maximum number of entries in the connection cache */
CINIT(MAXCONNECTS, LONG, 6),
/* maximum number of (pipelining) connections to one host */
CINIT(MAX_HOST_CONNECTIONS, LONG, 7),
/* maximum number of requests in a pipeline */
CINIT(MAX_PIPELINE_LENGTH, LONG, 8),
/* a connection with a content-length longer than this
will not be considered for pipelining */
CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9),
/* a connection with a chunk length longer than this
will not be considered for pipelining */
CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10),
/* a list of site names(+port) that are blacklisted from
pipelining */
CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11),
/* a list of server types that are blacklisted from
pipelining */
CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12),
/* maximum number of open connections in total */
CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13),
/* This is the server push callback function pointer */
CINIT(PUSHFUNCTION, FUNCTIONPOINT, 14),
/* This is the argument passed to the server push callback */
CINIT(PUSHDATA, OBJECTPOINT, 15),
CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;
/*
* Name: curl_multi_setopt()
*
* Desc: Sets options for the multi handle.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle,
CURLMoption option, ...);
/*
* Name: curl_multi_assign()
*
* Desc: This function sets an association in the multi handle between the
* given socket and a private pointer of the application. This is
* (only) useful for curl_multi_socket uses.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle,
curl_socket_t sockfd, void *sockp);
/*
* Name: curl_push_callback
*
* Desc: This callback gets called when a new stream is being pushed by the
* server. It approves or denies the new stream.
*
* Returns: CURL_PUSH_OK or CURL_PUSH_DENY.
*/
#define CURL_PUSH_OK 0
#define CURL_PUSH_DENY 1
struct curl_pushheaders; /* forward declaration only */
CURL_EXTERN char *curl_pushheader_bynum(struct curl_pushheaders *h,
size_t num);
CURL_EXTERN char *curl_pushheader_byname(struct curl_pushheaders *h,
const char *name);
typedef int (*curl_push_callback)(CURL *parent,
CURL *easy,
size_t num_headers,
struct curl_pushheaders *headers,
void *userp);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif
#ifndef __STDC_HEADERS_H
#define __STDC_HEADERS_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <sys/types.h>
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);
int strcasecmp(const char *, const char *);
int strncasecmp(const char *, const char *, size_t);
#endif /* __STDC_HEADERS_H */
This diff has been collapsed.
......@@ -128,5 +128,18 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [Training and FAQ](https://www.taosdata.com/cn/faq)
- [FAQ](https://www.taosdata.com/cn/documentation20/faq): frequently asked questions and answers
- [Use cases](https://www.taosdata.com/cn/blog/?categories=4): examples that illustrate how to use TDengine
<ul>
<li><a href="https://www.taosdata.com/blog/2020/12/25/2126.html">Open technical course: an open-source, high-performance IoT big data platform, with an analysis of TDengine kernel internals</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1941.html">TDengine video tutorial: Getting Started</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1945.html">TDengine video tutorial: Data Modeling</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1961.html">TDengine video tutorial: Cluster Setup</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1951.html">TDengine video tutorial: Go Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1955.html">TDengine video tutorial: JDBC Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1957.html">TDengine video tutorial: NodeJS Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1963.html">TDengine video tutorial: Python Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1965.html">TDengine video tutorial: RESTful Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1959.html">TDengine video tutorial: "Zero-code" operations monitoring</a></li>
<li><a href="https://www.taosdata.com/cn/documentation20/faq">FAQ: frequently asked questions and answers</a></li>
<li><a href="https://www.taosdata.com/cn/blog/?categories=4">Use cases: examples that illustrate how to use TDengine</a></li>
</ul>
......@@ -59,5 +59,3 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
|Operations and maintenance learning cost must be controllable| | | √ |Same as above.|
|A large talent pool must be available in the market| √ | | |As a new-generation product, TDengine still has a limited number of experienced engineers in the job market, but the learning cost is low, and as the vendor we also provide operations training and support services.|
## Introduction to TDengine performance metrics and how to verify them
......@@ -8,14 +8,14 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个
Different types of data collection points often have different data characteristics, including sampling frequency, retention period, number of replicas, data block size, whether data updates are allowed, and so on. To let TDengine work at maximum efficiency in every scenario, TDengine recommends creating tables with different data characteristics in different databases, because each database can be configured with its own storage policy. When creating a database, besides the standard SQL options, the application can also specify parameters such as the retention period, the number of replicas, the number of memory blocks, the timestamp precision, the maximum and minimum number of records per file block, whether to compress, and how many days one data file covers. For example:
```cmd
```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
```
The statement above creates a database named power whose data is kept for 365 days (data older than 365 days is deleted automatically), with one data file every 10 days, 4 memory blocks, and updates allowed. For the detailed syntax and parameters, see <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a>.
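As an illustration only, a sketch of the same database with a few more of these parameters spelled out might look like the following; the extra option names (REPLICA, PRECISION) are assumed from TAOS SQL rather than taken from this change, so check them against the TAOS SQL reference for your version.

```mysql
-- Hedged sketch: REPLICA and PRECISION are assumed options, verify against TAOS SQL.
-- Keep data 365 days, one data file per 10 days, 4 memory blocks, 3 replicas,
-- microsecond timestamps, and allow updates.
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 REPLICA 3 PRECISION 'us' UPDATE 1;
```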
After creating the database, use the SQL command USE to switch to it as the current database, for example:
```cmd
```mysql
USE power;
```
......@@ -28,7 +28,7 @@ USE power;
## Creating a Super Table
An IoT system often contains many types of devices; a power grid, for example, has smart meters, transformers, bus bars, switches, and so on. To make aggregation across tables easy with TDengine, a super table must be created for each type of data collection point. Taking the smart meters in Table 1 as an example, the super table can be created with the following SQL statement:
```cmd
```mysql
CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);
```
As with creating an ordinary table, you must provide the table name (meters in the example) and the table schema, i.e., the column definitions. The first column must be a timestamp (ts in the example); the other columns are the collected physical quantities (current, voltage, and phase in the example), with data types such as integer, floating point, or string. In addition, you must provide the tag schema (location and groupId in the example); tag data types can also be integer, floating point, string, and so on. Static attributes of a collection point, such as its geographic location, device model, device group ID, or administrator ID, usually make good tags. The tag schema can be extended, reduced, or modified later. For the full definition and details, see the <a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL</a> section.
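As an illustration of the schema above, the sketch below creates one sub-table for a single smart meter from the meters super table; the table name d1001 and the tag values are illustrative, not part of this change.

```mysql
-- Hedged sketch: d1001 and the tag values are placeholders.
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```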
......@@ -59,3 +59,5 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
TDengine supports the multi-column model: as long as the physical quantities are collected at the same time by one data collection point (with the same timestamp), they can be stored as different columns in one super table. There is also an extreme design, the single-column model, in which every collected physical quantity gets its own table, so each type of physical quantity gets its own super table; for current, voltage, and phase, for example, three super tables would be created.
TDengine recommends the multi-column model whenever possible, because both insertion efficiency and storage efficiency are higher. In some scenarios, however, the set of quantities collected by a collection point changes frequently; with the multi-column model the super table schema would then need frequent changes, complicating the application, and the single-column model is simpler in that case.
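For comparison, a hedged sketch of the single-column model mentioned above, with one assumed super table per physical quantity (all names are illustrative):

```mysql
-- Hedged sketch of the single-column model: one super table per quantity.
CREATE TABLE current_st (ts timestamp, current float) TAGS (location binary(64), groupId int);
CREATE TABLE voltage_st (ts timestamp, voltage int) TAGS (location binary(64), groupId int);
CREATE TABLE phase_st (ts timestamp, phase float) TAGS (location binary(64), groupId int);
```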
For data modeling, see the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.
......@@ -161,7 +161,7 @@ SELECT function<field_name>,…
For super table aggregate queries, TDengine currently supports the following aggregate/selection functions: sum, count, avg, first, last, min, max, top, and bottom, plus projection of all or some columns; usage follows the same computation process as single-table queries. Other kinds of aggregate computation and arithmetic operations are not supported yet, and none of the current functions or computations can be nested.
A query without GROUP BY aggregates, over time, all tables under the super table that satisfy the filter conditions; by default the result is output in ascending timestamp order, and ORDER BY _c0 ASC|DESC can be used to choose ascending or descending timestamp order. An aggregate query with GROUP BY <tag_name> groups by tags and aggregates the data within each group; the output is the aggregate result per group, the order between groups can be specified with ORDER BY <tag_name>, and within each group the time series is monotonically increasing.
A query without GROUP BY aggregates, over time, all tables under the super table that satisfy the filter conditions; by default the result is output in ascending timestamp order, and ORDER BY _c0 ASC|DESC can be used to choose ascending or descending timestamp order. An aggregate query with GROUP BY <tag_name> groups by tags and aggregates the data within each group; the output is the aggregate result per group, the order between groups can be specified with ORDER BY <tag_name>, and within each group the time series is monotonically increasing.
Use SLIMIT/SOFFSET for paging across groups, i.e., the maximum number of groups output in the result set and the starting group position. Use LIMIT/OFFSET for paging within a group, i.e., the maximum number of records output per group and the starting record position.
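A hedged sketch of such an aggregate query against the meters super table from the earlier example (column and tag names assumed from that example):

```mysql
-- Hedged sketch: average voltage and peak current per location over the last hour,
-- groups ordered by the tag, first 5 groups only.
SELECT AVG(voltage), MAX(current) FROM meters
  WHERE ts > now - 1h
  GROUP BY location
  ORDER BY location
  SLIMIT 5 SOFFSET 0;
```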
......@@ -178,7 +178,7 @@ CREATE TABLE thermometer (ts timestamp, degree double)
TAGS(location binary(20), type int)
```
Suppose there are 4 collectors in total in three regions, Beijing, Tianjin, and Shanghai, and 3 types of temperature collectors; we can then create a table for each collector as follows:
Suppose there are 4 collectors in total in three regions, Beijing, Tianjin, and Shanghai, and 3 types of temperature collectors; we can then create a table for each collector as follows:
```mysql
CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1);
......
# TDengine 2.0 error codes and their decimal values
| Status code | Module | Error code (hex) | Error description | Error code (decimal) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
......
......@@ -60,7 +60,7 @@ create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s
a new table named `avg_vol` is created automatically; then, every 30 seconds, TDengine incrementally executes the SQL statement after `as`
and writes the query result into this table, so the user program only needs to query data from `avg_vol` afterwards. For example:
```shell
```mysql
taos> select * from avg_vol;
ts | avg_voltage_ |
===================================================
......@@ -72,14 +72,13 @@ taos> select * from avg_vol;
Note that the minimum query time window is 10 milliseconds, and there is no upper limit on the time window.
In addition, TDengine lets the user specify the start and end time of a continuous query.
If no start time is given, the continuous query starts from the time window containing the first original data record;
if no end time is given, the continuous query runs forever;
if an end time is specified, the continuous query stops once the system time reaches it.
For example, the continuous query created by the following SQL runs for one hour and then stops automatically.
```sql
```mysql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
......
#Data Model and Overall Architecture
# Data Model and Overall Architecture
## Data Model
### A Typical IoT Scenario
......@@ -102,7 +102,7 @@
Every record has a device ID, a timestamp, the collected physical quantities (current, voltage, and phase in the figure above), and the static tags associated with each device (Location and groupId in Table 1 above). Each device collects data either when triggered by an external event or on a configured period. The collected data points are time-ordered and form a data stream.
### Data Characteristics
### Data Characteristics
Besides being time series, closer study shows that IoT, connected-vehicle, and operations-monitoring data have many other distinctive characteristics:
1. The data is highly structured;
......@@ -121,7 +121,7 @@
### Relational Database Model
Because collected data is generally structured, and to lower the learning curve, TDengine manages data with the traditional relational database model. Users therefore first create a database, then create tables, and only then can they insert or query data. TDengine uses structured storage rather than NoSQL key-value storage.
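A minimal end-to-end sketch of that workflow follows; the database and table names are illustrative only.

```mysql
-- Hedged sketch: create a database, create a table, insert one row, query it back.
CREATE DATABASE demo;
USE demo;
CREATE TABLE sensor1 (ts timestamp, temperature float);
INSERT INTO sensor1 VALUES (now, 23.5);
SELECT * FROM sensor1;
```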
### One Table per Data Collection Point
### One Table per Data Collection Point
To take full advantage of the time-series nature and other characteristics of the data, TDengine requires **a separate table for each data collection point** (for example, ten million smart meters require ten million tables; d1001, d1002, d1003, and d1004 in the table above each get their own table) to store the time-series data collected by that point. This design has several major advantages:
1. It guarantees that the data of one collection point is stored contiguously, block by block, on the storage medium. When reading a time range of data, this greatly reduces random reads and speeds up reads and queries by orders of magnitude.
......@@ -150,7 +150,7 @@ TDengine 分布式架构的逻辑结构图如下:
<center> 图 1 TDengine架构示意图 </center>
一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文<a href="https://www.taosdata.com/blog/2020/09/11/1824.html">《一篇文章说清楚TDengine的FQDN》</a>
**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
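Since a dnode is identified by its End Point, the commands below sketch how a second data node could be added from the taos shell of an existing node and then listed; the host name is hypothetical.
```mysql
CREATE DNODE "h2.taosdata.com:6030";   -- End Point = FQDN + port of the new dnode
SHOW DNODES;                           -- list each dnode with its End Point and status
```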
......@@ -356,7 +356,7 @@ SQL语句的解析和校验工作在客户端完成。解析SQL语句并生成
客户端在获取查询结果的时候,dnode的查询执行队列中的工作线程会等待vnode执行线程执行完成,才能将查询结果返回到请求的客户端。
### 按时间轴聚合、降采样、插值
时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳数据在时间轴上进行聚合是不同于普通数据库的重要功能。从这点上来看,与流计算引擎的窗口查询有相似的地方。
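As a sketch of aggregation along the time axis, the query below downsamples the meters example to 10-minute windows and fills empty windows with the previous value; the time range is arbitrary.
```mysql
SELECT AVG(current), MAX(voltage) FROM meters
  WHERE ts >= '2021-01-01 00:00:00' AND ts < '2021-01-02 00:00:00'
  INTERVAL(10m)
  FILL(PREV);
```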
......
......@@ -137,6 +137,16 @@ DROP DNODE "fqdn:port";
其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
<font color=green>**【注意】**</font>
- 一个数据节点一旦被drop之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成drop dnode操作之前,会将该dnode的数据迁移走。
- 请注意 drop dnode 和 停止taosd进程是两个不同的概念,不要混淆:因为删除dnode之前要执行迁移数据的操作,因此被删除的dnode必须保持在线状态。待删除操作结束之后,才能停止taosd进程。
- 一个数据节点被drop之后,其他节点都会感知到这个dnodeID的删除操作,任何集群中的节点都不会再接收此dnodeID的请求。
- dnodeIDs are assigned automatically by the cluster and cannot be specified manually; they are allocated in increasing order and are never reused. (A sketch of the full removal workflow follows this list.)
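A minimal sketch of the removal workflow implied by the notes above; the End Point is hypothetical, and taosd on the node being removed must keep running until the DROP DNODE statement returns.
```mysql
SHOW DNODES;                           -- find the End Point of the node to remove
DROP DNODE "h2.taosdata.com:6030";     -- its data is migrated away before the drop completes
```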
### 查看数据节点
执行CLI程序taos,使用root账号登录进TDengine系统,执行:
......@@ -216,3 +226,5 @@ SHOW MNODES;
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
TDengine provides an executable program named tarbitrator; simply pick any Linux server and run it there. Go to the [download page](https://www.taosdata.com/cn/all-downloads/), find the TDengine Arbitrator Linux section, and download and install a suitable version. The program has almost no system-resource requirements and only needs a working network connection. Its command-line option `-p` specifies the port it serves on, 6042 by default. When configuring each taosd instance, the arbitrator parameter in taos.cfg can be set to the arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured arbitrator; if the number of replicas is odd, no connection is established even if an arbitrator is configured.
关于集群搭建请参考<a href="https://www.taosdata.com/blog/2020/11/11/1961.html">视频教程</a>
......@@ -30,7 +30,7 @@ TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储
- master: 具有最新的数据,容许客户端往里写入数据,一个虚拟节点组,至多一个master.
- slave:与master是同步的,但不容许客户端往里写入数据,根据配置,可以容许客户端对其进行查询。
- unsynced: 节点处于非同步状态,比如虚拟节点刚启动、或与其他虚拟节点的连接出现故障等。处于该状态时,该虚拟节点既不能提供写入,也不能提供查询服务
- offline: 由于宕机或网络原因,无法访问到某虚拟节点时,其他虚拟节点将该虚拟节点标为离线。但请注意,该虚拟节点本身的状态可能是unsynced或其他,但不会是离线。
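The current role of each vnode can be observed from the taos shell. This is a sketch that assumes a database named power as in the earlier examples; the exact output columns depend on the version.
```mysql
USE power;
SHOW VGROUPS;   -- one row per vgroup, listing the dnode and role (master/slave/...) of each replica
```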
**Quorum:**
......@@ -83,10 +83,10 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下:
1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced.
2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程
3. 如果master不存在,则会检查自己保存的各虚拟节点的状态信息与从另一节点接收到的是否一致,如果一致,说明节点组里状态已经稳定一致,则会触发选举流程。如果不一致,说明状态还没趋于一致,即使master不存在,也不进行选主。由于要求状态信息一致才进行选举,每个虚拟节点根据同样的信息,会选出同一个虚拟节点做master,无需投票表决。
4. 自己的状态是根据规则自己决定并修改的,并不需要其他节点同意,包括成为master。一个节点无权修改其他节点的状态。
5. 如果一个虚拟节点检测到自己或其他虚拟节点的role发生改变,该节点会广播它自己保存的各个虚拟节点的状态信息(role和version)
具体的流程图如下:
......@@ -124,7 +124,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
如果一虚拟节点(vnode B) 处于unsynced状态,master存在(vnode A),而且其版本号比master的低,它将立即启动数据恢复流程。在理解恢复流程时,需要澄清几个关于文件的概念和处理规则。
1. 每个文件(无论是archived data的file还是wal)都有一个index, 这需要应用来维护(vnode里,该index就是fileId*3 + 0/1/2, 对应data, head与last三个文件)。如果index为0,表示系统里最老的数据文件。对于mnode里的文件,数量是固定的,对应于acct, user, db, table等文件。
2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。
3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。
4. 当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则
......@@ -212,10 +212,10 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
相同之处:
- 三大流程一致:Raft里有Leader election, replication, safety,完全对应TDengine的选举、数据转发、数据恢复三个流程
- The node-state definitions are consistent: each node in Raft is in one of three states, Leader, Follower, or Candidate, while in TDengine the states are Master, Slave, Unsynced, and Offline. There is one extra state, offline, but the model is essentially the same, because offline is how a node is seen from the outside; the node itself is still in master, slave, or unsynced.
- 数据转发流程完全一样,Master(leader)需要等待回复确认。
- 数据恢复流程几乎一样,Raft没有涉及历史数据同步问题,只考虑了WAL数据同步
不同之处:
......@@ -226,7 +226,7 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
## Meta Data的数据复制
TDengine里存在时序数据,也存在Meta Data。Meta Data对数据的可靠性要求更高,那么TDengine设计能否满足要求呢?下面做个仔细分析
TDengine里Meta Data包括以下:
......
......@@ -26,6 +26,7 @@ else
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
......
......@@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
......
Subproject commit ec77d9049a719dabfd1a7c1122a209e201861944
Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df