Commit cc5c7882 authored by Ping Xiao

Merge branch 'develop' into xiaoping/CI_update

##########################################################################################
# Customize file classifications. #
# Results from files under any classifier will be excluded from LGTM #
# statistics. #
##########################################################################################
##########################################################################################
# Use the `path_classifiers` block to define changes to the default classification of #
# files. #
##########################################################################################
path_classifiers:
# docs:
# Identify the top-level file called `generate_javadoc.py` as documentation-related.
test:
# Override LGTM's default classification of test files by excluding all files.
- exclude: /
# Classify all files in the top-level directories tests/ and testsuites/ as test code.
- tests
# - testsuites
# Classify all files with suffix `.test` as test code.
# Note: use only forward slash / as a path separator.
# Use ** to indicate an arbitrary parent path.
# Use * to indicate any sequence of characters excluding /.
# Always enclose the expression in double quotes if it includes *.
# - "**/*.test"
# Refine the classifications above by excluding files in test/util/.
# - exclude: test/util
# The default behavior is to tag all files created during the
# build as `generated`. Results are hidden for generated code. You can tag
# further files as being generated by adding them to the `generated` section.
generated:
# Exclude all `*.c` files under the `ui/` directory from classification as
# generated code.
# - exclude: ui/**/*.c
# By default, all files not checked into the repository are considered to be
# 'generated'.
# The default behavior is to tag library code as `library`. Results are hidden
# for library code. You can tag further files as being library code by adding them
# to the `library` section.
library:
- exclude: deps/
# The default behavior is to tag template files as `template`. Results are hidden
# for template files. You can tag further files as being template files by adding
# them to the `template` section.
template:
#- exclude: path/to/template/code/**/*.c
# Define your own category, for example: 'some_custom_category'.
some_custom_category:
# Classify all files in the top-level directory tools/ (or the top-level file
# called tools).
# - tools
#########################################################################################
# Use the `queries` block to change the default display of query results. #
#########################################################################################
# queries:
# Start by hiding the results of all queries.
# - exclude: "*"
# Then include all queries tagged 'security' and 'correctness', and with a severity of
# 'error'.
# - include:
# tags:
# - "security"
# - "correctness"
# severity: "error"
# Specifically hide the results of two queries.
# - exclude: cpp/use-of-goto
# - exclude: java/equals-on-unrelated-types
# Refine by including the `java/command-line-injection` query.
# - include: java/command-line-injection
#########################################################################################
# Define changes to the default code extraction process. #
# Each block configures the extraction of a single language, and modifies actions in a #
# named step. Every named step includes automatic default actions, #
# except for the 'prepare' step. The steps are performed in the following sequence: #
# prepare #
# after_prepare #
# configure (C/C++ only) #
# python_setup (Python only) #
# before_index #
# index #
##########################################################################################
#########################################################################################
# Environment variables available to the steps: #
#########################################################################################
# LGTM_SRC
# The root of the source tree.
# LGTM_WORKSPACE
# An existing (initially empty) folder outside the source tree.
# Used for temporary download and setup commands.
#########################################################################################
# Use the extraction block to define changes to the default code extraction process #
# for one or more languages. The settings for each language are defined in a child #
# block, with one or more steps. #
#########################################################################################
extraction:
# Define settings for C/C++ analysis
#####################################
cpp:
# The `prepare` step exists for customization on LGTM.com only.
prepare:
# # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to
# # be installed.
packages:
- cmake
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# This step is useful for C/C++ analysis where you want to prepare the environment
# for the `configure` step without changing the default behavior for that step.
# after_prepare:
#- export GNU_MAKE=make
#- export GIT=true
# The `configure` step generates build configuration files which the `index` step
# then uses to build the codebase.
configure:
command:
- mkdir build
- cd build
- cmake ..
# - ./prepare_deps
# Optional step. You should add a `before_index` step if you need to run commands
# before the `index` step.
# before_index:
# - export BOOST_DIR=$LGTM_SRC/boost
# - export GTEST_DIR=$LGTM_SRC/googletest
# - export HUNSPELL_DIR=$LGTM_SRC/hunspell
# - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp
# The `index` step builds the code and extracts information during the build
# process.
index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
build_command:
- cd build
- make
# - $GNU_MAKE -j2 -s
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Specify MSBuild settings
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /p:Platform=x64 /p:Configuration=Release
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration:
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform:
# Specify the MSBuild target. Default: rebuild.
# target:
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files.
# vstools_version: 10
# Define settings for C# analysis
##################################
# csharp:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
#index:
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./example-compile-all.sh
# By default, LGTM analyzes the code by building it. You can override this,
# and tell LGTM not to build the code. Beware that this can lead
# to less accurate results.
# buildless: true
# Specify .NET Core settings.
# dotnet:
# Specify additional arguments to `dotnet build`.
# Default: empty.
# arguments: "example_arg"
# Specify the version of .NET Core SDK to use.
# Default: The version installed on the build machine.
# version: 2.1
# Specify MSBuild settings.
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /P:WarningLevel=2
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration: release
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform: x86
# Specify the MSBuild target. Default: rebuild.
# target: notest
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files
# vstools_version: 10
# Specify additional options for the extractor,
# for example --fast to perform a faster extraction that produces a smaller
# database.
# extractor: "--fast"
# Define settings for Go analysis
##################################
# go:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
# index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./compile-all.sh
# Define settings for Java analysis
####################################
# java:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify Gradle settings.
# gradle:
# Specify the required Gradle version.
# Default: determined automatically.
# version: 4.4
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command: ./compile-all.sh
# Specify the Java version required to build the project.
# java_version: 11
# Specify whether to extract Java .properties files
# Default: false
# properties_files: true
# Specify Maven settings.
# maven:
# Specify the path (absolute or relative) of a Maven settings file to use.
# Default: Maven uses a settings file in the default location, if it exists.
# settings_file: /opt/share/settings.xml
# Specify the path of a Maven toolchains file.
# Default: Maven uses a toolchains file in the default location, if it exists.
# toolchains_file: /opt/share/toolchains.xml
# Specify the required Maven version.
# Default: the Maven version is determined automatically, where feasible.
# version: 3.5.2
# Specify how XML files should be extracted:
# all = extract all XML files.
# default = only extract XML files named `AndroidManifest.xml`, `pom.xml`, and `web.xml`.
# disabled = do not extract any XML files.
# xml_mode: all
# Define settings for JavaScript analysis
##########################################
# javascript:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify a list of files and folders to extract.
# Default: The project root directory.
# include:
# - src/js
# Specify a list of files and folders to exclude from extraction.
# exclude:
# - thirdparty/lib
# You can add additional file types for LGTM to extract, by mapping file
# extensions (including the leading dot) to file types. The usual
# include/exclude patterns apply, so, for example, `.jsm` files under
# `thirdparty/lib` will not be extracted.
# filetypes:
# ".jsm": "js"
# ".tmpl": "html"
# Specify a list of glob patterns to include/exclude files from extraction; this
# is applied on top of the include/exclude paths from above; patterns are
# processed in the same way as for path classifiers above.
# Default: include all files with known extensions (such as .js, .ts and .html),
# but exclude files ending in `-min.js` or `.min.js` and folders named `node_modules`
# or `bower_components`
# filters:
# exclude any *.ts files anywhere.
# - exclude: "**/*.ts"
# but include *.ts files under src/js/typescript.
# - include: "src/js/typescript/**/*.ts"
# Specify how TypeScript files should be extracted:
# none = exclude all TypeScript files.
# basic = extract syntactic information from TypeScript files.
# full = extract syntactic and type information from TypeScript files.
# Default: full.
# typescript: basic
# By default, LGTM doesn't extract any XML files. You can override this by
# using the `xml_mode` property and setting it to `all`.
# xml_mode: all
# Define settings for Python analysis
######################################
# python:
# # The `prepare` step exists for customization on LGTM.com only.
# # prepare:
# # # The `packages` section is valid for LGTM.com only. It names packages to
# # # be installed.
# # packages: libpng-dev
# # This step is useful for Python analysis where you want to prepare the
# # environment for the `python_setup` step without changing the default behavior
# # for that step.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# # This sets up the Python interpreter and virtual environment, ready for the
# # `index` step to extract the codebase.
# python_setup:
# # Specify packages that should NOT be installed despite being mentioned in the
# # requirements.txt file.
# # Default: no package marked for exclusion.
# exclude_requirements:
# - pywin32
# # Specify a list of pip packages to install.
# # If any of these packages cannot be installed, the extraction will fail.
# requirements:
# - Pillow
# # Specify a list of requirements text files to use to set up the environment,
# # or false for none. Default: any requirements.txt, test-requirements.txt,
# # and similarly named files identified in the codebase are used.
# requirements_files:
# - required-packages.txt
# # Specify a setup.py file to use to set up the environment, or false for none.
# # Default: any setup.py files identified in the codebase are used in preference
# # to any requirements text files.
# setup_py: new-setup.py
# # Override the version of the Python interpreter used for setup and extraction
# # Default: Python 3.
# version: 2
# # Optional step. You should add a `before_index` step if you need to run commands
# # before the `index` step.
# before_index:
# - antlr4 -Dlanguage=Python3 Grammar.g4
# # The `index` step extracts information from the files in the codebase.
# index:
# # Specify a list of files and folders to exclude from extraction.
# # Default: Git submodules and Subversion externals.
# exclude:
# - legacy-implementation
# - thirdparty/libs
# filters:
# - exclude: "**/documentation/examples/snippets/*.py"
# - include: "**/documentation/examples/test_application/*"
# include:
# - example/to/include
pipeline {
agent any
stages {
stage('build TDengine') {
steps {
sh '''cd ${WORKSPACE}
export TZ=Asia/Harbin
date
rm -rf ${WORKSPACE}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
cd ${WORKSPACE}/debug'''
}
}
stage('test_tsim') {
parallel {
stage('test') {
steps {
sh '''cd ${WORKSPACE}/tests
#./test-all.sh smoke
sudo ./test-all.sh full'''
}
}
stage('test_crash_gen') {
steps {
sh '''cd ${WORKSPACE}/tests/pytest
sudo ./crash_gen.sh -a -p -t 4 -s 2000'''
}
}
stage('test_valgrind') {
steps {
sh '''cd ${WORKSPACE}/tests/pytest
sudo ./valgrind-test.sh 2>&1 > mem-error-out.log
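# Keep only the 'start to execute' / 'ERROR SUMMARY' lines from the valgrind log; any case reporting more than 12 errors aborts the build below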
grep \'start to execute\\|ERROR SUMMARY\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-mem-error-out.log
for memError in `grep \'ERROR SUMMARY\' uniq-mem-error-out.log | awk \'{print $4}\'`
do
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\\
More than our threshold! ## ${NC}"
travis_terminate $memError
fi
fi
done
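# Apply the same pattern to valgrind's 'definitely lost' summary lines, with a threshold of 13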
grep \'start to execute\\|definitely lost:\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-definitely-lost-out.log
for defiMemError in `grep \'definitely lost:\' uniq-definitely-lost-out.log | awk \'{print $7}\'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 13 ]; then
echo -e "${RED} ## Memory errors number valgrind reports \\
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
travis_terminate $defiMemError
fi
fi
done'''
}
}
}
}
}
}
\ No newline at end of file
......@@ -2,7 +2,6 @@
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
......@@ -84,14 +83,22 @@ sudo dnf install -y maven
## Get the source code
- github:
First of all, you may clone the source code from GitHub:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
The connectors for Go & Grafana have been moved to separate repositories,
so you should run this command in the TDengine directory to install them:
```bash
git submodule update --init --recursive
```
## Build TDengine
### On Linux platform
```bash
mkdir debug && cd debug
cmake .. && cmake --build .
......@@ -109,6 +116,34 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
If you use Visual Studio 2019, please open a command window by executing "cmd.exe".
Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
Or, you can open a command window by clicking the Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell" and then execute the following commands:
```
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
# Quick Run
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd
......
......@@ -9,9 +9,7 @@ set -e
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
lib_link_dir="/usr/lib"
#install main path
install_main_dir="/usr/local/taos"
lib64_link_dir="/usr/lib64"
# Color setting
RED='\033[0;31m'
......@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
function clean_driver() {
${csudo} rm -f /usr/lib/libtaos.so || :
}
function install_driver() {
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
#create install main dir and all sub dir
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
echo
echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
echo
echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
else
echo -e "${GREEN}TDengine client driver already exists, Please confirm whether the alert version matches the client driver version!${NC}"
fi
}
install_driver
......@@ -119,7 +119,7 @@ WantedBy=multi-user.target
return nil
}
const version = "TDengine alert v2.0.0.1"
var version = "2.0.0.1s"
func main() {
var (
......@@ -133,7 +133,7 @@ func main() {
flag.Parse()
if showVersion {
fmt.Println(version)
fmt.Println("TDengine alert v" + version)
return
}
......
......@@ -6,9 +6,9 @@ set -e
# set parameters by default value
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
osType=linux # [linux | darwin | windows]
version=""
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
while getopts "h:c:o:" arg
while getopts "h:c:o:n:" arg
do
case $arg in
c)
......@@ -19,6 +19,10 @@ do
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
exit 0
......@@ -30,18 +34,27 @@ do
esac
done
if [ "$version" == "" ]; then
echo "Please input the correct version!"
exit 1
fi
startdir=$(pwd)
scriptdir=$(dirname $(readlink -f $0))
cd ${scriptdir}/cmd/alert
version=$(grep 'const version =' main.go | awk '{print $NF}')
version=${version%\"}
version=${version:1}
echo "cpuType=${cpuType}"
echo "osType=${osType}"
echo "version=${version}"
GOOS=${osType} GOARCH=${cpuType} go build
GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}
mkdir -p TDengine-alert/driver
cp alert alert.cfg install_driver.sh ./TDengine-alert/.
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
chmod 777 ./TDengine-alert/install_driver.sh
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
rm -rf ./TDengine-alert
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/
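
The switch from `const version` to `var version` in main.go works together with the `go build -ldflags '-X main.version='${version}` line above: Go's linker can only overwrite package-level string variables at link time, not constants. A minimal sketch of the same technique (package path and values are illustrative only):

```bash
# Assumes the main package declares: var version = "dev"
VERSION=2.0.4.0
go build -ldflags "-X main.version=${VERSION}" -o alert .
```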
......@@ -48,6 +48,7 @@ ENDIF ()
IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
......@@ -55,30 +56,38 @@ ENDIF ()
IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ADD_DEFINITIONS(-D_TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ADD_DEFINITIONS(-D_TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
......@@ -86,6 +95,7 @@ IF (TD_APLHINE)
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
MESSAGE(STATUS "aplhine is defined")
ENDIF ()
IF (TD_LINUX)
......@@ -95,12 +105,12 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
IF (TD_NINGSI_60)
ADD_DEFINITIONS(-D_TD_NINGSI_60_)
ADD_DEFINITIONS(-D_TD_NINGSI_60)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
......@@ -118,6 +128,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-DDARWIN)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
......@@ -147,11 +158,13 @@ IF (TD_WINDOWS_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_WINDOWS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "windows64 is defined")
ENDIF ()
IF (TD_WINDOWS_32)
ADD_DEFINITIONS(-D_TD_WINDOWS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "windows32 is defined")
ENDIF ()
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
......
......@@ -10,31 +10,27 @@ ELSEIF (TD_WINDOWS)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
ENDIF ()
IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
ENDIF ()
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .)
ENDIF ()
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSE ()
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.3.0")
SET(TD_VER_NUMBER "2.0.4.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......@@ -42,6 +42,12 @@ IF (DEFINED CPUTYPE)
ELSE ()
IF (TD_WINDOWS_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_LINUX_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_ARM_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_MIPS_32)
SET(TD_VER_CPUTYPE "x86")
ELSE ()
SET(TD_VER_CPUTYPE "x64")
ENDIF ()
......
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(z ${SRC})
ENDIF ()
\ No newline at end of file
IF (TD_WINDOWS)
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /WX-")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /WX-")
ENDIF()
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(z ${SRC})
......@@ -11,7 +11,7 @@ TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/)
### 配置Grafana
TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录下。
TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin目录下。
以CentOS 7.2操作系统为例,将tdengine目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
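
下面是完成上述拷贝并重启 Grafana 的一个最小示意(假设 Grafana 以 systemd 服务方式运行,且使用默认插件目录):

```bash
# 将安装包自带的 Grafana 插件拷贝为 /var/lib/grafana/plugins/tdengine,并重启 Grafana 使其生效
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
sudo systemctl restart grafana-server
```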
......
......@@ -11,7 +11,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __零运维成本、零学习成本__:安装、集群一秒搞定,无需分库分表,实时备份。标准SQL,支持JDBC, RESTful, 支持Python/Java/C/C++/Go, 与MySQL相似,零学习成本。
* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
......
......@@ -30,13 +30,13 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版
- TDengine-alert-2.0.0-Linux-x64.tar.gz (8.1M)
目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which`命令来检测系统中是否存在`systemd`:
目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which systemctl`命令来检测系统中是否存在`systemd`:
```cmd
which systemd
which systemctl
```
如果系统中不存在`systemd`命令,请考虑[通过源码安装](#通过源码安装)TDengine。
如果系统中不存在`systemd`,请考虑[通过源码安装](#通过源码安装)TDengine。
具体的安装过程,请参见<a href="https://www.taosdata.com/blog/2019/08/09/566.html">TDengine多种安装包的安装和卸载</a>
......@@ -68,7 +68,7 @@ systemctl status taosd
taos
```
如果TDengine终端链接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端链接服务端失败的问题)。TDengine终端的提示符号如下:
如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下:
```cmd
taos>
......@@ -99,8 +99,8 @@ Query OK, 2 row(s) in set (0.001700s)
- -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_
- -h, --host: 指定服务的IP地址,默认为本地服务
- -s, --commands: 在不进入终端的情况下运行TDengine命令
- -u, -- user: 链接TDengine服务器的用户名,缺省为root
- -p, --password: 链接TDengine服务器的密码,缺省为taosdata
- -u, -- user: 连接TDengine服务器的用户名,缺省为root
- -p, --password: 连接TDengine服务器的密码,缺省为taosdata
- -?, --help: 打印出所有命令行参数
示例:
......
......@@ -16,13 +16,13 @@ Three different packages are provided, please pick up the one you like.
<li><a id='tdengine-deb' style='color:var(--b2)'>TDengine DEB package (1.7M)</a></li>
<li><a id='tdengine-tar' style='color:var(--b2)'>TDengine Tarball (3.0M)</a></li>
</ul>
For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check if your system has *systemd*, use the _which_ command.
For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check if your system has the *systemd* package, use the _which systemctl_ command.
```cmd
which systemd
which systemctl
```
If the `systemd` command is not found, please [install from source code](#Install-from-Source).
If the `systemd` package is not found, please [install from source code](#Install-from-Source).
### Running TDengine
......
......@@ -19,7 +19,7 @@ CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4;
USE power;
```
就当前链接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
就当前连接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
**注意:**
......@@ -56,4 +56,4 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
## 多列模型 vs 单列模型
TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。
TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。
TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。
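
下面用一个假设的例子示意两种建模方式的差别(通过 taos 客户端的 -s 参数执行,表结构与名称仅为示意):

```bash
# 多列模型:电流、电压、相位作为同一张超级表的不同列(同一时间戳一次采集)
taos -s "CREATE TABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);"

# 单列模型:每种物理量单独建一张超级表
taos -s "CREATE TABLE power.current (ts TIMESTAMP, val FLOAT) TAGS (location BINARY(64), groupId INT);"
taos -s "CREATE TABLE power.voltage (ts TIMESTAMP, val INT) TAGS (location BINARY(64), groupId INT);"
taos -s "CREATE TABLE power.phase (ts TIMESTAMP, val FLOAT) TAGS (location BINARY(64), groupId INT);"
```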
......@@ -196,7 +196,7 @@ TDengine是基于硬件、软件系统不可靠、一定会有故障的假设进
**对外服务地址**:TDengine集群可以容纳单台、多台甚至几千台物理节点。应用只需要向集群中任何一个物理节点的publicIp发起连接即可。启动CLI应用taos时,选项-h需要提供的就是publicIp。
**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图链接secondIp。
**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图连接secondIp。
dnode启动后,会获知集群的mnode IP列表,并且定时向mnode发送状态信息。
......@@ -245,4 +245,4 @@ vnode(虚拟数据节点)保存采集的时序数据,而且查询、计算都
**Note:**目前集群功能仅仅限于企业版
\ No newline at end of file
**Note:**目前集群功能仅仅限于企业版
......@@ -29,23 +29,9 @@ Query OK, 2 row(s) in set (0.001100s)
具体的查询语法请看<a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL </a>
## 多表聚合查询
物联网场景中,往往同一个类型的数据采集点有多个。TDengine采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时TDengine使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine提供了一高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。
TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。
应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
<center> <img src="../assets/stable.png"> </center>
<center> 多表聚合查询原理图 </center>
1:应用将一个查询条件发往系统;2: taosc将超级表的名字发往 Meta Node(管理节点);3:管理节点将超级表所拥有的 vnode 列表发回 taosc;4:taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点;5:每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc;6:taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。
由于TDengine在vnode内将标签数据与时序数据分离存储,通过先在内存里过滤标签数据,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个vnode/dnode,聚合计算操作在多个vnode里并发进行,又进一步提升了聚合的速度。
对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
比如:在TAOS Shell,查找所有智能电表采集的电压平均值,并按照location分组
**示例1**:在TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照location分组
```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
......@@ -55,6 +41,18 @@ taos> SELECT AVG(voltage) FROM meters GROUP BY location;
Query OK, 2 row(s) in set (0.002136s)
```
**示例2**:在TAOS shell, 查找groupId为2的所有智能电表过去24小时的记录条数,电流的最大值
```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
count(*) | max(current) |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在<a href="https://www.taosdata.com/cn/documentation20/taos-sql/">TAOS SQL </a>一章,查询类操作都会注明是否支持超级表。
## 降采样查询、插值
物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每10秒钟求和
......@@ -66,9 +64,9 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```
降采样操作也适用于超级表,比如:将所有智能电表采集的电流值每秒钟求和
降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s);
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
......@@ -78,6 +76,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用TDengine的降采样操作就轻松解决。如果一个时间间隔里,没有采集的数据,TDengine还提供插值计算的功能。
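
例如,下面这个假设的查询在按 10 秒降采样的同时,用前一个非空值填充没有采集到数据的时间窗口(FILL 的各种模式见 TAOS SQL 一章):

```bash
taos -s "SELECT AVG(current) FROM power.d1001 WHERE ts >= '2018-10-03 14:38:00' AND ts <= '2018-10-03 14:40:00' INTERVAL(10s) FILL(PREV);"
```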
......
......@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>…
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
......
......@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
- 内部函数now是服务器的当前时间
- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间
- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数
- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据
- TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d)
- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。 数字后面的时间单位可以是 a(毫秒)、s(秒)、 m(分)、h(小时)、d(天)、w(周)。 比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据。 在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
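
例如,下面两个假设的查询分别使用相对时间条件和自然月时间窗口(仅作示意):

```bash
# 查询两周前整整一周的数据
taos -s "SELECT * FROM t1 WHERE ts > now-2w AND ts <= now-1w;"
# 指定降采样窗口时可使用 n(自然月)作为时间单位
taos -s "SELECT COUNT(*) FROM t1 WHERE ts > now-1y INTERVAL(1n);"
```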
TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。
......@@ -99,17 +98,18 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。
```mysql
ALTER DATABASE db_name QUORUM 365;
ALTER DATABASE db_name QUORUM 2;
```
QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。
```mysql
ALTER DATABASE db_name BLOCKS 365;
ALTER DATABASE db_name BLOCKS 100;
```
BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。
**Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
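
例如,下面的假设操作先调整 BLOCKS,再用 show databases 确认修改是否生效:

```bash
taos -s "ALTER DATABASE db_name BLOCKS 100;"
taos -s "SHOW DATABASES;"
```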
- **显示系统所有数据库**
```mysql
SHOW DATABASES;
......@@ -124,7 +124,8 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
说明:
1) 表的第一个字段必须是TIMESTAMP,并且系统自动将其设为主键;
2) 表名最大长度为193;
3) 表的每行长度不能超过16k个字符;
3) 表的每行长度不能超过16k个字符;
4) 子表名只能由字母、数字和下划线组成,且不能以数字开头
5) 使用数据类型binary或nchar,需指定其最长的字节数,如binary(20),表示20字节;
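
结合上述限制,下面给出一个假设的建表示例(表名以字母开头,binary 列显式指定了最大字节数):

```bash
taos -s "CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, remark BINARY(20));"
```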
- **删除数据表**
......@@ -298,7 +299,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[INTERVAL (interval_val [, interval_offset])]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
......@@ -971,17 +972,17 @@ TDengine支持按时间段进行聚合,可以将表中数据按照时间段进
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
- WHERE语句可以指定查询的起止时间和其他过滤条件
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
1. 不进行填充:NONE(默认填充模式)。
......@@ -1019,5 +1020,5 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
- 表名最大长度为193,每行数据最大长度16k个字符
- 列名最大长度为65,最多允许1024列,最少需要2列,第一列必须是时间戳
- 标签最多允许128个,可以0个,标签总长度不超过16k个字符
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为8M
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
# TDengine 2.0 错误码以及对应的十进制码
| Code | bit | error code | 错误描述 | 十进制错误码 |
| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
......@@ -87,7 +87,7 @@
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
......@@ -107,7 +107,7 @@
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
......
......@@ -130,9 +130,25 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。
- arbitrator: 系统中裁决器的end point,缺省为空
- arbitrator: 系统中裁决器的end point,缺省为空
- timezone、locale、charset 的配置见客户端配置。
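
下面是向 taos.cfg 追加这几个参数的一个假设示例(取值与 arbitrator 的 end point 仅为示意):

```bash
sudo tee -a /etc/taos/taos.cfg > /dev/null << 'EOF'
statusInterval 1
maxTablesPerVnode 1000000
arbitrator arbitrator.taos.com:6042
EOF
```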
为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效:
```mysql
ALTER DNODE <dnode_id> <config>
```
- dnode_id: 可以通过SQL语句"SHOW DNODES"命令获取
- config: 要调整的日志参数,在如下列表中取值
> resetlog 截断旧日志文件,创建一个新日志文件
> debugFlag < 131 | 135 | 143 > 设置debugFlag为131、135或者143
例如:
```
alter dnode 1 debugFlag 135;
```
## 客户端配置
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。
......@@ -233,6 +249,12 @@ ALTER USER <user_name> PASS <'password'>;
修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
```
ALTER USER <user_name> PRIVILEDGE <super|write|read>;
```
修改用户权限为:super/write/read,不需要添加单引号
```
SHOW USERS;
```
......@@ -386,5 +408,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。
##
......@@ -39,7 +39,7 @@ create table D1002 using meters tags ("Beijing.Haidian", 2);
我们已经知道,可以通过下面这条SQL语句以一分钟为时间窗口、30秒为前向增量统计这些电表的平均电压。
```sql
select avg(voltage) from meters interval(1m) sliding(30s)
select avg(voltage) from meters interval(1m) sliding(30s);
```
每次执行这条语句,都会重新计算所有数据。
......@@ -47,14 +47,14 @@ select avg(voltage) from meters interval(1m) sliding(30s);
可以把上面的语句改进成下面的样子,每次使用不同的 `startTime` 并定期执行:
```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s)
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
这样做没有问题,但TDengine提供了更简单的方法,
只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如:
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s)
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,
......@@ -80,7 +80,7 @@ taos> select * from avg_vol;
比如使用下面的SQL创建的连续查询将运行一小时,之后会自动停止。
```sql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s)
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
需要说明的是,上面例子中的 `now` 是指创建连续查询的时间,而不是查询执行的时间,否则,查询就无法自动停止了。
......@@ -396,7 +396,7 @@ ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian
```sql
# taos
taos> use power
taos> use power;
taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
```
......
......@@ -46,7 +46,7 @@ taos>
5. 在第一个节点,使用CLI程序taos, 登录进TDengine系统, 使用命令:
```
CREATE DNODE "h2.taos.com:6030"
CREATE DNODE "h2.taos.com:6030";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
......@@ -54,7 +54,7 @@ taos>
6. 使用命令
```
SHOW DNODES
SHOW DNODES;
```
查看新节点是否被成功加入。
......@@ -71,7 +71,7 @@ taos>
###添加节点
执行CLI程序taos, 使用root账号登录进系统, 执行:
```
CREATE DNODE "fqdn:port"
CREATE DNODE "fqdn:port";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置,缺省是自动获取。
......
......@@ -142,7 +142,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
获取最近一次API调用失败的原因,返回值为错误代码。
**注意**:对于单个数据库连接,在同一时刻只能有一个线程使用该链接调用API,否则会有未定义的行为出现并可能导致客户端crash。客户端应用可以通过建立多个连接进行多线程的数据写入或查询处理。
**注意**:对于单个数据库连接,在同一时刻只能有一个线程使用该连接调用API,否则会有未定义的行为出现并可能导致客户端crash。客户端应用可以通过建立多个连接进行多线程的数据写入或查询处理。
### 异步查询API
......
......@@ -21,7 +21,7 @@
## 5. 遇到错误"Unable to establish connection", 我怎么办?
客户端遇到链接故障,请按照下面的步骤进行检查:
客户端遇到连接故障,请按照下面的步骤进行检查:
1. 检查网络环境
* 云服务器:检查云服务器的安全组是否打开TCP/UDP 端口6030-6042的访问权限
......@@ -45,7 +45,7 @@
9. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} `
检查服务器侧TCP端口连接是否工作:`nc -l {port}`
检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}`
检查客户端侧TCP端口连接是否工作:`nc {hostIP} {port}`
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)
......@@ -57,7 +57,7 @@
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
2. 如果网络配置有DNS server, 请检查是否正常工作
3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法链接服务器的
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的
## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
......@@ -108,4 +108,8 @@ Connection = DriverManager.getConnection(url, properties);
附上必要的问题描述,以及发生该问题的执行操作,出现问题的表征及大概的时间,在<a href='https://github.com/taosdata/TDengine'> GitHub</a>提交Issue。
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。
```
alter dnode <dnode_id> debugFlag 135;
```
但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
......@@ -22,9 +22,10 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
**Tips:**
- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为8M)。
- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M)。
- TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
- 对同一张表,如果新插入记录的时间戳已经存在,新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。
- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。
## Prometheus直接写入
[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
......
......@@ -66,21 +66,21 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
数据实时复制有三个主要流程:选主、数据转发、数据恢复。后续做详细讨论。
## 虚拟节点之间的网络
## 虚拟节点之间的网络链接
## 虚拟节点之间的网络连接
虚拟节点之间通过TCP进行连接,节点之间的状态交换、数据包的转发都是通过这个TCP连接(peerFd)进行。为避免竞争,两个虚拟节点之间的TCP连接,总是由IP地址(UINT32)小的节点作为TCP客户端发起。一旦TCP连接被中断,虚拟节点能通过TCP socket自动检测到,将对方标为offline。如果监测到任何错误(比如数据恢复流程),虚拟节点将主动重置该连接。
一旦作为客户端的节点链接不成或中断,它将周期性的每隔一秒钟去试图去链接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。
一旦作为客户端的节点连接不成或中断,它将周期性的每隔一秒钟去试图去连接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。
如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP链接(syncFd)。数据恢复完成后,该链接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。
如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP连接(syncFd)。数据恢复完成后,该连接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。
任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的链接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将链接请求关闭。在TDengine代码里,mnode group的vgId设置为1。
任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的连接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将连接请求关闭。在TDengine代码里,mnode group的vgId设置为1。
## 选主流程
当同一组的两个虚拟节点之间(vnode A, vnode B)建立连接后,他们互换status消息。status消息里包含本地存储的同一虚拟节点组内所有虚拟节点的role和version。
如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的链接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的链接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下:
如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下:
1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced.
2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程
......@@ -118,7 +118,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
9. 如果quorum为1,上述6,7,8步不会发生。
10. 如果要等待slave的确认,master会启动2秒的定时器(可配置),如果超时,则认为失败。
对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP链接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。
对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP连接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。
## 数据恢复流程
......@@ -142,9 +142,9 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
<center> <img src="../assets/replica-restore.png"> </center>
1. 通过已经建立的TCP链接,发送sync req给master节点
2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP链接(syncFd)
3. 新的TCP链接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程
1. 通过已经建立的TCP连接,发送sync req给master节点
2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd)
3. 新的TCP连接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程
4. Retrieve/Restore流程里,先处理所有archived data (vnode里的data, head, last文件),后处理WAL data。
5. 对于archived data,master将通过回调函数getFileInfo获取数据文件的基本信息,包括文件名、magic以及文件大小。
6. master 将获得的文件名、magic以及文件大小发给vnode B
......@@ -157,7 +157,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
1. master节点调用回调函数getWalInfo,获取WAL的文件名。
2. 如果getWalInfo返回值大于0,表示该文件还不是最后一个WAL,因此master调用sendfile一下把该文件发送给vnode B
3. 如果getWalInfo返回时为0,表示该文件是最后一个WAL,因为文件可能还处于写的状态中,sync模块要根据WAL Head的定义逐条读出记录,然后发往vnode B。
4. vnode A读取TCP链接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。
4. vnode A读取TCP连接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。
5. 上述流程循环,直到所有WAL文件都被处理完。处理完后,master就会将新来的数据包通过Forward消息转发给slave。
从同步文件启动起,sync模块会通过inotify监控所有处理过的file以及wal。一旦发现被处理过的文件有更新变化,同步流程将中止,会重新启动。因为有可能落盘操作正在进行(比如历史数据导入,内存数据落盘),把已经处理过的文件进行了修改,需要重新同步才行。
......@@ -194,15 +194,15 @@ sync模块通过inotify监控LastWal文件的更新和关闭操作。而且在
因为写入失败,客户端会重新写入数据。但对于TDengine而言,是OK的。因为时序数据都是有时间戳的,时间戳相同的数据更新操作,第一次会执行,但第二次会自动扔掉。对于Meta Data(增加、删除库、表等等)的操作,也是OK的。一张表、库已经被创建或删除,再创建或删除,不会被执行的。
在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP链接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个链接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。
在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP连接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个连接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。
## Split Brain的问题
选举流程中,有个强制要求,那就是一定有超过半数的虚拟节点在线。但是如果replication正好是偶数,这个时候,完全可能存在splt brain问题。
为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的链接请求,并保持它。
为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的连接请求,并保持它。
在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立链接,但如果是偶数个副本,就会主动去建立链接。
在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立连接,但如果是偶数个副本,就会主动去建立连接。
Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系统时,会在bin目录生成。命令行参数“-?”查看可以配置的参数,比如绑定的IP地址,监听的端口号。
......
......@@ -13,7 +13,7 @@ taosd的启动入口是dnode模块,dnode然后启动其他模块,包括可
该模块负责taosd与taosc, 以及其他数据节点之间的通讯。TDengine没有采取标准的HTTP或gRPC等第三方工具,而是实现了自己的通讯模块RPC。
考虑到物联网场景下,数据写入的包一般不大,因此除支持TCP链接之外,RPC还支持UDP链接。当数据包小于15K时,RPC将采用UDP方式进行链接,否则将采用TCP链接。对于查询类的消息,RPC不管包的大小,总是采取TCP链接。对于UDP链接,RPC实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。
考虑到物联网场景下,数据写入的包一般不大,因此除支持TCP连接之外,RPC还支持UDP连接。当数据包小于15K时,RPC将采用UDP方式进行连接,否则将采用TCP连接。对于查询类的消息,RPC不管包的大小,总是采取TCP连接。对于UDP连接,RPC实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。
RPC模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数compressMsgSize, RPC在传输中将自动压缩数据,以节省带宽。
......@@ -25,7 +25,7 @@ RPC模块还提供数据压缩功能,如果数据包的字节数超过系统
- 系统的初始化,包括
- 从文件taos.cfg读取系统配置参数,从文件dnodeCfg.json读取数据节点的配置参数;
- 启动RPC模块,并建立起与taosc通讯的server链接,与其他数据节点通讯的server链接;
- 启动RPC模块,并建立起与taosc通讯的server连接,与其他数据节点通讯的server连接;
- 启动并初始化dnode的内部管理, 该模块将扫描该数据节点已有的vnode,并打开它们;
- 初始化可配置的模块,如mnode, http, monitor等。
- 数据节点的管理,包括
......
......@@ -57,6 +57,7 @@ cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
......
......@@ -64,6 +64,7 @@ cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/incl
cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
......
......@@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-arbitrator"
install_dir="${release_dir}/TDengine-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/TDengine-arbitrator"
install_dir="${release_dir}/TDengine-arbitrator-${version}"
fi
# Directories and files.
......@@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-arbitrator"
install_dir="${release_dir}/PowerDB-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/PowerDB-arbitrator"
install_dir="${release_dir}/PowerDB-arbitrator-${version}"
fi
# Directories and files.
......@@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -32,9 +32,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-client"
install_dir="${release_dir}/TDengine-enterprise-client-${version}"
else
install_dir="${release_dir}/TDengine-client"
install_dir="${release_dir}/TDengine-client-${version}"
fi
# Directories and files.
......@@ -97,6 +97,8 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
......@@ -111,8 +113,9 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......@@ -122,9 +125,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -32,9 +32,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-client"
install_dir="${release_dir}/PowerDB-enterprise-client-${version}"
else
install_dir="${release_dir}/PowerDB-client"
install_dir="${release_dir}/PowerDB-client-${version}"
fi
# Directories and files.
......@@ -164,9 +164,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-server"
install_dir="${release_dir}/TDengine-enterprise-server-${version}"
else
install_dir="${release_dir}/TDengine-server"
install_dir="${release_dir}/TDengine-server-${version}"
fi
# Directories and files.
......@@ -113,6 +113,8 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
......@@ -122,10 +124,11 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......@@ -135,9 +138,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-server"
install_dir="${release_dir}/PowerDB-enterprise-server-${version}"
else
install_dir="${release_dir}/PowerDB-server"
install_dir="${release_dir}/PowerDB-server-${version}"
fi
# Directories and files.
......@@ -184,9 +184,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
......
......@@ -389,6 +389,7 @@ void balanceReset() {
pDnode->lastAccess = 0;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
mnodeDecDnodeRef(pDnode);
......@@ -551,6 +552,7 @@ static void balanceCheckDnodeAccess() {
if (tsAccessSquence - pDnode->lastAccess > 3) {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state", pDnode->dnodeId);
balanceSetVgroupOffline(pDnode);
}
......
......@@ -69,17 +69,13 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
char intervalTimeUnit;
char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SInterval interval;
SLimitVal limit; // limit info
uint64_t uid; // query meter uid
uint64_t uid; // query table uid
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
......@@ -195,6 +191,7 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
void tscDequoteAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
......@@ -261,6 +258,8 @@ void tscDoQuery(SSqlObj* pSql);
*/
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
......
......@@ -29,6 +29,7 @@ extern "C" {
#include "tglobal.h"
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "qExecutor.h"
#include "qSqlparser.h"
......@@ -88,7 +89,7 @@ typedef struct STableComInfo {
typedef struct SCMCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t inUse;
int8_t numOfEps;
SEpAddr epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo;
......@@ -98,6 +99,7 @@ typedef struct STableMeta {
uint8_t tableType;
int16_t sversion;
int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo;
STableId id;
......@@ -105,7 +107,7 @@ typedef struct STableMeta {
} STableMeta;
typedef struct STableMetaInfo {
STableMeta * pTableMeta; // table meta, cached in client side and acquired by name
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
......@@ -225,13 +227,8 @@ typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
uint32_t type; // query/insert type
// TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
int64_t intervalOffset;// start offset of each time window
int32_t tz; // query client timezone
SInterval interval;
SSqlGroupbyExpr groupbyExpr; // group by tags info
SArray * colList; // SArray<SColumn*>
......@@ -263,7 +260,7 @@ typedef struct {
};
int32_t insertType;
int32_t clauseIndex; // index of multiple subclause query
int32_t clauseIndex; // index of multiple subclause query
char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;
......@@ -278,7 +275,8 @@ typedef struct {
int32_t numOfParams;
int8_t dataSourceType; // load data from file or not
int8_t submitSchema; // submit block is built with table schema
int8_t submitSchema; // submit block is built with table schema
STagData tagData;
SHashObj *pTableList; // referred table involved in sql
SArray *pDataBlocks; // SArray<STableDataBlocks*> submit data blocks after parsing sql
} SSqlCmd;
......@@ -333,6 +331,7 @@ typedef struct STscObj {
struct SSqlStream *streamList;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE()
} STscObj;
typedef struct SSqlObj {
......@@ -359,6 +358,8 @@ typedef struct SSqlObj {
uint16_t numOfSubs;
struct SSqlObj **pSubs;
struct SSqlObj * prev, *next;
struct SSqlObj **self;
} SSqlObj;
typedef struct SSqlStream {
......@@ -366,8 +367,6 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
......@@ -381,8 +380,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t intervalTime;
int64_t slidingTime;
SInterval interval;
void * pTimer;
void (*fp)();
......@@ -413,7 +411,6 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
......@@ -425,17 +422,19 @@ void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pObj);
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/**
* free sql object, release allocated resource
* @param pObj Free metric/meta information, dynamically allocated payload, and
* response buffer, object itself
* @param pObj
*/
void tscFreeSqlObj(SSqlObj *pObj);
void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeSqlObjInCache(void *pSql);
void tscCloseTscObj(STscObj *pObj);
......@@ -451,9 +450,6 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
// todo remove this function.
bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
......@@ -468,7 +464,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
......@@ -502,7 +498,8 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
}
}
extern void * tscCacheHandle;
extern SCacheObj* tscMetaCache;
extern SCacheObj* tscObjCache;
extern void * tscTmr;
extern void * tscQhandle;
extern int tscKeepConn[];
......
......@@ -149,7 +149,7 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex,
jstring optionValue) {
if (optionValue == NULL) {
jniDebug("option index:%d value is null", optionIndex);
jniDebug("option index:%d value is null", (int32_t)optionIndex);
return 0;
}
......@@ -183,7 +183,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
}
(*env)->ReleaseStringUTFChars(env, optionValue, tz1);
} else {
jniError("option index:%d is not found", optionIndex);
jniError("option index:%d is not found", (int32_t)optionIndex);
}
return res;
......@@ -227,10 +227,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, jport);
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
} else {
jniDebug("jobj:%p, conn:%p, connect to database succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, jport);
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
}
if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host);
......@@ -385,7 +385,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
}
jint ret = taos_affected_rows((SSqlObj *)res);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, ret);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret);
return ret;
}
......@@ -490,13 +490,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1));
break;
case TSDB_DATA_TYPE_TINYINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i]));
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i]));
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int32_t *)row[i]);
break;
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
......
......@@ -18,6 +18,7 @@
#include "tnote.h"
#include "trpc.h"
#include "tcache.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
......@@ -40,6 +41,8 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const char* sqlstr, size_t sqlLen) {
SSqlCmd* pCmd = &pSql->cmd;
pSql->signature = pSql;
pSql->param = param;
pSql->pTscObj = pObj;
......@@ -48,6 +51,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->fp = fp;
pSql->fetchFp = fp;
registerSqlObj(pSql);
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
......@@ -59,7 +64,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
pCmd->curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
......@@ -69,7 +74,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
tscQueueAsyncRes(pSql);
return;
}
tscDoQuery(pSql);
}
......@@ -504,7 +509,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
goto _error;
}
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
......
......@@ -117,6 +117,10 @@ typedef struct SFirstLastInfo {
typedef struct SFirstLastInfo SLastrowInfo;
typedef struct SPercentileInfo {
tMemBucket *pMemBucket;
int32_t stage;
double minval;
double maxval;
int64_t numOfElems;
} SPercentileInfo;
typedef struct STopBotInfo {
......@@ -302,7 +306,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_PERCT) {
*type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
*bytes = (int16_t)sizeof(double);
*interBytes = (int16_t)sizeof(double);
*interBytes = (int16_t)sizeof(SPercentileInfo);
} else if (functionId == TSDB_FUNC_LEASTSQR) {
*type = TSDB_DATA_TYPE_BINARY;
*bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
......@@ -322,7 +326,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_LAST_ROW) {
*type = (int16_t)dataType;
*bytes = (int16_t)dataBytes;
*interBytes = dataBytes + sizeof(SLastrowInfo);
*interBytes = dataBytes;
} else {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -521,7 +525,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
*retVal += pCtx->preAggVals.statis.sum;
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = (double*) pCtx->aOutputBuf;
*retVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum));
*retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum));
}
} else { // computing based on the true data block
void *pData = GET_INPUT_CHAR(pCtx);
......@@ -707,13 +711,16 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else { // data in current block is not earlier than current result
return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else { // data in current block is not earlier than current result
// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
......@@ -726,12 +733,16 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else {
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else {
// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
//////////////////////////////////////////////////////////////////////////////////////////////
......@@ -757,7 +768,7 @@ static void avg_function(SQLFunctionCtx *pCtx) {
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
*pVal += pCtx->preAggVals.statis.sum;
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
*pVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum));
*pVal += GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.sum));
}
} else {
void *pData = GET_INPUT_CHAR(pCtx);
......@@ -1832,8 +1843,10 @@ static void last_row_function(SQLFunctionCtx *pCtx) {
pInfo1->hasResult = DATA_SET_FLAG;
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts);
} else {
DO_UPDATE_TAG_COLUMNS(pCtx, pCtx->ptsList[pCtx->size - 1]);
}
SET_VAL(pCtx, pCtx->size, 1);
}
......@@ -2428,12 +2441,14 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
if (!function_setup(pCtx)) {
return false;
}
// in the first round, get the min-max value of all involved data
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
pInfo->minval = DBL_MAX;
pInfo->maxval = -DBL_MAX;
pInfo->numOfElems = 0;
((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
return true;
}
......@@ -2442,7 +2457,65 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
// the first stage, only acquire the min/max value
if (pInfo->stage == 0) {
if (pCtx->preAggVals.isSet) {
if (pInfo->minval > pCtx->preAggVals.statis.min) {
pInfo->minval = (double)pCtx->preAggVals.statis.min;
}
if (pInfo->maxval < pCtx->preAggVals.statis.max) {
pInfo->maxval = (double)pCtx->preAggVals.statis.max;
}
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull);
} else {
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue;
}
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(data);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(data);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(data));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(data);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(data);
break;
default:
v = GET_INT32_VAL(data);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
}
}
return;
}
// the second stage, calculate the true percentile value
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
......@@ -2462,10 +2535,47 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return;
}
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(pData);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(pData);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(pData));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(pData);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(pData);
break;
default:
v = GET_INT32_VAL(pData);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
return;
}
tMemBucketPut(pInfo->pMemBucket, pData, 1);
SET_VAL(pCtx, 1, 1);
......@@ -2488,6 +2598,23 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
static void percentile_next_step(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
}
pInfo->stage += 1;
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
} else {
pResInfo->complete = true;
}
}
//////////////////////////////////////////////////////////////////////////////////
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
......@@ -3389,12 +3516,12 @@ static void spread_function(SQLFunctionCtx *pCtx) {
pInfo->max = (double)pCtx->preAggVals.statis.max;
}
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
if (pInfo->min > GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min))) {
pInfo->min = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min));
if (pInfo->min > GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min))) {
pInfo->min = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min));
}
if (pInfo->max < GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max))) {
pInfo->max = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max));
if (pInfo->max < GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max))) {
pInfo->max = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max));
}
}
......@@ -4513,7 +4640,7 @@ SQLAggFuncElem aAggs[] = {{
percentile_function_setup,
percentile_function,
percentile_function_f,
no_next_step,
percentile_next_step,
percentile_finalizer,
noop1,
noop1,
......
......@@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
}
}
......@@ -472,10 +471,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
return;
}
tscDebug("%p start to free local reducer", pSql);
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
tscDebug("%p local reducer has been freed, abort", pSql);
return;
}
......@@ -553,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
// primary timestamp column is involved in final result
if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
......@@ -570,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
orderIdx[i] = startCols++;
}
if (pQueryInfo->intervalTime != 0) {
if (pQueryInfo->interval.interval != 0) {
// the first column is the timestamp, handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
......@@ -614,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
assert(pQueryInfo->interval.interval > 0);
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
assert(pQueryInfo->interval.interval == 0);
}
// only one row exists
......@@ -827,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
......@@ -841,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
......@@ -1222,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
#endif
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
......@@ -1260,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int8_t precision = tinfo.precision;
// for group result interpolation, do not return if not data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}
......
......@@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -406,7 +406,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
return TSDB_CODE_SUCCESS;
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd* pCmd,
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken = {0};
......@@ -426,12 +426,17 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
*str += index;
if (sToken.type == TK_QUESTION) {
if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
*code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
return -1;
}
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) {
continue;
}
strcpy(error, "client out of memory");
strcpy(pCmd->payload, "client out of memory");
*code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return -1;
}
......@@ -439,8 +444,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
tscSQLSyntaxErrMsg(error, "invalid data or symbol", sToken.z);
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
*code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
return -1;
}
......@@ -470,14 +474,14 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1; // NOTE: here 0 mean error!
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z);
tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_TSC_INVALID_TIME_STAMP;
return -1;
}
......@@ -522,11 +526,11 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken;
int16_t numOfRows = 0;
int32_t numOfRows = 0;
SSchema *pSchema = tscGetTableSchema(pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
......@@ -534,8 +538,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
int32_t precision = tinfo.precision;
if (spd->hasVal[0] == false) {
strcpy(error, "primary timestamp column can not be null");
*code = TSDB_CODE_TSC_INVALID_SQL;
*code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str);
return -1;
}
......@@ -547,17 +550,17 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
int32_t retcode = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(error, "client out of memory");
*code = retcode;
*code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
if (*code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(pCmd->payload, "client out of memory");
return -1;
}
ASSERT(tSize > maxRows);
maxRows = tSize;
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
......@@ -568,7 +571,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
sToken = tStrGetToken(*str, &index, false, 0, NULL);
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(error, ") expected", *str);
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
}
......@@ -577,7 +580,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
}
if (numOfRows <= 0) {
strcpy(error, "no any data points");
strcpy(pCmd->payload, "no any data points");
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
} else {
......@@ -704,7 +707,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf);
int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
return code;
......@@ -724,10 +727,6 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
dataBuf->numOfTables = 1;
/*
* the value of pRes->numOfRows does not affect the true result of AFFECTED ROWS,
* which is actually returned from server.
*/
*totalNum += numOfRows;
return TSDB_CODE_SUCCESS;
}
......@@ -791,7 +790,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sql += index;
tscAllocPayload(pCmd, sizeof(STagData));
STagData *pTag = (STagData *) pCmd->payload;
STagData *pTag = &pCmd->tagData;
memset(pTag, 0, sizeof(STagData));
......@@ -946,7 +945,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
pTag->dataLen = htonl(pTag->dataLen);
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
......@@ -1192,8 +1190,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
str += index;
if (TK_STRING == sToken.type) {
strdequote(sToken.z);
sToken.n = (uint32_t)strtrim(sToken.z);
tscDequoteAndTrimToken(&sToken);
}
if (sToken.type == TK_RP) {
......@@ -1460,8 +1457,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *lineptr = line;
strtolower(line, line);
int32_t len =
tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, tinfo.precision, &code, tokenBuf);
int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code;
break;
......
......@@ -43,10 +43,6 @@ typedef struct SNormalStmt {
tVariant* params;
} SNormalStmt;
//typedef struct SInsertStmt {
//
//} SInsertStmt;
typedef struct STscStmt {
bool isInsert;
STscObj* taos;
......@@ -54,7 +50,6 @@ typedef struct STscStmt {
SNormalStmt normal;
} STscStmt;
static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_t len) {
uint16_t size = stmt->numParts + 1;
if (size > stmt->sizeParts) {
......@@ -514,7 +509,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
//doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->param = (void*) pSql;
......@@ -545,7 +539,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pSql->cmd.numOfParams = 0;
pSql->cmd.batchSize = 0;
registerSqlObj(pSql);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
......@@ -574,7 +570,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
free(normal->sql);
}
tscFreeSqlObj(pStmt->pSql);
taos_free_result(pStmt->pSql);
free(pStmt);
return TSDB_CODE_SUCCESS;
}
......
......@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->intervalTime);
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
pSdesc->interval = htobe64(pStream->interval.interval);
pHeartbeat->numOfStreams++;
pSdesc++;
......
......@@ -170,6 +170,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableId, pTableMetaMsg->sTableId, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);
......
......@@ -78,6 +78,9 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_LOCALMERGE, "retrieve-localmerge" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
......
......@@ -174,6 +174,7 @@ extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag;
extern int32_t qDebugFlag;
extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
......
......@@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
File mode changed from 100755 to 100644