Commit 1b3f4202 authored by sangshuduo

merge with master branch.

version: 1.0.{build}
os: Visual Studio 2015
image:
- Visual Studio 2015
- macos
environment:
matrix:
- ARCH: amd64
- ARCH: x86
matrix:
exclude:
- image: macos
ARCH: x86
for:
-
matrix:
only:
- image: Visual Studio 2015
clone_folder: c:\dev\TDengine
clone_depth: 1
clone_folder: c:\dev\TDengine
clone_depth: 1
init:
init:
- call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
before_build:
before_build:
- cd c:\dev\TDengine
- md build
build_script:
build_script:
- cd build
- cmake -G "NMake Makefiles" ..
- cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
- nmake install
-
matrix:
only:
- image: macos
clone_depth: 1
build_script:
- mkdir debug
- cd debug
- cmake .. > /dev/null
- make > /dev/null
notifications:
- provider: Email
to:
- sangshuduo@gmail.com
on_build_success: true
on_build_failure: true
on_build_status_changed: true
---
kind: pipeline
name: test_amd64
platform:
os: linux
arch: amd64
steps:
- name: build
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: test_arm64
platform:
os: linux
arch: arm64
steps:
- name: build
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: test_arm
platform:
os: linux
arch: arm
steps:
- name: build
image: arm32v7/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_trusty
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:trusty
commands:
- apt-get update
- apt-get install -y gcc cmake3 build-essential git binutils-2.26
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_xenial
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:xenial
commands:
- apt-get update
- apt-get install -y gcc cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_bionic
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:bionic
commands:
- apt-get update
- apt-get install -y gcc cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_centos7
platform:
os: linux
arch: amd64
steps:
- name: build
image: ansible/centos7-ansible
commands:
- yum install -y gcc gcc-c++ make cmake
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: goodbye
platform:
os: linux
arch: amd64
steps:
- name: 64-bit
image: alpine
commands:
- echo 64-bit is good.
when:
branch:
- develop
- master
depends_on:
- test_arm64
- test_amd64
\ No newline at end of file
[submodule "src/connector/go"]
path = src/connector/go
url = https://github.com/taosdata/driver-go
url = git@github.com:taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin
url = git@github.com:taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
#
# Configuration
#
#
# Build Matrix
#
branches:
only:
- master
- develop
- coverity_scan
- /^.*ci-.*$/
matrix:
- os: linux
dist: focal
language: c
git:
- depth: 1
compiler: gcc
env: DESC="linux/gcc build and test"
addons:
apt:
packages:
- build-essential
- cmake
- net-tools
- python3-pip
- python3-setuptools
- valgrind
- psmisc
- unixodbc
- unixodbc-dev
- mono-complete
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- make > /dev/null
after_success:
- travis_wait 20
- |-
case $TRAVIS_OS_NAME in
linux)
cd ${TRAVIS_BUILD_DIR}/debug
make install > /dev/null || travis_terminate $?
py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
pip3 install psutil
pip3 install guppy3
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs || travis_terminate $?
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
sleep 5
mono taosdemo -Q DEFAULT -y || travis_terminate $?
pkill -KILL -x taosd
fuser -k -n tcp 6030
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests
./test-all.sh smoke || travis_terminate $?
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests/pytest
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
sleep 1
cd ${TRAVIS_BUILD_DIR}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
sleep 1
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
do
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
More than our threshold! ## ${NC}"
travis_terminate $memError
fi
fi
done
grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 13 ]; then
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
travis_terminate $defiMemError
fi
fi
done
;;
esac
- os: linux
dist: bionic
language: c
compiler: gcc
env: COVERITY_SCAN=true
git:
- depth: 1
script:
- echo "this job is for coverity scan"
addons:
coverity_scan:
# GitHub project metadata
# ** specific to your project **
project:
name: TDengine
version: 2.x
description: TDengine
# Where email notification of build analysis results will be sent
notification_email: sdsang@taosdata.com, slguan@taosdata.com
# Commands to prepare for build_command
# ** likely specific to your build **
build_command_prepend: cmake . > /dev/null
# The command that will be added as an argument to "cov-build" to compile your project for analysis,
# ** likely specific to your build **
build_command: make
# Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
# Take care in resource usage, and consider the build frequency allowances per
# https://scan.coverity.com/faq#frequency
branch_pattern: coverity_scan
- os: linux
dist: trusty
language: c
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- binutils-2.26
- unixodbc
- unixodbc-dev
env:
- DESC="trusty/gcc-4.8/bintuils-2.26 build"
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
- os: linux
dist: bionic
language: c
compiler: clang
env: DESC="linux/clang build"
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- unixodbc
- unixodbc-dev
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- make > /dev/null
- os: linux
arch: arm64
dist: bionic
language: c
compiler: clang
env: DESC="arm64 linux/clang build"
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
cmake .. -DCPUTYPE=aarch64 > /dev/null;
else
cmake .. > /dev/null;
fi
- make > /dev/null
- os: linux
arch: arm64
dist: xenial
language: c
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
- unixodbc
- unixodbc-dev
env:
- DESC="arm64 xenial build"
before_script:
- export TZ=Asia/Harbin
- date
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
cmake .. -DCPUTYPE=aarch64 > /dev/null;
else
cmake .. > /dev/null;
fi
- make > /dev/null
- os: osx
osx_image: xcode11.4
language: c
compiler: clang
env: DESC="mac/clang build"
git:
- depth: 1
addons:
homebrew:
- cmake
- unixodbc
script:
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
- cmake .. > /dev/null
- make > /dev/null
......@@ -94,6 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/
'''
return 1
}
......
[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
......
......@@ -57,24 +57,28 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()
ENDIF ()
IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
......@@ -82,21 +86,23 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32_)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
......@@ -117,7 +123,11 @@ IF (TD_LINUX)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")
IF (${COVER} MATCHES "true")
......@@ -137,8 +147,12 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
......@@ -156,7 +170,11 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
......
......@@ -14,11 +14,13 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
FIND_PROGRAM(TD_MVN_INSTALLED mvn)
IF (TD_MVN_INSTALLED)
IF (TD_BUILD_JDBC)
FIND_PROGRAM(TD_MVN_INSTALLED mvn)
IF (TD_MVN_INSTALLED)
MESSAGE(STATUS "MVN is installed and JDBC will be compiled")
ELSE ()
ELSE ()
MESSAGE(STATUS "MVN is not installed and JDBC is not compiled")
ENDIF ()
ENDIF ()
#
......@@ -32,13 +34,14 @@ ENDIF ()
#
# Set compiler options
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
# SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
IF (${CMAKE_BUILD_TYPE} MATCHES "Debug")
SET(CMAKE_BUILD_TYPE "Debug")
......
......@@ -72,3 +72,14 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
SET(TD_BUILD_JDBC FALSE)
ENDIF ()
SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
ENDIF ()
......@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.31.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
......@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_ARM_64 TRUE)
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "Set CPUTYPE to mips64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_MIPS_64 TRUE)
ENDIF ()
ELSE ()
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.19.0")
SET(TD_VER_NUMBER "2.0.20.8")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -18,3 +18,16 @@ ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
ENDIF ()
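A minimal sketch of exercising this optional dependency from a fresh checkout (JEMALLOC_ENABLED is the flag tested above; the debug directory is illustrative):
```bash
# deps/jemalloc is a git submodule (see .gitmodules above), so fetch it before configuring.
git submodule update --init deps/jemalloc
mkdir -p debug && cd debug
cmake .. -DJEMALLOC_ENABLED=true   # enables the ExternalProject_Add(jemalloc) branch
make
```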
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
......@@ -399,27 +399,22 @@ For usage of the Python connector, see the [video tutorial](https://www.taosdata.com/blog/2020/
#### Linux
Users can find the python2 and python3 connector installation packages under the src/connector/python folder of the source code (or /connector/python in the tar.gz package) and install them with pip:
Users can find the connector installation package under the src/connector/python folder of the source code (or /connector/python in the tar.gz package) and install it with pip:
`pip install src/connector/python/linux/python2/`
`pip install src/connector/python/`
`pip3 install src/connector/python/linux/python3/`
`pip3 install src/connector/python/`
#### Windows
With the Windows TDengine client already installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows <em>cmd</em> command-line window
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python2\
```
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python3\
cd C:\TDengine\connector\python
python -m pip install .
```
* If the machine does not have the pip command, users can copy the taos folder under src/connector/python/python3 or src/connector/python/python2 into the application's directory and use it from there.
* If the machine does not have the pip command, users can copy the taos folder under src/connector/python into the application's directory and use it from there.
For the Windows client: after installing the TDengine Windows client, simply copy C:\TDengine\driver\taos.dll to the C:\windows\system32 directory.
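As a quick sanity check (a minimal sketch, assuming a locally running taosd and that `taos.connect()` is used with its defaults), the freshly installed module can be imported from the command line:
```bash
pip3 install src/connector/python/
python3 -c "import taos; conn = taos.connect(); print(conn)"
```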
### Usage
......
......@@ -64,7 +64,7 @@
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 8640000
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
......
......@@ -58,11 +58,50 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
else
echo "grafanaplugin bundled directory not found!"
exit 1
fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
......
......@@ -5,7 +5,7 @@
set -e
#set -x
# releash.sh -v [cluster | edge]
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
......@@ -22,11 +22,12 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
allocator=glibc # [glibc | jemalloc]
dbName=taos # [taos | power]
verNumber=""
verNumberComp="2.0.0.0"
while getopts "hv:V:c:o:l:s:d:n:m:" arg
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
case $arg in
v)
......@@ -53,6 +54,10 @@ do
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
a)
#echo "allocator=$OPTARG"
allocator=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
......@@ -71,6 +76,7 @@ do
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
echo " -d [taos | power] "
echo " -n [version number] "
......@@ -84,7 +90,7 @@ do
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd)
......@@ -180,12 +186,18 @@ else
fi
cd ${compile_dir}
if [[ "$allocator" == "jemalloc" ]]; then
allocator_macro="-DJEMALLOC_ENABLED=true"
else
allocator_macro=""
fi
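For illustration, the new -a switch might be combined with the existing options like this (values are examples only; see the usage text above for the accepted ranges):
```bash
# Hypothetical invocation: build an edge x64 Linux package linked against jemalloc.
./release.sh -v edge -c x64 -o Linux -V stable -l full -s dynamic -a jemalloc -d taos
```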
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} ${allocator_macro}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
......
%define homepath /usr/local/taos
%define userlocalpath /usr/local
%define cfg_install_dir /etc/taos
%define __strip /bin/true
......@@ -66,13 +67,62 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
else
echo grafanaplugin bundled directory not found!
exit 1
fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig
mkdir -p %{buildroot}%{userlocalpath}/include
mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share
mkdir -p %{buildroot}%{userlocalpath}/share/doc
mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc
mkdir -p %{buildroot}%{userlocalpath}/share/man
mkdir -p %{buildroot}%{userlocalpath}/share/man/man3
cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/
if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then
cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/bin/jeprof ]; then
cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/
fi
if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then
cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/
ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so
fi
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/
fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/
fi
if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/
fi
if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then
cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/
fi
fi
#Scripts executed before installation
%pre
csudo=""
......
......@@ -227,6 +227,52 @@ function install_lib() {
${csudo} ldconfig
}
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
if [ -d ${jemalloc_dir} ]; then
${csudo} /usr/bin/install -c -d /usr/local/bin
if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${jemalloc_dir}/bin/jeprof ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
fi
if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
${csudo} /usr/bin/install -c -d /usr/local/lib
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo} /usr/bin/install -c -d /usr/local/lib
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
fi
}
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
......@@ -607,6 +653,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......@@ -630,6 +677,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......@@ -655,6 +703,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
......@@ -773,6 +822,7 @@ function update_TDengine() {
install_log
install_header
install_lib
install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
......
......@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......
......@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......
......@@ -577,6 +577,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/power/bin/startPre.sh' >> ${powerd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
......@@ -599,6 +600,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......@@ -624,6 +626,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
......
......@@ -6,7 +6,7 @@
set -e
# set -x
# -----------------------Variables definition---------------------
# -----------------------Variables definition
source_dir=$1
binary_dir=$2
osType=$3
......@@ -176,6 +176,49 @@ function install_bin() {
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
fi
}
function install_jemalloc() {
if [ "$osType" != "Darwin" ]; then
/usr/bin/install -c -d /usr/local/bin
if [ -f ${binary_dir}/build/bin/jemalloc-config ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jemalloc.sh ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
fi
if [ -f ${binary_dir}/build/bin/jeprof ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
fi
if [ -f ${binary_dir}/build/include/jemalloc/jemalloc.h ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
fi
if [ -f ${binary_dir}/build/lib/libjemalloc.so.2 ]; then
/usr/bin/install -c -d /usr/local/lib
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
/usr/bin/install -c -d /usr/local/lib
if [ -f ${binary_dir}/build/lib/libjemalloc.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
fi
if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
fi
if [ -f ${binary_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
fi
if [ -f ${binary_dir}/build/share/man/man3/jemalloc.3 ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
fi
fi
}
function install_lib() {
# Remove links
......@@ -199,6 +242,8 @@ function install_lib() {
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
install_jemalloc
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
fi
......@@ -243,9 +288,17 @@ function install_data() {
}
function install_connector() {
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
}
......@@ -333,6 +386,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......
......@@ -117,9 +117,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
......
......@@ -144,24 +144,23 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......
......@@ -30,7 +30,7 @@ else
install_dir="${release_dir}/TDengine-server-${version}"
fi
# Directories and files.
# Directories and files
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
......@@ -73,6 +73,39 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
......@@ -114,6 +147,25 @@ mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
......@@ -131,9 +183,17 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
......
......@@ -166,24 +166,24 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector/
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......
......@@ -405,6 +405,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......
name: tdengine
base: core18
version: '2.0.19.0'
version: '2.0.20.8'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.19.0
- usr/lib/libtaos.so.2.0.20.5
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
......@@ -220,10 +220,6 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
}
static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
if (pVgroup->lbTime + 5 * tsStatusInterval > tsAccessSquence) {
return false;
}
int32_t rmVnodeVer = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
......@@ -371,6 +367,7 @@ static bool bnMonitorBalance() {
for (int32_t dest = 0; dest < src; dest++) {
SDnodeObj *pDestDnode = tsBnDnodes.list[dest];
if (bnCheckDnodeInVgroup(pDestDnode, pVgroup)) continue;
if (taosGetTimestampMs() - pDestDnode->createdTime < 2000) continue;
float destScore = bnTryCalcDnodeScore(pDestDnode, 1);
if (srcScore + 0.0001 < destScore) continue;
......@@ -405,7 +402,7 @@ void bnReset() {
if (pDnode == NULL) break;
// while master change, should reset dnode to offline
mInfo("dnode:%d set access:%d to 0", pDnode->dnodeId, pDnode->lastAccess);
mInfo("dnode:%d set access:%" PRId64 " to 0", pDnode->dnodeId, pDnode->lastAccess);
pDnode->lastAccess = 0;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
......@@ -499,7 +496,7 @@ static bool bnMontiorDropping() {
if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
mLInfo("dnode:%d, set to removing state for it offline:%" PRId64 " seconds", pDnode->dnodeId,
tsAccessSquence - pDnode->lastAccess);
pDnode->status = TAOS_DN_STATUS_DROPPING;
......@@ -574,8 +571,8 @@ void bnCheckStatus() {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
mInfo("dnode:%d, set to offline state, access seq:%" PRId64 " last seq:%" PRId64 " laststat:%d", pDnode->dnodeId,
tsAccessSquence, pDnode->lastAccess, pDnode->status);
bnSetVgroupOffline(pDnode);
bnStartTimer(3000);
}
......@@ -640,6 +637,19 @@ int32_t bnDropDnode(SDnodeObj *pDnode) {
return TSDB_CODE_SUCCESS;
}
int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode) {
if (pDnode == NULL)
return 0;
if (pDnode->isMgmt || pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE
|| pDnode->status == TAOS_DN_STATUS_DROPPING
|| pDnode->status == TAOS_DN_STATUS_OFFLINE) {
return 0;
} else {
return 1;
}
}
static void bnMonitorDnodeModule() {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes >= tsNumOfMnodes) return;
......@@ -648,13 +658,7 @@ static void bnMonitorDnodeModule() {
SDnodeObj *pDnode = tsBnDnodes.list[i];
if (pDnode == NULL) break;
if (pDnode->isMgmt || pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) {
continue;
}
if (pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE) {
continue;
}
if (!bnDnodeCanCreateMnode(pDnode)) continue;
mLInfo("dnode:%d, numOfMnodes:%d expect:%d, create mnode in this dnode", pDnode->dnodeId, numOfMnodes, tsNumOfMnodes);
mnodeCreateMnode(pDnode->dnodeId, pDnode->dnodeEp, true);
......
......@@ -102,12 +102,12 @@ static void bnProcessTimer(void *handle, void *tmrId) {
if (tsBnThread.stop) return;
tsBnThread.timer = NULL;
tsAccessSquence++;
bnStartTimer(-1);
bnCheckStatus();
if (handle == NULL) {
++tsAccessSquence;
if (tsAccessSquence % tsBalanceInterval == 0) {
mDebug("balance function is scheduled by timer");
bnPostSignal();
......@@ -122,8 +122,7 @@ static void bnProcessTimer(void *handle, void *tmrId) {
void bnStartTimer(int32_t mseconds) {
if (tsBnThread.stop) return;
bool updateSoon = (mseconds != -1);
if (updateSoon) {
if (mseconds != -1) {
mTrace("balance function will be called after %d ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)(int64_t)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {
......
......@@ -168,7 +168,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
......@@ -297,7 +298,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
......
......@@ -21,8 +21,8 @@ extern "C" {
#endif
#include "taosmsg.h"
#include "tstoken.h"
#include "tsclient.h"
#include "ttoken.h"
/**
* get the number of tags of this table
......
......@@ -67,14 +67,16 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
char sTableName[TSDB_TABLE_FNAME_LEN]; // TODO: refactor super table name, not full name
uint64_t suid; // super table id
} CChildTableMeta;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
......@@ -83,6 +85,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
......@@ -402,6 +405,7 @@ typedef struct SSqlObj {
typedef struct SSqlStream {
SSqlObj *pSql;
void * cqhandle; // stream belong to SCQContext handle
const char* dstTable;
uint32_t streamId;
char listed;
......@@ -419,6 +423,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time; when the time is later than etime, the stream will be closed
int64_t ltime; // stream last row time in stream table
SInterval interval;
void * pTimer;
......
......@@ -49,6 +49,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset
(JNIEnv *, jclass);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: getResultTimePrecision
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecision
(JNIEnv *, jobject, jlong, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: connectImp
......
......@@ -671,3 +671,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) {
return (*env)->NewStringUTF(env, (const char *)tsCharset);
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecision(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_RES *result = (TAOS_RES *)res;
if (result == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
return taos_result_precision(result);
}
\ No newline at end of file
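For reference, the new JNI entry point is a thin wrapper around the C client's taos_result_precision(). A minimal native-side sketch of the same call path, assuming a local TDengine server with default credentials and a hypothetical table demo.t1 (the connection parameters and table are illustrative, not part of this commit):

#include <stdio.h>
#include <taos.h>

int main(void) {
  // assumptions: TDengine server on localhost, default credentials, database "demo"
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "demo", 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "select * from t1 limit 1");  // t1 is a hypothetical table
  if (res != NULL) {
    // returns the timestamp precision of the result set (0 = millisecond, 1 = microsecond)
    printf("result time precision: %d\n", taos_result_precision(res));
    taos_free_result(res);
  }

  taos_close(conn);
  return 0;
}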
......@@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscAsyncResultOnError(pSql);
return;
......@@ -57,7 +57,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pCmd->curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
......@@ -80,7 +80,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
tscError("bug!!! pObj:%p", pObj);
tscError("pObj:%p is NULL or freed", pObj);
terrno = TSDB_CODE_TSC_DISCONNECTED;
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
return NULL;
......@@ -283,12 +283,12 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
static void tscAsyncResultCallback(SSchedMsg *pMsg) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pMsg->ahandle);
if (pSql == NULL || pSql->signature != pSql) {
tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
tscDebug("%p SqlObj is freed, not add into queue async res", pMsg->ahandle);
return;
}
assert(pSql->res.code != TSDB_CODE_SUCCESS);
tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));
SSqlRes *pRes = &pSql->res;
if (pSql->fp == NULL || pSql->fetchFp == NULL){
......@@ -368,17 +368,17 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) {
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
goto _error;
}
tscDebug("%p get %s successfully", pSql, msg);
tscDebug("0x%"PRIx64" get %s successfully", pSql->self, msg);
if (pSql->pStream == NULL) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// check if it is a sub-query of super table query first, if true, enter another routine
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send the corresponding query", pSql->self);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
......@@ -402,7 +402,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
return;
} else { // continue to process normal async query
if (pCmd->parseFinished) {
tscDebug("%p update local table meta, continue to process sql and send corresponding query", pSql);
tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send corresponding query", pSql->self);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
......@@ -416,7 +416,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pCmd->command != TSDB_SQL_INSERT);
if (pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
tscDebug("0x%"PRIx64" redo parse sql string and proceed", pSql->self);
pCmd->parseFinished = false;
tscResetSqlCmd(pCmd, true);
......@@ -436,7 +436,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else {
tscDebug("%p continue parse sql after get table meta", pSql);
tscDebug("0x%"PRIx64" continue parse sql after get table meta", pSql->self);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
......@@ -486,7 +486,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
}
tscDebug("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command);
tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pSql->cmd.command);
if (!pSql->cmd.parseFinished) {
tsParseSql(pSql, false);
}
......
......@@ -926,7 +926,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pRes->code = tscProcessServStatus(pSql);
} else {
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
tscError("%p not support command:%d", pSql, pCmd->command);
tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
}
// keep the code in local variable in order to avoid invalid read in case of async query
......
......@@ -177,14 +177,14 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p pMemBuffer is NULL", pMemBuffer);
tscError("pMemBuffer:%p is NULL", pMemBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p no local buffer or intermediate result format model", pSql);
tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
......@@ -193,7 +193,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
for (int32_t i = 0; i < numOfBuffer; ++i) {
int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength;
if (len == 0) {
tscDebug("%p no data retrieved from orderOfVnode:%d", pSql, i + 1);
tscDebug("0x%"PRIx64" no data retrieved from orderOfVnode:%d", pSql->self, i + 1);
continue;
}
......@@ -203,12 +203,12 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty
tscDebug("%p retrieved no data", pSql);
tscDebug("0x%"PRIx64" retrieved no data", pSql->self);
return;
}
if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
......@@ -220,7 +220,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
......@@ -235,7 +235,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
pReducer->numOfVnode = numOfBuffer;
pReducer->pDesc = pDesc;
tscDebug("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
tscDebug("0x%"PRIx64" the number of merged leaves is: %d", pSql->self, pReducer->numOfBuffer);
int32_t idx = 0;
for (int32_t i = 0; i < numOfBuffer; ++i) {
......@@ -244,7 +244,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
if (ds == NULL) {
tscError("%p failed to create merge structure", pSql);
tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tfree(pReducer);
return;
......@@ -258,7 +258,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
ds->pageId = 0;
ds->rowIdx = 0;
tscDebug("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSql, i + 1, idx + 1);
tscDebug("0x%"PRIx64" load data from disk into memory, orderOfVnode:%d, total:%d", pSql->self, i + 1, idx + 1);
tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0);
#ifdef _DEBUG_VIEW
printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.num);
......@@ -272,7 +272,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
#endif
if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx);
tscDebug("0x%"PRIx64" flush data is empty, ignore %d flush record", pSql->self, idx);
tfree(ds);
continue;
}
......@@ -547,10 +547,10 @@ void tscDestroyLocalMerger(SSqlObj *pSql) {
pLocalMerge->numOfCompleted = 0;
free(pLocalMerge);
} else {
tscDebug("%p already freed or another free function is invoked", pSql);
tscDebug("0x%"PRIx64" already freed or another free function is invoked", pSql->self);
}
tscDebug("%p free local reducer finished", pSql);
tscDebug("0x%"PRIx64" free local reducer finished", pSql->self);
}
static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, SColumnModel *pModel) {
......@@ -674,7 +674,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
if (*pMemBuffer == NULL) {
tscError("%p failed to allocate memory", pSql);
tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
......@@ -683,7 +683,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
if (pSchema == NULL) {
tscError("%p failed to allocate memory", pSql);
tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
......@@ -1529,7 +1529,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return pRes->code;
}
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
tscError("0x%"PRIx64" local merge abort due to error occurs, code:%s", pSql->self, tstrerror(pRes->code));
return pRes->code;
}
......
......@@ -29,8 +29,7 @@
#include "taosdef.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tdataformat.h"
......@@ -464,22 +463,27 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
return tscSQLSyntaxErrMsg(pCmd->payload, "too long string", sToken.z);
}
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
cnt++;
j++;
k++;
continue;
}
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
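The rewritten loop collapses a backslash escape or a doubled quote into a single character in one pass and counts how many characters were dropped so the token length can be adjusted afterwards. A self-contained sketch of the same idea follows; stripEscapes is a hypothetical helper written for illustration, not a function in this codebase:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// hypothetical helper mirroring the loop above: copies the body of a quoted SQL
// string literal, turning \x and doubled quotes into single characters, and
// returns how many characters were dropped so the caller can shrink token.n
static int32_t stripEscapes(const char *z, uint32_t n, char delim, char *out) {
  int32_t cnt = 0, j = 0;
  for (uint32_t k = 1; k < n - 1; ++k) {              // skip the enclosing quotes
    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
      out[j++] = z[k + 1];                            // keep only the escaped character
      cnt++;
      k++;                                            // consume the two-character escape
      continue;
    }
    out[j++] = z[k];
  }
  out[j] = 0;
  return cnt;                                         // caller then does token.n -= 2 + cnt
}

int main(void) {
  char buf[64];
  const char *lit = "'it''s \\\"ok\\\"'";             // raw literal: 'it''s \"ok\"'
  int32_t dropped = stripEscapes(lit, (uint32_t)strlen(lit), lit[0], buf);
  printf("%s (%d escapes removed)\n", buf, dropped);  // prints: it's "ok" (3 escapes removed)
  return 0;
}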
......@@ -705,15 +709,10 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
free(tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
......@@ -774,6 +773,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
}
if (sToken.type == TK_RP) {
break;
}
......@@ -928,6 +931,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
/* parse columns after super table tags values.
* insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
* (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
* */
index = 0;
sToken = tStrGetToken(sql, &index, false);
sql += index;
int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) {
if (*boundColumn != NULL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
} else {
*boundColumn = &sToken.z[0];
}
while (1) {
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type == TK_RP) {
break;
}
sql += index;
++numOfColsAfterTags;
}
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
return TSDB_CODE_TSC_INVALID_SQL;
}
sToken = tStrGetToken(sql, &index, false);
}
sql = sToken.z;
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
......@@ -969,7 +1008,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
psTblToken->n = len;
psTblToken->type = TK_ID;
tSQLGetToken(psTblToken->z, &psTblToken->type);
tGetToken(psTblToken->z, &psTblToken->type);
return tscValidateName(psTblToken);
}
......@@ -1089,7 +1128,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
str = pCmd->curSql;
}
tscDebug("%p create data block list hashList:%p", pSql, pCmd->pTableBlockHashList);
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->pTableBlockHashList);
while (1) {
int32_t index = 0;
......@@ -1141,7 +1180,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
return code;
}
tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
pCmd->curSql = NULL;
goto _clean;
}
......@@ -1303,7 +1342,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlCmd* pCmd = &pSql->cmd;
if ((!pCmd->parseFinished) && (!initial)) {
tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql);
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql);
}
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
......@@ -1317,30 +1356,27 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
}
// make a backup as tsParseInsertSql may modify the string
char* sqlstr = strdup(pSql->sqlstr);
ret = tsParseInsertSql(pSql);
if ((sqlstr == NULL) || (pSql->parseRetry >= 1) ||
(ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
free(sqlstr);
if ((pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
} else {
tscResetSqlCmd(pCmd, true);
free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
pSql->parseRetry++;
if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {
ret = tsParseInsertSql(pSql);
}
}
} else {
SSqlInfo SQLInfo = qSqlParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &sqlInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0/* && sqlInfo.type == TSDB_SQL_NULL*/) {
tscDebug("0x%"PRIx64 " parse sql failed, retry again after clear local meta cache", pSql->self);
tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
ret = tscToSQLCmd(pSql, &SQLInfo);
ret = tscToSQLCmd(pSql, &sqlInfo);
}
SqlInfoDestroy(&SQLInfo);
SqlInfoDestroy(&sqlInfo);
}
/*
......@@ -1409,7 +1445,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
assert(pSql->res.numOfRows == 0);
int32_t ret = fseek(fp, 0, SEEK_SET);
if (ret < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
code = TAOS_SYSTEM_ERROR(errno);
goto _error;
}
......@@ -1529,7 +1565,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
tfree(pSupporter);
taos_free_result(pNew);
......
......@@ -151,7 +151,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
while (sql[i] != 0) {
SStrToken token = {0};
token.n = tSQLGetToken(sql + i, &token.type);
token.n = tGetToken(sql + i, &token.type);
if (token.type == TK_QUESTION) {
sql[i] = 0;
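normalStmtPrepare walks the statement with the tokenizer (now tGetToken) and terminates the string at every TK_QUESTION placeholder it finds. The effect can be illustrated with a much simpler scanner that counts '?' markers while skipping quoted literals; this is a rough sketch, not the tokenizer the client actually uses:

#include <stdio.h>

// simplified placeholder scanner: counts '?' bind markers, ignoring any '?'
// that appears inside a '...' or "..." literal
static int countPlaceholders(const char *sql) {
  int n = 0;
  char quote = 0;
  for (const char *p = sql; *p != 0; ++p) {
    if (quote != 0) {
      if (*p == quote) quote = 0;                     // closing quote
    } else if (*p == '\'' || *p == '"') {
      quote = *p;                                     // opening quote
    } else if (*p == '?') {
      ++n;
    }
  }
  return n;
}

int main(void) {
  // two real placeholders; the '?' inside the string literal is not counted
  printf("%d\n", countPlaceholders("insert into t values (?, ?, 'not a ? marker')"));
  return 0;
}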
......@@ -879,6 +879,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
return TSDB_CODE_TSC_DISCONNECTED;
}
if (sql == NULL) {
tscError("sql is NULL");
return TSDB_CODE_TSC_APP_ERROR;
}
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
......
......@@ -19,6 +19,7 @@
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
#include "tcq.h"
#include "taos.h"
......@@ -54,14 +55,14 @@ void tscAddIntoSqlList(SSqlObj *pSql) {
pSql->next = pObj->sqlList;
if (pObj->sqlList) pObj->sqlList->prev = pSql;
pObj->sqlList = pSql;
pSql->queryId = queryId++;
pSql->queryId = atomic_fetch_add_32(&queryId, 1);
pthread_mutex_unlock(&pObj->mutex);
pSql->stime = taosGetTimestampMs();
pSql->listed = 1;
tscDebug("%p added into sqlList", pSql);
tscDebug("0x%"PRIx64" added into sqlList, queryId:%u", pSql->self, pSql->queryId);
}
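Switching from the plain queryId++ to atomic_fetch_add_32 guarantees that concurrently registered statements never receive the same query id. The idea can be shown with standard C11 atomics (a sketch; TDengine's own atomic_fetch_add_32 wrapper is not used here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t queryId = 1;      // shared counter, analogous to the client's queryId

static uint32_t nextQueryId(void) {
  // fetch-and-add is one indivisible step, so two threads can never observe the same id
  return atomic_fetch_add(&queryId, 1);
}

int main(void) {
  uint32_t a = nextQueryId();
  uint32_t b = nextQueryId();
  printf("%u %u\n", a, b);                // 1 2 : strictly increasing, no duplicates
  return 0;
}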
void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {
......@@ -99,12 +100,12 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
return;
}
tscDebug("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
tscDebug("0x%"PRIx64" query time:%" PRId64 " sql:%s", pSql->self, pSql->res.useconds, pSql->sqlstr);
int32_t sqlSize = (int32_t)(TSDB_SLOW_QUERY_SQL_LEN + size);
char *sql = malloc(sqlSize);
if (sql == NULL) {
tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self);
return;
}
......@@ -141,7 +142,7 @@ void tscRemoveFromSqlList(SSqlObj *pSql) {
pSql->listed = 0;
tscSaveSlowQuery(pSql);
tscDebug("%p removed from sqlList", pSql);
tscDebug("0x%"PRIx64" removed from sqlList", pSql->self);
}
void tscKillQuery(STscObj *pObj, uint32_t killId) {
......@@ -158,7 +159,7 @@ void tscKillQuery(STscObj *pObj, uint32_t killId) {
if (pSql == NULL) {
tscError("failed to kill query, id:%d, it may have completed/terminated", killId);
} else {
tscDebug("%p query is killed, queryId:%d", pSql, killId);
tscDebug("0x%"PRIx64" query is killed, queryId:%d", pSql->self, killId);
taos_stop_query(pSql);
}
}
......@@ -213,7 +214,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
pthread_mutex_unlock(&pObj->mutex);
if (pStream) {
tscDebug("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
tscDebug("0x%"PRIx64" stream:%p is killed, streamId:%d", pStream->pSql->self, pStream, killId);
if (pStream->callback) {
pStream->callback(pStream->param);
}
......@@ -273,7 +274,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->stime = (pStream->stime == INT64_MIN) ? htobe64(pStream->stime) : htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
......@@ -294,24 +295,34 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
return msgLen;
}
void tscKillConnection(STscObj *pObj) {
pthread_mutex_lock(&pObj->mutex);
SSqlObj *pSql = pObj->sqlList;
while (pSql) {
pSql = pSql->next;
// this callback is invoked when the connection referenced by cqContext->dbConn is killed
void cqConnKilledNotify(void* handle, void* conn) {
if (handle == NULL || conn == NULL){
return ;
}
SCqContext* pContext = (SCqContext*) handle;
if (pContext->dbConn == conn){
atomic_store_ptr(&(pContext->dbConn), NULL);
}
}
void tscKillConnection(STscObj *pObj) {
// take the stream list head while holding the lock
pthread_mutex_lock(&pObj->mutex);
SSqlStream *pStream = pObj->streamList;
pthread_mutex_unlock(&pObj->mutex);
while (pStream) {
SSqlStream *tmp = pStream->next;
// clear the associated connection pointer in the continuous-query context
cqConnKilledNotify(pStream->cqhandle, pObj);
// taos_close_stream acquires pObj->mutex internally, so call it outside the lock to avoid deadlock
taos_close_stream(pStream);
pStream = tmp;
}
pthread_mutex_unlock(&pObj->mutex);
tscDebug("connection:%p is killed", pObj);
taos_close(pObj);
}
......@@ -94,6 +94,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->tableType = pTableMetaMsg->tableType;
pTableMeta->vgId = pTableMetaMsg->vgroup.vgId;
pTableMeta->suid = pTableMetaMsg->suid;
pTableMeta->tableInfo = (STableComInfo) {
.numOfTags = pTableMetaMsg->numOfTags,
......
......@@ -292,7 +292,7 @@ void taos_close(TAOS *taos) {
pHb->rpcRid = -1;
}
tscDebug("%p HB is freed", pHb);
tscDebug("0x%"PRIx64" HB is freed", pHb->self);
taosReleaseRef(tscObjRef, pHb->self);
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
......@@ -576,7 +576,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
cmd == TSDB_SQL_FETCH)) {
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]);
tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]);
tscProcessSql(pSql);
return false;
......@@ -588,13 +588,13 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
void taos_free_result(TAOS_RES *res) {
SSqlObj* pSql = (SSqlObj*) res;
if (pSql == NULL || pSql->signature != pSql) {
tscError("%p already released sqlObj", res);
tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
return;
}
bool freeNow = tscKillQueryInDnode(pSql);
if (freeNow) {
tscDebug("%p free sqlObj in cache", pSql);
tscDebug("0x%"PRIx64" free sqlObj in cache", pSql->self);
taosReleaseRef(tscObjRef, pSql->self);
}
}
......@@ -708,7 +708,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
tscUnlockByThread(&pSql->squeryLock);
tscDebug("%p super table query cancelled", pSql);
tscDebug("0x%"PRIx64" super table query cancelled", pSql->self);
}
void taos_stop_query(TAOS_RES *res) {
......@@ -717,7 +717,7 @@ void taos_stop_query(TAOS_RES *res) {
return;
}
tscDebug("%p start to cancel query", res);
tscDebug("0x%"PRIx64" start to cancel query", pSql->self);
SSqlCmd *pCmd = &pSql->cmd;
// set the error code for master pSqlObj firstly
......@@ -744,7 +744,7 @@ void taos_stop_query(TAOS_RES *res) {
}
}
tscDebug("%p query is cancelled", res);
tscDebug("0x%"PRIx64" query is cancelled", pSql->self);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
......@@ -877,19 +877,18 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pRes->numOfClauseTotal = 0;
tscDebug("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
tscDebug("0x%"PRIx64" Valid SQL: %s pObj:%p", pSql->self, sql, pObj);
int32_t sqlLen = (int32_t)strlen(sql);
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
tscError("0x%"PRIx64" sql too long", pSql->self);
tfree(pSql);
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -914,7 +913,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
}
if (code != TSDB_CODE_SUCCESS) {
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(pSql), pObj);
tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
}
taos_free_result(pSql);
......@@ -963,7 +962,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
len = (int32_t)strtrim(tblName);
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
tSQLGetToken(tblName, &sToken.type);
tGetToken(tblName, &sToken.type);
// Check if the table name available or not
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
......@@ -1027,18 +1026,18 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
pRes->numOfClauseTotal = 0;
assert(pSql->fp == NULL);
tscDebug("%p tableNameList: %s pObj:%p", pSql, tableNameList, pObj);
tscDebug("0x%"PRIx64" tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj);
int32_t tblListLen = (int32_t)strlen(tableNameList);
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
char *str = calloc(1, tblListLen + 1);
if (str == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -1061,7 +1060,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tscDoQuery(pSql);
tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
tscDebug("0x%"PRIx64" load multi table meta result:%d %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
if ((code = pRes->code) != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
}
......
......@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
}
// TODO refactor: derive the table list properly instead of scanning the raw sql string
static SArray* getTableList( SSqlObj* pSql ) {
const char* p = strstr( pSql->sqlstr, " from " );
assert(p != NULL); // we are sure this is a 'select' statement
......@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
if (pNew == NULL) {
tscError("failed to retrieve table id: cannot create new sql object.");
tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
return NULL;
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
tscError("0x%"PRIx64" failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
return NULL;
}
......@@ -264,7 +264,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
SSqlCmd* pCmd = &pSql->cmd;
pSub->lastSyncTime = taosGetTimestampMs();
TSDB_QUERY_CLEAR_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
......@@ -275,11 +275,14 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
taosArrayClear(pSub->progress);
taosArrayPush(pSub->progress, &target);
}
pSub->lastSyncTime = taosGetTimestampMs();
return 1;
}
SArray* tables = getTableList(pSql);
if (tables == NULL) {
pSub->lastSyncTime = 0; //force to get table list next time
return 0;
}
size_t numOfTables = taosArrayGetSize(tables);
......@@ -304,7 +307,11 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
taosArrayDestroy(tables);
if (pTableMetaInfo->pVgroupTables && taosArrayGetSize(pTableMetaInfo->pVgroupTables) > 0) {
TSDB_QUERY_SET_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
}
pSub->lastSyncTime = taosGetTimestampMs();
return 1;
}
......@@ -482,7 +489,15 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
if (pSub == NULL) return NULL;
if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
if (pSub->pTimer == NULL) {
int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
if (duration < (int64_t)(pSub->interval)) {
tscDebug("subscription consume too frequently, blocking...");
taosMsleep(pSub->interval - (int32_t)duration);
}
}
if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { // may reach here when retrieving the super table vgroup list failed
SSqlObj* pSql = recreateSqlObj(pSub);
if (pSql == NULL) {
return NULL;
......@@ -494,6 +509,12 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
}
pSub->pSql = pSql;
pSql->pSubscription = pSub;
pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");
if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
tscDebug("table synchronization completed");
}
tscSaveSubscriptionProgress(pSub);
......@@ -518,14 +539,6 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
tscDebug("subscribe:%s set next round subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
}
if (pSub->pTimer == NULL) {
int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
if (duration < (int64_t)(pSub->interval)) {
tscDebug("subscription consume too frequently, blocking...");
taosMsleep(pSub->interval - (int32_t)duration);
}
}
size_t size = taosArrayGetSize(pSub->progress) * sizeof(STableIdInfo);
size += sizeof(SQueryTableMsg) + 4096;
int code = tscAllocPayload(&pSql->cmd, (int)size);
......
......@@ -45,6 +45,7 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
char tscLogFileName[12] = "taoslog";
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
static volatile int tscInitRes = 0;
......@@ -132,7 +133,7 @@ void taos_init_imp(void) {
printf("failed to create log dir:%s\n", tsLogDir);
}
sprintf(temp, "%s/taoslog", tsLogDir);
sprintf(temp, "%s/%s", tsLogDir, tscLogFileName);
if (taosInitLog(temp, tsNumOfLogLines, 10) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
}
......
......@@ -4,7 +4,7 @@
#include <iostream>
#include "taos.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tutil.h"
int main(int argc, char** argv) {
......@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
int64_t k = timezone;
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
......@@ -163,7 +163,7 @@ TEST(testCase, parse_time) {
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
char* t = "2021-01-08T02:11:40.000+00:00";
char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
}
......
......@@ -89,9 +89,6 @@ void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
int32_t rightType, void *output, int32_t order);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
......
......@@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting;
extern char tsEmail[];
extern char tsArbitrator[];
extern int8_t tsArbOnline;
extern int64_t tsArbOnlineTimestamp;
extern int32_t tsDnodeId;
// common
......@@ -74,7 +75,7 @@ extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
extern int32_t tsMaxStreamComputDelay;
extern int32_t tsStreamCompStartDelay;
extern int32_t tsStreamCompRetryDelay;
extern int32_t tsRetryStreamCompDelay;
extern float tsStreamComputDelayRatio; // the delayed computing ratio of the whole time window
extern int32_t tsProjectExecInterval;
extern int64_t tsMaxRetentWindow;
......
......@@ -18,7 +18,7 @@
#include "os.h"
#include "taosmsg.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tvariant.h"
typedef struct SDataStatis {
......
......@@ -16,8 +16,8 @@
#ifndef TDENGINE_TVARIANT_H
#define TDENGINE_TVARIANT_H
#include "tstoken.h"
#include "tarray.h"
#include "ttoken.h"
#ifdef __cplusplus
extern "C" {
......
......@@ -42,11 +42,12 @@ int32_t tsNumOfMnodes = 3;
int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
int8_t tsArbOnline = 0;
int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcTimer = 300;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
......@@ -92,7 +93,7 @@ int32_t tsMaxStreamComputDelay = 20000;
int32_t tsStreamCompStartDelay = 10000;
// the retry delay after a stream computing task fails; adjust accordingly
int32_t tsStreamCompRetryDelay = 10;
int32_t tsRetryStreamCompDelay = 10*1000;
// The delayed computing ratio. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f;
......@@ -139,7 +140,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
......@@ -696,7 +697,7 @@ static void doInitGlobalConfig(void) {
taosInitConfigOption(cfg);
cfg.option = "retryStreamCompDelay";
cfg.ptr = &tsStreamCompRetryDelay;
cfg.ptr = &tsRetryStreamCompDelay;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 10;
......@@ -816,6 +817,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "precision";
cfg.ptr = &tsTimePrecision;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_PRECISION;
cfg.maxValue = TSDB_MAX_PRECISION;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "comp";
cfg.ptr = &tsCompression;
cfg.valType = TAOS_CFG_VTYPE_INT8;
......@@ -886,6 +897,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "cachelast";
cfg.ptr = &tsCacheLastRow;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_CACHE_LAST_ROW;
cfg.maxValue = TSDB_MAX_DB_CACHE_LAST_ROW;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
......
......@@ -2,7 +2,7 @@
#include "tutil.h"
#include "tname.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
......
......@@ -14,14 +14,14 @@
*/
#include "os.h"
#include "tvariant.h"
#include "hash.h"
#include "taos.h"
#include "taosdef.h"
#include "tstoken.h"
#include "ttoken.h"
#include "ttokendef.h"
#include "tutil.h"
#include "ttype.h"
#include "tutil.h"
#include "tvariant.h"
void tVariantCreate(tVariant *pVar, SStrToken *token) {
int32_t ret = 0;
......@@ -48,10 +48,21 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
case TSDB_DATA_TYPE_INT:{
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
SStrToken t = {0};
tGetToken(token->z, &t.type);
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
pVar->nType = -1; // -1 means error type
return;
}
// data overflow; try to parse the input number as an unsigned value
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, false);
if (ret != 0) {
pVar->nType = -1; // -1 means error type
return;
}
}
break;
}
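The added branch first checks whether the literal is negative, in which case an out-of-range value really is an error, and only then retries the parse as an unsigned 64-bit integer. A rough standalone sketch of that fallback using the standard library (tStrToInteger itself is internal to TDengine) could look like this:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// returns 0 on success; *v receives the 64-bit pattern of the parsed integer
static int parseIntegerLiteral(const char *z, int64_t *v) {
  errno = 0;
  char *end = NULL;
  long long s = strtoll(z, &end, 10);
  if (errno != ERANGE && *end == 0) { *v = (int64_t)s; return 0; }

  if (z[0] == '-') return -1;             // negative and out of int64 range: genuine overflow

  errno = 0;
  unsigned long long u = strtoull(z, &end, 10);       // retry as unsigned
  if (errno == ERANGE || *end != 0) return -1;
  *v = (int64_t)u;                        // keep the raw bit pattern, as tVariant does
  return 0;
}

int main(void) {
  int64_t v = 0;
  printf("%d\n", parseIntegerLiteral("18446744073709551615", &v));  // 0: fits once parsed as unsigned
  printf("%d\n", parseIntegerLiteral("-99999999999999999999", &v)); // -1: real overflow
  return 0;
}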
......@@ -63,7 +74,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
case TSDB_DATA_TYPE_BINARY: {
pVar->pz = strndup(token->z, token->n);
pVar->nLen = strdequote(pVar->pz);
pVar->nLen = strRmquote(pVar->pz, token->n);
break;
}
......@@ -449,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
*result = (int64_t) pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
/*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
/*int32_t n = */tGetToken(pVariant->pz, &token.type);
if (token.type == TK_NULL) {
if (releaseVariantPtr) {
......@@ -484,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
wchar_t *endPtr = NULL;
SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
token.n = tGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
token.n = tGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
......@@ -525,6 +536,8 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
}
bool code = false;
uint64_t ui = 0;
switch(type) {
case TSDB_DATA_TYPE_TINYINT:
code = IS_VALID_TINYINT(*result); break;
......@@ -535,13 +548,17 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
case TSDB_DATA_TYPE_BIGINT:
code = IS_VALID_BIGINT(*result); break;
case TSDB_DATA_TYPE_UTINYINT:
code = IS_VALID_UTINYINT(*result); break;
ui = *result;
code = IS_VALID_UTINYINT(ui); break;
case TSDB_DATA_TYPE_USMALLINT:
code = IS_VALID_USMALLINT(*result); break;
ui = *result;
code = IS_VALID_USMALLINT(ui); break;
case TSDB_DATA_TYPE_UINT:
code = IS_VALID_UINT(*result); break;
ui = *result;
code = IS_VALID_UINT(ui); break;
case TSDB_DATA_TYPE_UBIGINT:
code = IS_VALID_UBIGINT(*result); break;
ui = *result;
code = IS_VALID_UBIGINT(ui); break;
}
return code? 0:-1;
......
Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df
Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa
Subproject commit b62a26ecc164a310104df57691691b237e091c89
Subproject commit ce5201014136503d34fecbd56494b67b4961056c
......@@ -8,10 +8,8 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.31.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
ENDIF ()
\ No newline at end of file
......@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.25</version>
<version>2.0.31</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
......
......@@ -113,6 +113,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD
ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1);
switch (columnMetaData.getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return 5;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
......