Unverified commit 9ad16b1a, authored by Xiao Ping and committed by GitHub

Merge branch 'master' into xiaoping/test_case

@@ -45,6 +45,10 @@ IF (TD_TQ)
   ADD_DEFINITIONS(-D_TD_TQ_)
 ENDIF ()
+IF (TD_PRO)
+  ADD_DEFINITIONS(-D_TD_PRO_)
+ENDIF ()
 IF (TD_MEM_CHECK)
   ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
 ENDIF ()
......
@@ -49,6 +49,9 @@ IF (${DBNAME} MATCHES "power")
 ELSEIF (${DBNAME} MATCHES "tq")
   SET(TD_TQ TRUE)
   MESSAGE(STATUS "tq is true")
+ELSEIF (${DBNAME} MATCHES "pro")
+  SET(TD_PRO TRUE)
+  MESSAGE(STATUS "pro is true")
 ENDIF ()
 IF (${DLLTYPE} MATCHES "go")
......
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.2.0.1")
+  SET(TD_VER_NUMBER "2.2.0.2")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
......
@@ -22,7 +22,7 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
 osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
 pagMode=full # [full | lite]
 soMode=dynamic # [static | dynamic]
-dbName=taos # [taos | power | tq]
+dbName=taos # [taos | power | tq | pro]
 allocator=glibc # [glibc | jemalloc]
 verNumber=""
 verNumberComp="1.0.0.0"
@@ -78,7 +78,7 @@ do
       echo " -l [full | lite] "
       echo " -a [glibc | jemalloc] "
       echo " -s [static | dynamic] "
-      echo " -d [taos | power | tq ] "
+      echo " -d [taos | power | tq | pro] "
       echo " -n [version number] "
       echo " -m [compatible version number] "
       exit 0
@@ -253,6 +253,10 @@ if [ "$osType" != "Darwin" ]; then
     ${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
     ${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
     ${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+  elif [[ "$dbName" == "pro" ]]; then
+    ${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+    ${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+    ${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
   else
     ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
     ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
@@ -262,4 +266,3 @@ else
   cd ${script_dir}/tools
   ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
 fi
#!/bin/bash
#
# This file is used to install ProDB's arbitrator on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/tarbitrator"
# old bin dir
bin_dir="/usr/local/tarbitrator/bin"
service_config_dir="/etc/systemd/system"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
update_flag=0
initd_mod=0
service_mod=2
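# service_mod: 0 = systemd, 1 = sysvinit (service/init scripts), 2 = no service manager (start/stop by hand)
# initd_mod:   1 = chkconfig, 2 = insserv, 3 = update-rc.d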
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if [[ -e /etc/os-release ]]; then
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
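# os_type: 1 = deb-style init script (ubuntu/debian/kylin), 2 = rpm-style init script (centos/fedora)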
if echo $osinfo | grep -qwi "ubuntu" ; then
# echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
# echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
# echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
# echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
# echo "This is fedora system"
os_type=2
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact hanatech.com.cn for support."
os_type=1
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/bin
#${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/remove_arbi_prodb.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_prodb.sh ${bin_link_dir}/rmtarbitrator || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
}
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function install_service_on_sysvinit() {
clean_service_on_sysvinit
sleep 1
# Install tarbitratord service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
if ((${initd_mod}==1)); then
${csudo} chkconfig --add tarbitratord || :
${csudo} chkconfig --level 2345 tarbitratord on || :
elif ((${initd_mod}==2)); then
${csudo} insserv tarbitratord || :
${csudo} insserv -d tarbitratord || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d tarbitratord defaults || :
fi
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
}
function install_service_on_systemd() {
clean_service_on_systemd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
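# Write the tarbitratord unit file into ${service_config_dir} line by line (root-owned, hence ${csudo} bash -c)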
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
${csudo} systemctl enable tarbitratord
}
function install_service() {
if ((${service_mod}==0)); then
install_service_on_systemd
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
# must manually stop tarbitrator
kill_tarbitrator
fi
}
function update_prodb() {
# Start to update
echo -e "${GREEN}Start to update ProDB's arbitrator ...${NC}"
# Stop the service if running
if pidof tarbitrator &> /dev/null; then
if ((${service_mod}==0)); then
${csudo} systemctl stop tarbitratord || :
elif ((${service_mod}==1)); then
${csudo} service tarbitratord stop || :
else
kill_tarbitrator
fi
sleep 1
fi
install_main_path
#install_header
install_bin
install_service
echo
#echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
fi
echo
echo -e "\033[44;32;1mProDB's arbitrator is updated successfully!${NC}"
}
function install_prodb() {
# Start to install
echo -e "${GREEN}Start to install ProDB's arbitrator ...${NC}"
install_main_path
#install_header
install_bin
install_service
echo
#echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
fi
echo -e "\033[44;32;1mProDB's arbitrator is installed successfully!${NC}"
echo
}
## ==============================Main program starts from here============================
# Install or update ProDB's arbitrator
if [ -x ${bin_dir}/tarbitrator ]; then
update_flag=1
update_prodb
else
install_prodb
fi
#!/bin/bash
#
# This file is used to install ProDB client on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
osType=Linux
pagMode=full
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/ProDB"
log_dir="/var/log/ProDB"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
data_dir="/var/lib/ProDB"
log_dir="~/ProDB/log"
fi
log_link_dir="/usr/local/ProDB/log"
cfg_install_dir="/etc/ProDB"
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="/usr/local/ProDB"
# old bin dir
bin_dir="/usr/local/ProDB/bin"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
update_flag=0
function kill_client() {
pid=$(ps -ef | grep "prodbc" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg
${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
${csudo} mkdir -p ${install_main_dir}/include
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/prodbc || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/prodemo || :
${csudo} rm -f ${bin_link_dir}/prodump || :
fi
${csudo} rm -f ${bin_link_dir}/rmprodb || :
${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/prodbc ] && ${csudo} ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || :
[ -x ${install_main_dir}/bin/prodump ] && ${csudo} ln -s ${install_main_dir}/bin/prodump ${bin_link_dir}/prodump || :
fi
[ -x ${install_main_dir}/bin/remove_client_prodb.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_prodb.sh ${bin_link_dir}/rmprodb || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
}
function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
if [ "$osType" != "Darwin" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
else
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
${csudo} ldconfig
}
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_config() {
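# Keep any existing /etc/ProDB/taos.cfg, stage a pristine copy as taos.cfg.org, and symlink the live config into the install dir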
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
fi
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
}
function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}
function update_prodb() {
# Start to update
if [ ! -e prodb.tar.gz ]; then
echo "File prodb.tar.gz does not exist"
exit 1
fi
tar -zxf prodb.tar.gz
echo -e "${GREEN}Start to update ProDB client...${NC}"
# Stop the client shell if running
if pidof prodbc &> /dev/null; then
kill_client
sleep 1
fi
install_main_path
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1mProDB client is updated successfully!${NC}"
rm -rf $(tar -tf prodb.tar.gz)
}
function install_prodb() {
# Start to install
if [ ! -e prodb.tar.gz ]; then
echo "File prodb.tar.gz does not exist"
exit 1
fi
tar -zxf prodb.tar.gz
echo -e "${GREEN}Start to install ProDB client...${NC}"
install_main_path
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1mProDB client is installed successfully!${NC}"
rm -rf $(tar -tf prodb.tar.gz)
}
## ==============================Main program starts from here============================
# Install or update the client
# if the server is already installed, don't install the client
if [ -e ${bin_dir}/prodbs ]; then
echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}"
exit 0
fi
if [ -x ${bin_dir}/prodbc ]; then
update_flag=1
update_prodb
else
install_prodb
fi
This diff is collapsed.
#!/bin/bash
#
# Generate arbitrator's tar.gz setup package for all OS systems
set -e
#set -x
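# positional args: compile_dir version build_time cpuType osType verMode verType pagMode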
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/ProDB-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/ProDB-arbitrator-${version}"
fi
# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh"
install_files="${script_dir}/install_arbi_pro.sh"
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_pro.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for the linux client on all OS systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/ProDB-enterprise-client-${version}"
else
install_dir="${release_dir}/ProDB-client-${version}"
fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_pro.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install_client_pro.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
mkdir -p ${install_dir}/bin
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${script_dir}/remove_pro.sh ${install_dir}/bin
else
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${script_dir}/remove_pro.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
if [ -f ${build_dir}/bin/jemalloc.sh ]; then
cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/bin/jeprof ]; then
cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
fi
if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
fi
if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
fi
if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
fi
fi
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
tar -zcv -f prodb.tar.gz * --remove-files || :
else
tar -zcv -f prodb.tar.gz * || :
mv prodb.tar.gz ..
rm -rf ./*
mv ../prodb.tar.gz .
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh
mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh
mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh
fi
chmod a+x ${install_dir}/install_client_pro.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stable or beta"
exit 1
fi
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for all OS systems
set -e
#set -x
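# positional args: compile_dir version build_time cpuType osType verMode verType pagMode versionComp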
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
versionComp=$9
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/ProDB-enterprise-server-${version}"
else
install_dir="${release_dir}/ProDB-server-${version}"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
fi
install_files="${script_dir}/install_pro.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/bin
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs
cp ${script_dir}/remove_pro.sh ${install_dir}/bin
else
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs
cp ${script_dir}/remove_pro.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_pro.sh >> remove_prodb_temp.sh
mv remove_prodb_temp.sh ${install_dir}/bin/remove_pro.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/*.html
sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/js/*.js
sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
tar -zcv -f prodb.tar.gz * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar prodb.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
mv install_prodb_temp.sh ${install_dir}/install_pro.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
mv install_prodb_temp.sh ${install_dir}/install_pro.sh
fi
chmod a+x ${install_dir}/install_pro.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector/
mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
#!/bin/bash
#
# Script to stop the service and uninstall ProDB's arbitrator
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_log() {
# Remove link
${csudo} rm -rf /arbitrator.log || :
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "ProDB tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
}
function clean_service_on_sysvinit() {
if pidof tarbitrator &> /dev/null; then
echo "ProDB's tarbitrator is running, stopping it..."
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop
kill_tarbitrator
fi
}
# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}ProDB's arbitrator is removed successfully!${NC}"
echo
#!/bin/bash
#
# Script to stop the client and uninstall the ProDB client, but retain the config and log files.
set -e
# set -x
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/ProDB"
log_link_dir="/usr/local/ProDB/log"
cfg_link_dir="/usr/local/ProDB/cfg"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
function kill_client() {
pid=$(ps -ef | grep "prodbc" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/prodbc || :
${csudo} rm -f ${bin_link_dir}/prodemo || :
${csudo} rm -f ${bin_link_dir}/prodump || :
${csudo} rm -f ${bin_link_dir}/rmprodb || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
# Remove link
${csudo} rm -rf ${log_link_dir} || :
}
# Stop client.
kill_client
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}ProDB client is removed successfully!${NC}"
echo
#!/bin/bash
#
# Script to stop the service and uninstall ProDB, but retain the config, data and log files.
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/ProDB"
data_link_dir="/usr/local/ProDB/data"
log_link_dir="/usr/local/ProDB/log"
cfg_link_dir="/usr/local/ProDB/cfg"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
install_nginxd_dir="/usr/local/nginxd"
service_config_dir="/etc/systemd/system"
prodb_service_name="prodbs"
tarbitrator_service_name="tarbitratord"
nginx_service_name="nginxd"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_prodbs() {
pid=$(ps -ef | grep "prodbs" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/prodbc || :
${csudo} rm -f ${bin_link_dir}/prodbs || :
${csudo} rm -f ${bin_link_dir}/prodemo || :
${csudo} rm -f ${bin_link_dir}/prodump || :
${csudo} rm -f ${bin_link_dir}/rmprodb || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
# Remove link
${csudo} rm -rf ${log_link_dir} || :
}
function clean_service_on_systemd() {
prodb_service_config="${service_config_dir}/${prodb_service_name}.service"
if systemctl is-active --quiet ${prodb_service_name}; then
echo "ProDB prodbs is running, stopping it..."
${csudo} systemctl stop ${prodb_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${prodb_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${prodb_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "ProDB tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for ProDB is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
fi
}
function clean_service_on_sysvinit() {
if pidof prodbs &> /dev/null; then
echo "ProDB prodbs is running, stopping it..."
${csudo} service prodbs stop || :
fi
if pidof tarbitrator &> /dev/null; then
echo "ProDB tarbitrator is running, stopping it..."
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/prodbs ]; then
${csudo} chkconfig --del prodbs || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/prodbs ]; then
${csudo} insserv -r prodbs || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/prodbs ]; then
${csudo} update-rc.d -f prodbs remove || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/prodbs || :
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must manually stop prodbs and tarbitrator
kill_prodbs
kill_tarbitrator
fi
}
# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config
# Remove data link directory
${csudo} rm -rf ${data_link_dir} || :
${csudo} rm -rf ${install_main_dir}
${csudo} rm -rf ${install_nginxd_dir}
if [[ -e /etc/os-release ]]; then
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
else
osinfo=""
fi
echo -e "${GREEN}ProDB is removed successfully!${NC}"
echo
 name: tdengine
 base: core18
-version: '2.2.0.1'
+version: '2.2.0.2'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
 - usr/bin/taosd
 - usr/bin/taos
 - usr/bin/taosdemo
-- usr/lib/libtaos.so.2.2.0.1
+- usr/lib/libtaos.so.2.2.0.2
 - usr/lib/libtaos.so.1
 - usr/lib/libtaos.so
......
@@ -580,7 +580,7 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc
   SKVRow kvRow = memRowKvBody(dest);
   memRowSetType(dest, SMEM_ROW_KV);
-  memRowSetKvVersion(kvRow, dataRowVersion(dataRow));
+  memRowSetKvVersion(dest, dataRowVersion(dataRow));
   kvRowSetNCols(kvRow, nBoundCols);
   kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
......
@@ -43,3 +43,10 @@ taos_unsubscribe
 taos_open_stream
 taos_close_stream
 taos_load_table_info
+taos_data_type
+taos_stmt_set_sub_tbname
+taos_stmt_get_param
+taos_stmt_bind_param_batch
+taos_stmt_bind_single_param_batch
+taos_is_null
+taos_insert_lines
@@ -77,6 +77,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
   SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
   executeQuery(pSql, pQueryInfo);
   taosReleaseRef(tscObjRef, pSql->self);
 }
......
@@ -2752,7 +2752,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
   tfree(pTableMetaInfo->pTableMeta);
   if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
-    taosHashClear(tscTableMetaMap);
+    if (pSql->res.pRsp == NULL) {
+      tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name);
+      return 0;
+    }
+    return tscProcessTableMetaRsp(pSql);
   }
   return 0;
......
@@ -663,16 +663,6 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_MS;
   taosInitConfigOption(cfg);
-  cfg.option = "rpcForceTcp";
-  cfg.ptr = &tsRpcForceTcp;
-  cfg.valType = TAOS_CFG_VTYPE_INT32;
-  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
-  cfg.minValue = 0;
-  cfg.maxValue = 1;
-  cfg.ptrLength = 0;
-  cfg.unitType = TAOS_CFG_UTYPE_NONE;
-  taosInitConfigOption(cfg);
   cfg.option = "rpcMaxTime";
   cfg.ptr = &tsRpcMaxTime;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -683,6 +673,16 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_SECOND;
   taosInitConfigOption(cfg);
+  cfg.option = "rpcForceTcp";
+  cfg.ptr = &tsRpcForceTcp;
+  cfg.valType = TAOS_CFG_VTYPE_INT32;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+  cfg.minValue = 0;
+  cfg.maxValue = 1;
+  cfg.ptrLength = 0;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
   cfg.option = "statusInterval";
   cfg.ptr = &tsStatusInterval;
   cfg.valType = TAOS_CFG_VTYPE_INT32;
......
@@ -165,12 +165,14 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=Field
     assert nbytes is not None
     res = []
     for i in range(abs(num_of_rows)):
-        try:
-            rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
-            tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
-            res.append(tmpstr.value.decode()[0:rbyte])
-        except ValueError:
+        rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
+        chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte))
+        buffer = create_string_buffer(rbyte + 1)
+        buffer[:rbyte] = chars[0][:rbyte]
+        if rbyte == 1 and buffer[0] == b'\xff':
             res.append(None)
+        else:
+            res.append(cast(buffer, c_char_p).value.decode())
     return res
@@ -179,11 +181,14 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldT
     assert nbytes is not None
     res = []
    for i in range(abs(num_of_rows)):
-        try:
-            tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
-            res.append(tmpstr.value.decode())
-        except ValueError:
+        rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
+        chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte))
+        buffer = create_string_buffer(rbyte + 1)
+        buffer[:rbyte] = chars[0][:rbyte]
+        if rbyte == 4 and buffer[:4] == b'\xff'*4:
             res.append(None)
+        else:
+            res.append(cast(buffer, c_char_p).value.decode())
     return res
......
@@ -83,6 +83,8 @@ extern const int32_t TYPE_BYTES[15];
 #define TSDB_DEFAULT_PASS "powerdb"
 #elif (_TD_TQ_ == true)
 #define TSDB_DEFAULT_PASS "tqueue"
+#elif (_TD_PRO_ == true)
+#define TSDB_DEFAULT_PASS "prodb"
 #else
 #define TSDB_DEFAULT_PASS "taosdata"
 #endif
......
@@ -44,6 +44,13 @@ char PROMPT_HEADER[] = "tq> ";
 char CONTINUE_PROMPT[] = " -> ";
 int prompt_size = 4;
+#elif (_TD_PRO_ == true)
+char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n"
+    "Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "ProDB> ";
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 7;
 #else
 char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
     "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
......
@@ -104,6 +104,7 @@ extern char configDir[];
 #define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
 #define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
+#define DEFAULT_NTHREADS 8
 #define DEFAULT_TIMESTAMP_STEP 1
 #define DEFAULT_INTERLACE_ROWS 0
 #define DEFAULT_DATATYPE_NUM 1
@@ -227,7 +228,7 @@ typedef struct SArguments_S {
     char * sqlFile;
     bool use_metric;
     bool drop_database;
-    bool insert_only;
+    bool aggr_func;
     bool answer_yes;
     bool debug_print;
     bool verbose_print;
@@ -375,8 +376,7 @@ typedef struct SDbs_S {
     char password[SHELL_MAX_PASSWORD_LEN];
     char resultFile[MAX_FILE_NAME_LEN];
     bool use_metric;
-    bool insert_only;
-    bool do_aggreFunc;
+    bool aggr_func;
     bool asyncMode;
     uint32_t threadCount;
@@ -605,6 +605,9 @@ char *g_rand_current_buff = NULL;
 char *g_rand_phase_buff = NULL;
 char *g_randdouble_buff = NULL;
+char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)",
+    "max(current)", "min(current)", "first(current)", "last(current)"};
 char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
     "max(C0)", "min(C0)", "first(C0)", "last(C0)"};
@@ -619,6 +622,8 @@ SArguments g_args = {
     "powerdb", // password
 #elif (_TD_TQ_ == true)
     "tqueue", // password
+#elif (_TD_PRO_ == true)
+    "prodb", // password
 #else
     "taosdata", // password
 #endif
@@ -628,7 +633,7 @@
     NULL, // sqlFile
     true, // use_metric
     true, // drop_database
-    true, // insert_only
+    false, // aggr_func
     false, // debug_print
     false, // verbose_print
     false, // performance statistic print
@@ -646,7 +651,7 @@ SArguments g_args = {
     64, // binwidth
     4, // columnCount, timestamp + float + int + float
     20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow
-    8, // num_of_connections/thread
+    DEFAULT_NTHREADS, // nthreads
     0, // insert_interval
     DEFAULT_TIMESTAMP_STEP, // timestamp_step
     1, // query_times
@@ -748,19 +753,24 @@ static void printHelp() {
     char indent[10] = " ";
     printf("%s\n\n", "Usage: taosdemo [OPTION...]");
     printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t",
-            "The meta file to the execution procedure. Default is './meta.json'.");
+            "The meta file to the execution procedure.");
     printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t",
             "The user name to use when connecting to the server.");
 #ifdef _TD_POWER_
     printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
-            "The password to use when connecting to the server. Default is 'powerdb'");
+            "The password to use when connecting to the server. By default is 'powerdb'");
     printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
-            "Configuration directory. Default is '/etc/power/'.");
+            "Configuration directory. By default is '/etc/power/'.");
 #elif (_TD_TQ_ == true)
     printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
-            "The password to use when connecting to the server. Default is 'tqueue'");
+            "The password to use when connecting to the server. By default is 'tqueue'");
+    printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+            "Configuration directory. By default is '/etc/tq/'.");
+#elif (_TD_PRO_ == true)
+    printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+            "The password to use when connecting to the server. By default is 'prodb'");
     printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
-            "Configuration directory. Default is '/etc/tq/'.");
+            "Configuration directory. By default is '/etc/ProDB/'.");
 #else
     printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
             "The password to use when connecting to the server.");
@@ -772,24 +782,24 @@ static void printHelp() {
     printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t",
             "The TCP/IP port number to use for the connection.");
     printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t",
-            "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
+            "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'.");
     printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t",
-            "Destination database. Default is 'test'.");
+            "Destination database. By default is 'test'.");
     printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t",
-            "Set the replica parameters of the database, Default 1, min: 1, max: 3.");
+            "Set the replica parameters of the database, By default use 1, min: 1, max: 3.");
     printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t",
-            "Table prefix name. Default is 'd'.");
+            "Table prefix name. By default use 'd'.");
     printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t",
"The select sql file."); "The select sql file.");
printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag.");
printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t",
"Direct output to the named file. Default is './output.txt'."); "Direct output to the named file. By default use './output.txt'.");
printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t",
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC.");
printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t",
"The data_type of columns, default: FLOAT, INT, FLOAT."); "The data_type of columns, By default use: FLOAT, INT, FLOAT.");
printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t",
"The width of data_type 'BINARY' or 'NCHAR'. Default is ", "The width of data_type 'BINARY' or 'NCHAR'. By default use ",
g_args.binwidth); g_args.binwidth);
printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t",
"The number of columns per record. Demo mode by default is ", "The number of columns per record. Demo mode by default is ",
...@@ -798,32 +808,32 @@ static void printHelp() { ...@@ -798,32 +808,32 @@ static void printHelp() {
MAX_NUM_COLUMNS); MAX_NUM_COLUMNS);
printf("%s%s%s%s\n", indent, indent, indent, printf("%s%s%s%s\n", indent, indent, indent,
"\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
printf("%s%s%s%s\n", indent, "-T, --threads=NUMBER", "\t\t", printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t",
"The number of threads. Default is 10."); "The number of threads. By default use ", DEFAULT_NTHREADS);
printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t",
"The sleep time (ms) between insertion. Default is 0."); "The sleep time (ms) between insertion. By default is 0.");
printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t",
"The timestamp step between insertion. Default is ", "The timestamp step between insertion. By default is ",
DEFAULT_TIMESTAMP_STEP); DEFAULT_TIMESTAMP_STEP);
printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t",
"The interlace rows of insertion. Default is ", "The interlace rows of insertion. By default is ",
DEFAULT_INTERLACE_ROWS); DEFAULT_INTERLACE_ROWS);
printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t",
"The number of records per request. Default is 30000."); "The number of records per request. By default is 30000.");
printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t",
"The number of tables. Default is 10000."); "The number of tables. By default is 10000.");
printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t",
"The number of records per table. Default is 10000."); "The number of records per table. By default is 10000.");
printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t",
"The value of records generated are totally random."); "The value of records generated are totally random.");
printf("%s\n", "\t\t\t\tThe default is to simulate power equipment scenario."); printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario.");
printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t", printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t",
"No-insert flag."); "Test aggregation functions after insertion.");
printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt."); printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt.");
printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t",
"Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order."); "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order.");
printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t",
"Out of order data's range, ms, default is 1000."); "Out of order data's range. Unit is ms. By default is 1000.");
printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t",
"Print debug info."); "Print debug info.");
printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t",
...@@ -1712,13 +1722,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { ...@@ -1712,13 +1722,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
} }
} else if ((strcmp(argv[i], "-N") == 0) } else if ((strcmp(argv[i], "-N") == 0)
|| (0 == strcmp(argv[i], "--normal-table"))) { || (0 == strcmp(argv[i], "--normal-table"))) {
arguments->demo_mode = false;
arguments->use_metric = false; arguments->use_metric = false;
} else if ((strcmp(argv[i], "-M") == 0) } else if ((strcmp(argv[i], "-M") == 0)
|| (0 == strcmp(argv[i], "--random"))) { || (0 == strcmp(argv[i], "--random"))) {
arguments->demo_mode = false; arguments->demo_mode = false;
} else if ((strcmp(argv[i], "-x") == 0) } else if ((strcmp(argv[i], "-x") == 0)
|| (0 == strcmp(argv[i], "--no-insert"))) { || (0 == strcmp(argv[i], "--aggr-func"))) {
arguments->insert_only = false; arguments->aggr_func = true;
} else if ((strcmp(argv[i], "-y") == 0) } else if ((strcmp(argv[i], "-y") == 0)
|| (0 == strcmp(argv[i], "--answer-yes"))) { || (0 == strcmp(argv[i], "--answer-yes"))) {
arguments->answer_yes = true; arguments->answer_yes = true;
...@@ -2429,10 +2440,11 @@ static void init_rand_data() { ...@@ -2429,10 +2440,11 @@ static void init_rand_data() {
static int printfInsertMeta() { static int printfInsertMeta() {
SHOW_PARSE_RESULT_START(); SHOW_PARSE_RESULT_START();
if (g_args.demo_mode) if (g_args.demo_mode) {
printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n"); printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n");
else } else {
printf("\ntaosdemo is simulating random data as you request..\n\n"); printf("\ntaosdemo is simulating random data as you request..\n\n");
}
if (g_args.iface != INTERFACE_BUT) { if (g_args.iface != INTERFACE_BUT) {
// first time if no iface specified // first time if no iface specified
...@@ -10065,11 +10077,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, ...@@ -10065,11 +10077,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
free(infos); free(infos);
} }
static void *readTable(void *sarg) { static void *queryNtableAggrFunc(void *sarg) {
#if 1
threadInfo *pThreadInfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos; TAOS *taos = pThreadInfo->taos;
setThreadName("readTable"); setThreadName("queryNtableAggrFunc");
char *command = calloc(1, BUFFER_SIZE); char *command = calloc(1, BUFFER_SIZE);
assert(command); assert(command);
...@@ -10092,10 +10103,20 @@ static void *readTable(void *sarg) { ...@@ -10092,10 +10103,20 @@ static void *readTable(void *sarg) {
int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
int64_t totalData = insertRows * ntables; int64_t totalData = insertRows * ntables;
bool do_aggreFunc = g_Dbs.do_aggreFunc; bool aggr_func = g_Dbs.aggr_func;
int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; char **aggreFunc;
if (!do_aggreFunc) { int n;
if (g_args.demo_mode) {
aggreFunc = g_aggreFuncDemo;
n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
} else {
aggreFunc = g_aggreFunc;
n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
}
if (!aggr_func) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
} }
printf("%"PRId64" records:\n", totalData); printf("%"PRId64" records:\n", totalData);
...@@ -10106,9 +10127,11 @@ static void *readTable(void *sarg) { ...@@ -10106,9 +10127,11 @@ static void *readTable(void *sarg) {
uint64_t count = 0; uint64_t count = 0;
for (int64_t i = 0; i < ntables; i++) { for (int64_t i = 0; i < ntables; i++) {
sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64,
g_aggreFunc[j], tb_prefix, i, startTime); aggreFunc[j], tb_prefix, i, startTime);
double t = taosGetTimestampMs(); double t = taosGetTimestampUs();
debugPrint("%s() LN%d, sql command: %s\n",
__func__, __LINE__, command);
TAOS_RES *pSql = taos_query(taos, command); TAOS_RES *pSql = taos_query(taos, command);
int32_t code = taos_errno(pSql); int32_t code = taos_errno(pSql);
...@@ -10125,29 +10148,27 @@ static void *readTable(void *sarg) { ...@@ -10125,29 +10148,27 @@ static void *readTable(void *sarg) {
count++; count++;
} }
t = taosGetTimestampMs() - t; t = taosGetTimestampUs() - t;
totalT += t; totalT += t;
taos_free_result(pSql); taos_free_result(pSql);
} }
fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData, aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
(double)(ntables * insertRows) / totalT, totalT * 1000); (double)(ntables * insertRows) / totalT, totalT / 1000000);
printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000); printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000);
} }
fprintf(fp, "\n"); fprintf(fp, "\n");
fclose(fp); fclose(fp);
free(command); free(command);
#endif
return NULL; return NULL;
} }
static void *readMetric(void *sarg) { static void *queryStableAggrFunc(void *sarg) {
#if 1
threadInfo *pThreadInfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos; TAOS *taos = pThreadInfo->taos;
setThreadName("readMetric"); setThreadName("queryStableAggrFunc");
char *command = calloc(1, BUFFER_SIZE); char *command = calloc(1, BUFFER_SIZE);
assert(command); assert(command);
...@@ -10161,12 +10182,23 @@ static void *readMetric(void *sarg) { ...@@ -10161,12 +10182,23 @@ static void *readMetric(void *sarg) {
int64_t insertRows = pThreadInfo->stbInfo->insertRows; int64_t insertRows = pThreadInfo->stbInfo->insertRows;
int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
int64_t totalData = insertRows * ntables; int64_t totalData = insertRows * ntables;
bool do_aggreFunc = g_Dbs.do_aggreFunc; bool aggr_func = g_Dbs.aggr_func;
char **aggreFunc;
int n;
int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; if (g_args.demo_mode) {
if (!do_aggreFunc) { aggreFunc = g_aggreFuncDemo;
n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
} else {
aggreFunc = g_aggreFunc;
n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
}
if (!aggr_func) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
} }
printf("%"PRId64" records:\n", totalData); printf("%"PRId64" records:\n", totalData);
fprintf(fp, "Querying On %"PRId64" records:\n", totalData); fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
...@@ -10178,18 +10210,29 @@ static void *readMetric(void *sarg) { ...@@ -10178,18 +10210,29 @@ static void *readMetric(void *sarg) {
for (int64_t i = 1; i <= m; i++) { for (int64_t i = 1; i <= m; i++) {
if (i == 1) { if (i == 1) {
sprintf(tempS, "t1 = %"PRId64"", i); if (g_args.demo_mode) {
sprintf(tempS, "groupid = %"PRId64"", i);
} else {
sprintf(tempS, "t0 = %"PRId64"", i);
}
} else { } else {
sprintf(tempS, " or t1 = %"PRId64" ", i); if (g_args.demo_mode) {
sprintf(tempS, " or groupid = %"PRId64" ", i);
} else {
sprintf(tempS, " or t0 = %"PRId64" ", i);
}
} }
strncat(condition, tempS, COND_BUF_LEN - 1); strncat(condition, tempS, COND_BUF_LEN - 1);
sprintf(command, "SELECT %s FROM meters WHERE %s", g_aggreFunc[j], condition); sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition);
printf("Where condition: %s\n", condition); printf("Where condition: %s\n", condition);
debugPrint("%s() LN%d, sql command: %s\n",
__func__, __LINE__, command);
fprintf(fp, "%s\n", command); fprintf(fp, "%s\n", command);
double t = taosGetTimestampMs(); double t = taosGetTimestampUs();
TAOS_RES *pSql = taos_query(taos, command); TAOS_RES *pSql = taos_query(taos, command);
int32_t code = taos_errno(pSql); int32_t code = taos_errno(pSql);
...@@ -10206,11 +10249,11 @@ static void *readMetric(void *sarg) { ...@@ -10206,11 +10249,11 @@ static void *readMetric(void *sarg) {
while(taos_fetch_row(pSql) != NULL) { while(taos_fetch_row(pSql) != NULL) {
count++; count++;
} }
t = taosGetTimestampMs() - t; t = taosGetTimestampUs() - t;
fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
ntables * insertRows / (t * 1000.0), t); ntables * insertRows / (t / 1000), t);
printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0); printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000);
taos_free_result(pSql); taos_free_result(pSql);
} }
...@@ -10218,7 +10261,7 @@ static void *readMetric(void *sarg) { ...@@ -10218,7 +10261,7 @@ static void *readMetric(void *sarg) {
} }
fclose(fp); fclose(fp);
free(command); free(command);
#endif
return NULL; return NULL;
} }
...@@ -11225,9 +11268,8 @@ static void setParaFromArg() { ...@@ -11225,9 +11268,8 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
g_Dbs.use_metric = g_args.use_metric; g_Dbs.use_metric = g_args.use_metric;
g_Dbs.insert_only = g_args.insert_only;
g_Dbs.do_aggreFunc = true; g_Dbs.aggr_func = g_args.aggr_func;
char dataString[TSDB_MAX_BYTES_PER_ROW]; char dataString[TSDB_MAX_BYTES_PER_ROW];
char *data_type = g_args.data_type; char *data_type = g_args.data_type;
...@@ -11238,7 +11280,7 @@ static void setParaFromArg() { ...@@ -11238,7 +11280,7 @@ static void setParaFromArg() {
if ((data_type[0] == TSDB_DATA_TYPE_BINARY) if ((data_type[0] == TSDB_DATA_TYPE_BINARY)
|| (data_type[0] == TSDB_DATA_TYPE_BOOL) || (data_type[0] == TSDB_DATA_TYPE_BOOL)
|| (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) {
g_Dbs.do_aggreFunc = false; g_Dbs.aggr_func = false;
} }
if (g_args.use_metric) { if (g_args.use_metric) {
...@@ -11420,7 +11462,7 @@ static void testMetaFile() { ...@@ -11420,7 +11462,7 @@ static void testMetaFile() {
} }
} }
static void queryResult() { static void queryAggrFunc() {
// query data // query data
pthread_t read_id; pthread_t read_id;
...@@ -11429,7 +11471,6 @@ static void queryResult() { ...@@ -11429,7 +11471,6 @@ static void queryResult() {
pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0; pThreadInfo->start_table_from = 0;
//pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
if (g_args.use_metric) { if (g_args.use_metric) {
pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
...@@ -11458,9 +11499,9 @@ static void queryResult() { ...@@ -11458,9 +11499,9 @@ static void queryResult() {
tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
if (!g_Dbs.use_metric) { if (!g_Dbs.use_metric) {
pthread_create(&read_id, NULL, readTable, pThreadInfo); pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo);
} else { } else {
pthread_create(&read_id, NULL, readMetric, pThreadInfo); pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo);
} }
pthread_join(read_id, NULL); pthread_join(read_id, NULL);
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
...@@ -11482,8 +11523,9 @@ static void testCmdLine() { ...@@ -11482,8 +11523,9 @@ static void testCmdLine() {
g_args.test_mode = INSERT_TEST; g_args.test_mode = INSERT_TEST;
insertTestProcess(); insertTestProcess();
if (false == g_Dbs.insert_only) if (g_Dbs.aggr_func) {
queryResult(); queryAggrFunc();
}
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
......
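
The per-table aggregation pass above assembles its SQL with plain printf-style formatting. A minimal standalone sketch of the same construction, assuming the g_aggreFuncDemo list shown earlier, a hypothetical table prefix "d" (the -m/--table-prefix value from the help text), and an illustrative start time of 0:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static char *aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)",
        "max(current)", "min(current)", "first(current)", "last(current)"};

int main(void) {
    char command[256];
    const char *tbPrefix = "d";   /* assumed table prefix for the example */
    uint64_t startTime = 0;       /* illustrative lower bound for the ts filter */
    int n = sizeof(aggreFuncDemo) / sizeof(aggreFuncDemo[0]);

    for (int j = 0; j < n; j++) {
        for (int64_t i = 0; i < 2; i++) {   /* two example child tables: d0 and d1 */
            snprintf(command, sizeof(command),
                    "SELECT %s FROM %s%" PRId64 " WHERE ts >= %" PRIu64,
                    aggreFuncDemo[j], tbPrefix, i, startTime);
            printf("%s\n", command);        /* queryNtableAggrFunc hands such a string to taos_query() */
        }
    }
    return 0;
}
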
...@@ -181,6 +181,7 @@ typedef struct { ...@@ -181,6 +181,7 @@ typedef struct {
int32_t threadIndex; int32_t threadIndex;
int32_t totalThreads; int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
int precision;
void *taosCon; void *taosCon;
int64_t rowsOfDumpOut; int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut; int64_t tablesOfDumpOut;
...@@ -246,11 +247,6 @@ static struct argp_option options[] = { ...@@ -246,11 +247,6 @@ static struct argp_option options[] = {
{"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
#if TSDB_SUPPORT_NANOSECOND == 1
{"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
#else
{"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6},
#endif
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
...@@ -281,8 +277,11 @@ typedef struct arguments { ...@@ -281,8 +277,11 @@ typedef struct arguments {
bool with_property; bool with_property;
bool avro; bool avro;
int64_t start_time; int64_t start_time;
char humanStartTime[28];
int64_t end_time; int64_t end_time;
char humanEndTime[28];
char precision[8]; char precision[8];
int32_t data_batch; int32_t data_batch;
int32_t max_sql_len; int32_t max_sql_len;
int32_t table_batch; // num of table which will be dump into one output file. int32_t table_batch; // num of table which will be dump into one output file.
...@@ -296,6 +295,8 @@ typedef struct arguments { ...@@ -296,6 +295,8 @@ typedef struct arguments {
bool debug_print; bool debug_print;
bool verbose_print; bool verbose_print;
bool performance_print; bool performance_print;
int dbCount;
} SArguments; } SArguments;
/* Our argp parser. */ /* Our argp parser. */
...@@ -318,13 +319,17 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, ...@@ -318,13 +319,17 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
int numOfCols, FILE *fp, char* dbName); int numOfCols, FILE *fp, char* dbName);
static int32_t taosDumpTable(char *tbName, char *metric, static int32_t taosDumpTable(char *tbName, char *metric,
FILE *fp, TAOS* taosCon, char* dbName); FILE *fp, TAOS* taosCon, char* dbName, int precision);
static int taosDumpTableData(FILE *fp, char *tbName, static int taosDumpTableData(FILE *fp, char *tbName,
TAOS* taosCon, char* dbName, TAOS* taosCon, char* dbName,
int precision,
char *jsonAvroSchema); char *jsonAvroSchema);
static int taosCheckParam(struct arguments *arguments); static int taosCheckParam(struct arguments *arguments);
static void taosFreeDbInfos(); static void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName); static void taosStartDumpOutWorkThreads(
int32_t numOfThread,
char *dbName,
int precision);
struct arguments g_args = { struct arguments g_args = {
// connection option // connection option
...@@ -349,8 +354,10 @@ struct arguments g_args = { ...@@ -349,8 +354,10 @@ struct arguments g_args = {
false, // schemeonly false, // schemeonly
true, // with_property true, // with_property
false, // avro format false, // avro format
-INT64_MAX, // start_time -INT64_MAX + 1, // start_time
{0}, // humanStartTime
INT64_MAX, // end_time INT64_MAX, // end_time
{0}, // humanEndTime
"ms", // precision "ms", // precision
1, // data_batch 1, // data_batch
TSDB_MAX_SQL_LEN, // max_sql_len TSDB_MAX_SQL_LEN, // max_sql_len
...@@ -364,7 +371,8 @@ struct arguments g_args = { ...@@ -364,7 +371,8 @@ struct arguments g_args = {
false, // isDumpIn false, // isDumpIn
false, // debug_print false, // debug_print
false, // verbose_print false, // verbose_print
false // performance_print false, // performance_print
0, // dbCount
}; };
static void errorPrintReqArg2(char *program, char *wrong_arg) static void errorPrintReqArg2(char *program, char *wrong_arg)
...@@ -472,12 +480,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -472,12 +480,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break; break;
case 'S': case 'S':
// parse time here. // parse time here.
g_args.start_time = atol(arg);
break; break;
case 'E': case 'E':
g_args.end_time = atol(arg);
break;
case 'C':
break; break;
case 'B': case 'B':
g_args.data_batch = atoi(arg); g_args.data_batch = atoi(arg);
...@@ -550,7 +554,7 @@ static int queryDbImpl(TAOS *taos, char *command) { ...@@ -550,7 +554,7 @@ static int queryDbImpl(TAOS *taos, char *command) {
return 0; return 0;
} }
static void parse_precision_first( UNUSED_FUNC static void parse_precision_first(
int argc, char *argv[], SArguments *arguments) { int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-C") == 0) { if (strcmp(argv[i], "-C") == 0) {
...@@ -616,6 +620,73 @@ static void parse_args( ...@@ -616,6 +620,73 @@ static void parse_args(
} }
} }
static void copyHumanTimeToArg(char *timeStr, bool isStartTime)
{
// bounded copy so the fixed-size humanStartTime/humanEndTime buffers cannot overflow
if (isStartTime)
tstrncpy(g_args.humanStartTime, timeStr, sizeof(g_args.humanStartTime));
else
tstrncpy(g_args.humanEndTime, timeStr, sizeof(g_args.humanEndTime));
}
static void copyTimestampToArg(char *timeStr, bool isStartTime)
{
if (isStartTime)
g_args.start_time = atol(timeStr);
else
g_args.end_time = atol(timeStr);
}
static void parse_timestamp(
int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
char *tmp;
bool isStartTime = false;
bool isEndTime = false;
if (strcmp(argv[i], "-S") == 0) {
isStartTime = true;
} else if (strcmp(argv[i], "-E") == 0) {
isEndTime = true;
}
if (isStartTime || isEndTime) {
if (NULL == argv[i+1]) {
errorPrint("%s need a valid value following!\n", argv[i]);
exit(-1);
}
tmp = strdup(argv[i+1]);
if (strchr(tmp, ':') && strchr(tmp, '-')) {
copyHumanTimeToArg(tmp, isStartTime);
} else {
copyTimestampToArg(tmp, isStartTime);
}
}
}
}
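
A small standalone illustration of the dispatch rule above: a -S/-E value containing both ':' and '-' is kept as human-readable text, anything else is treated as an epoch integer (the sample inputs below are examples only):

#include <stdio.h>
#include <string.h>

/* 1: keep the value as human-readable text; 0: parse it with atol() as an epoch value */
static int isHumanReadable(const char *arg) {
    return (strchr(arg, ':') != NULL) && (strchr(arg, '-') != NULL);
}

int main(void) {
    printf("%d\n", isHumanReadable("2017-10-01 00:00:00.000+0800"));  /* prints 1 */
    printf("%d\n", isHumanReadable("1506787200000"));                 /* prints 0 */
    return 0;
}
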
static int getPrecisionByString(char *precision)
{
if (0 == strncasecmp(precision,
"ms", 2)) {
return TSDB_TIME_PRECISION_MILLI;
} else if (0 == strncasecmp(precision,
"us", 2)) {
return TSDB_TIME_PRECISION_MICRO;
#if TSDB_SUPPORT_NANOSECOND == 1
} else if (0 == strncasecmp(precision,
"ns", 2)) {
return TSDB_TIME_PRECISION_NANO;
#endif
} else {
errorPrint("Invalid time precision: %s",
precision);
}
return -1;
}
/*
static void parse_timestamp( static void parse_timestamp(
int argc, char *argv[], SArguments *arguments) { int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
...@@ -634,6 +705,7 @@ static void parse_timestamp( ...@@ -634,6 +705,7 @@ static void parse_timestamp(
int64_t tmpEpoch; int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) { if (strchr(tmp, ':') && strchr(tmp, '-')) {
strcpy(g_args.humanStartTime, tmp)
int32_t timePrec; int32_t timePrec;
if (0 == strncasecmp(arguments->precision, if (0 == strncasecmp(arguments->precision,
"ms", strlen("ms"))) { "ms", strlen("ms"))) {
...@@ -672,6 +744,7 @@ static void parse_timestamp( ...@@ -672,6 +744,7 @@ static void parse_timestamp(
} }
} }
} }
*/
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
static char verType[32] = {0}; static char verType[32] = {0};
...@@ -682,7 +755,7 @@ int main(int argc, char *argv[]) { ...@@ -682,7 +755,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be /* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */ reflected in arguments. */
if (argc > 1) { if (argc > 1) {
parse_precision_first(argc, argv, &g_args); // parse_precision_first(argc, argv, &g_args);
parse_timestamp(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args);
parse_args(argc, argv, &g_args); parse_args(argc, argv, &g_args);
} }
...@@ -714,7 +787,9 @@ int main(int argc, char *argv[]) { ...@@ -714,7 +787,9 @@ int main(int argc, char *argv[]) {
printf("with_property: %s\n", g_args.with_property?"true":"false"); printf("with_property: %s\n", g_args.with_property?"true":"false");
printf("avro format: %s\n", g_args.avro?"true":"false"); printf("avro format: %s\n", g_args.avro?"true":"false");
printf("start_time: %" PRId64 "\n", g_args.start_time); printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time); printf("end_time: %" PRId64 "\n", g_args.end_time);
printf("human readable end time: %s \n", g_args.humanEndTime);
printf("precision: %s\n", g_args.precision); printf("precision: %s\n", g_args.precision);
printf("data_batch: %d\n", g_args.data_batch); printf("data_batch: %d\n", g_args.data_batch);
printf("max_sql_len: %d\n", g_args.max_sql_len); printf("max_sql_len: %d\n", g_args.max_sql_len);
...@@ -759,7 +834,9 @@ int main(int argc, char *argv[]) { ...@@ -759,7 +834,9 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime);
fprintf(g_fpOfResult, "precision: %s\n", g_args.precision); fprintf(g_fpOfResult, "precision: %s\n", g_args.precision);
fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
...@@ -816,7 +893,8 @@ int main(int argc, char *argv[]) { ...@@ -816,7 +893,8 @@ int main(int argc, char *argv[]) {
static void taosFreeDbInfos() { static void taosFreeDbInfos() {
if (g_dbInfos == NULL) return; if (g_dbInfos == NULL) return;
for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]); for (int i = 0; i < g_args.dbCount; i++)
tfree(g_dbInfos[i]);
tfree(g_dbInfos); tfree(g_dbInfos);
} }
...@@ -1046,6 +1124,88 @@ static int32_t taosSaveTableOfMetricToTempFile( ...@@ -1046,6 +1124,88 @@ static int32_t taosSaveTableOfMetricToTempFile(
return 0; return 0;
} }
static int getDbCount()
{
int count = 0;
TAOS *taos = NULL;
TAOS_RES *result = NULL;
char *command = NULL;
TAOS_ROW row;
command = (char *)malloc(COMMAND_SIZE);
if (command == NULL) {
errorPrint("%s() LN%d, failed to allocate command buffer\n", __func__, __LINE__);
return 0;
}
/* Connect to server */
taos = taos_connect(g_args.host, g_args.user, g_args.password,
NULL, g_args.port);
if (NULL == taos) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
free(command);
return 0;
}
sprintf(command, "show databases");
result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (0 != code) {
errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n",
__func__, __LINE__, command, taos_errstr(result));
taos_free_result(result);
taos_close(taos);
free(command);
return 0;
}
TAOS_FIELD *fields = taos_fetch_fields(result);
while ((row = taos_fetch_row(result)) != NULL) {
// skip the system database 'log' unless g_args.allow_sys is set
if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
&& (!g_args.allow_sys)) {
continue;
}
if (g_args.databases) { // input multi dbs
for (int i = 0; g_args.arg_list[i]; i++) {
if (strncasecmp(g_args.arg_list[i],
(char *)row[TSDB_SHOW_DB_NAME_INDEX],
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
goto _dump_db_point;
}
continue;
} else if (!g_args.all_databases) { // only input one db
if (strncasecmp(g_args.arg_list[0],
(char *)row[TSDB_SHOW_DB_NAME_INDEX],
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
goto _dump_db_point;
else
continue;
}
_dump_db_point:
count++;
if (g_args.databases) {
if (count > g_args.arg_list_len) break;
} else if (!g_args.all_databases) {
if (count >= 1) break;
}
}
if (count == 0) {
errorPrint("%d databases valid to dump\n", count);
}
taos_free_result(result);
taos_close(taos);
free(command);
return count;
}
static int taosDumpOut() { static int taosDumpOut() {
TAOS *taos = NULL; TAOS *taos = NULL;
TAOS_RES *result = NULL; TAOS_RES *result = NULL;
...@@ -1070,7 +1230,14 @@ static int taosDumpOut() { ...@@ -1070,7 +1230,14 @@ static int taosDumpOut() {
return -1; return -1;
} }
g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); g_args.dbCount = getDbCount();
if (0 == g_args.dbCount) {
errorPrint("%d databases valid to dump\n", g_args.dbCount);
return -1;
}
g_dbInfos = (SDbInfo **)calloc(g_args.dbCount, sizeof(SDbInfo *));
if (g_dbInfos == NULL) { if (g_dbInfos == NULL) {
errorPrint("%s() LN%d, failed to allocate memory\n", errorPrint("%s() LN%d, failed to allocate memory\n",
__func__, __LINE__); __func__, __LINE__);
...@@ -1165,9 +1332,9 @@ _dump_db_point: ...@@ -1165,9 +1332,9 @@ _dump_db_point:
g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], tstrncpy(g_dbInfos[count]->precision,
min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes + 1)); (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
//g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); DB_PRECISION_LEN);
g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
} }
count++; count++;
...@@ -1263,8 +1430,10 @@ _dump_db_point: ...@@ -1263,8 +1430,10 @@ _dump_db_point:
} }
// start multi threads to dumpout // start multi threads to dumpout
taosStartDumpOutWorkThreads(totalNumOfThread, taosStartDumpOutWorkThreads(totalNumOfThread,
g_dbInfos[0]->name); g_dbInfos[0]->name,
getPrecisionByString(g_dbInfos[0]->precision));
char tmpFileName[MAX_FILE_NAME_LEN]; char tmpFileName[MAX_FILE_NAME_LEN];
_clean_tmp_file: _clean_tmp_file:
...@@ -1465,7 +1634,7 @@ static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema) ...@@ -1465,7 +1634,7 @@ static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema)
static int32_t taosDumpTable( static int32_t taosDumpTable(
char *tbName, char *metric, char *tbName, char *metric,
FILE *fp, TAOS* taosCon, char* dbName) { FILE *fp, TAOS* taosCon, char* dbName, int precision) {
int count = 0; int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef)
...@@ -1516,7 +1685,7 @@ static int32_t taosDumpTable( ...@@ -1516,7 +1685,7 @@ static int32_t taosDumpTable(
int32_t ret = 0; int32_t ret = 0;
if (!g_args.schemaonly) { if (!g_args.schemaonly) {
ret = taosDumpTableData(fp, tbName, taosCon, dbName, ret = taosDumpTableData(fp, tbName, taosCon, dbName, precision,
jsonAvroSchema); jsonAvroSchema);
} }
...@@ -1607,7 +1776,8 @@ static void* taosDumpOutWorkThreadFp(void *arg) ...@@ -1607,7 +1776,8 @@ static void* taosDumpOutWorkThreadFp(void *arg)
int ret = taosDumpTable( int ret = taosDumpTable(
tableRecord.name, tableRecord.metric, tableRecord.name, tableRecord.metric,
fp, pThread->taosCon, pThread->dbName); fp, pThread->taosCon, pThread->dbName,
pThread->precision);
if (ret >= 0) { if (ret >= 0) {
// TODO: sum table count and table rows by self // TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++; pThread->tablesOfDumpOut++;
...@@ -1656,7 +1826,7 @@ static void* taosDumpOutWorkThreadFp(void *arg) ...@@ -1656,7 +1826,7 @@ static void* taosDumpOutWorkThreadFp(void *arg)
return NULL; return NULL;
} }
static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName, int precision)
{ {
pthread_attr_t thattr; pthread_attr_t thattr;
SThreadParaObj *threadObj = SThreadParaObj *threadObj =
...@@ -1675,6 +1845,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) ...@@ -1675,6 +1845,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
pThread->threadIndex = t; pThread->threadIndex = t;
pThread->totalThreads = numOfThread; pThread->totalThreads = numOfThread;
tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
pThread->precision = precision;
pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password, pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
NULL, g_args.port); NULL, g_args.port);
if (pThread->taosCon == NULL) { if (pThread->taosCon == NULL) {
...@@ -1924,7 +2095,8 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) { ...@@ -1924,7 +2095,8 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
} }
// start multi threads to dumpout // start multi threads to dumpout
taosStartDumpOutWorkThreads(numOfThread, dbInfo->name); taosStartDumpOutWorkThreads(numOfThread, dbInfo->name,
getPrecisionByString(dbInfo->precision));
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf); (void)remove(tmpBuf);
...@@ -2215,14 +2387,38 @@ static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbN ...@@ -2215,14 +2387,38 @@ static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbN
} }
static int taosDumpTableData(FILE *fp, char *tbName, static int taosDumpTableData(FILE *fp, char *tbName,
TAOS* taosCon, char* dbName, TAOS* taosCon, char* dbName, int precision,
char *jsonAvroSchema) { char *jsonAvroSchema) {
int64_t totalRows = 0; int64_t totalRows = 0;
char sqlstr[1024] = {0}; char sqlstr[1024] = {0};
int64_t start_time, end_time;
if (strlen(g_args.humanStartTime)) {
if (TSDB_CODE_SUCCESS != taosParseTime(
g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
precision, 0)) {
errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
return -1;
}
} else {
start_time = g_args.start_time;
}
if (strlen(g_args.humanEndTime)) {
if (TSDB_CODE_SUCCESS != taosParseTime(
g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
precision, 0)) {
errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
return -1;
}
} else {
end_time = g_args.end_time;
}
sprintf(sqlstr, sprintf(sqlstr,
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbName, g_args.start_time, g_args.end_time); dbName, tbName, start_time, end_time);
TAOS_RES* res = taos_query(taosCon, sqlstr); TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res); int32_t code = taos_errno(res);
......
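
Both bounds in the WHERE clause built above must be expressed in the database's own precision. A standalone sketch of that relationship; the conversion helper, the db.tb name, and the epoch values are assumptions for illustration, not taosdump code:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>

/* scale an epoch value given in seconds to the database precision (assumed helper) */
static int64_t secondsToDbPrecision(int64_t sec, const char *precision) {
    if (strncasecmp(precision, "us", 2) == 0) return sec * 1000000LL;
    if (strncasecmp(precision, "ns", 2) == 0) return sec * 1000000000LL;
    return sec * 1000LL;   /* "ms" is the default precision */
}

int main(void) {
    int64_t start = secondsToDbPrecision(1506787200, "us");
    int64_t end   = secondsToDbPrecision(1506873600, "us");
    printf("select * from db.tb where _c0 >= %lld and _c0 <= %lld order by _c0 asc;\n",
            (long long)start, (long long)end);
    return 0;
}
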
...@@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code), pStable->numOfTags); tstrerror(code), pStable->numOfTags);
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
...@@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
...@@ -1321,6 +1326,10 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1321,6 +1326,10 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
...@@ -1376,6 +1385,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1376,6 +1385,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
...@@ -1444,6 +1456,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1444,6 +1456,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
...@@ -1489,6 +1504,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { ...@@ -1489,6 +1504,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
code = mnodeGetSuperTableMeta(pMsg);
}
return code; return code;
} }
......
...@@ -32,6 +32,13 @@ void osInit() { ...@@ -32,6 +32,13 @@ void osInit() {
strcpy(tsDataDir, "/var/lib/tq"); strcpy(tsDataDir, "/var/lib/tq");
strcpy(tsLogDir, "/var/log/tq"); strcpy(tsLogDir, "/var/log/tq");
strcpy(tsScriptDir, "/etc/tq"); strcpy(tsScriptDir, "/etc/tq");
#elif (_TD_PRO_ == true)
if (configDir[0] == 0) {
strcpy(configDir, "/etc/ProDB");
}
strcpy(tsDataDir, "/var/lib/ProDB");
strcpy(tsLogDir, "/var/log/ProDB");
strcpy(tsScriptDir, "/etc/ProDB");
#else #else
if (configDir[0] == 0) { if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos"); strcpy(configDir, "/etc/taos");
......
...@@ -39,6 +39,14 @@ void osInit() { ...@@ -39,6 +39,14 @@ void osInit() {
strcpy(tsDataDir, "C:/TQ/data"); strcpy(tsDataDir, "C:/TQ/data");
strcpy(tsLogDir, "C:/TQ/log"); strcpy(tsLogDir, "C:/TQ/log");
strcpy(tsScriptDir, "C:/TQ/script"); strcpy(tsScriptDir, "C:/TQ/script");
#elif (_TD_PRO_ == true)
if (configDir[0] == 0) {
strcpy(configDir, "C:/ProDB/cfg");
}
strcpy(tsVnodeDir, "C:/ProDB/data");
strcpy(tsDataDir, "C:/ProDB/data");
strcpy(tsLogDir, "C:/ProDB/log");
strcpy(tsScriptDir, "C:/ProDB/script");
#else #else
if (configDir[0] == 0) { if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg"); strcpy(configDir, "C:/TDengine/cfg");
......
...@@ -399,7 +399,8 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 ...@@ -399,7 +399,8 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64
pContext->oldInUse = pEpSet->inUse; pContext->oldInUse = pEpSet->inUse;
pContext->connType = RPC_CONN_UDPC; pContext->connType = RPC_CONN_UDPC;
if (contLen > tsRpcMaxUdpSize || tsRpcForceTcp ) pContext->connType = RPC_CONN_TCPC;
if (contLen > tsRpcMaxUdpSize || tsRpcForceTcp) pContext->connType = RPC_CONN_TCPC;
// connection type is application specific. // connection type is application specific.
// for TDengine, all the query, show commands shall have TCP connection // for TDengine, all the query, show commands shall have TCP connection
...@@ -407,7 +408,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 ...@@ -407,7 +408,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64
if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE
|| type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP
|| type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META
|| type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS) || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE)
pContext->connType = RPC_CONN_TCPC; pContext->connType = RPC_CONN_TCPC;
pContext->rid = taosAddRef(tsRpcRefId, pContext); pContext->rid = taosAddRef(tsRpcRefId, pContext);
......
...@@ -74,7 +74,7 @@ void tsdbFreeMeta(STsdbMeta* pMeta); ...@@ -74,7 +74,7 @@ void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo); int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo); int tsdbCloseMeta(STsdbRepo* pRepo);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid); STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version); STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version, int8_t rowType);
int tsdbWLockRepoMeta(STsdbRepo* pRepo); int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo); int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo); int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
...@@ -99,7 +99,9 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k ...@@ -99,7 +99,9 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k
} }
} }
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) { // rowType defaults to -1 when the caller is not working from a specific row
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version,
int8_t rowType) {
STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purpose STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purpose
STSchema* pSchema = NULL; STSchema* pSchema = NULL;
STSchema* pTSchema = NULL; STSchema* pTSchema = NULL;
...@@ -110,8 +112,12 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, ...@@ -110,8 +112,12 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
} else { // get the schema with version } else { // get the schema with version
void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) { if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; if (rowType == SMEM_ROW_KV) {
goto _exit; ptr = taosArrayGetLast(pDTable->schema);
} else {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
}
} }
pTSchema = *(STSchema**)ptr; pTSchema = *(STSchema**)ptr;
} }
...@@ -130,7 +136,7 @@ _exit: ...@@ -130,7 +136,7 @@ _exit:
} }
static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) { static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) {
return tsdbGetTableSchemaImpl(pTable, false, false, -1); return tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
} }
static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) {
......
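
A standalone sketch of the version-lookup-with-fallback idea behind tsdbGetTableSchemaImpl above, written with plain arrays instead of the tsdb structures; every name below is an illustrative stand-in, not the tsdb API:

#include <stdio.h>
#include <stddef.h>

#define ROW_KV 1   /* stands in for SMEM_ROW_KV */

typedef struct { int version; const char *schema; } SchemaEntry;

/* return the schema matching 'version'; for KV-encoded rows fall back to the
   newest schema instead of failing when the exact version is no longer kept */
static const char *findSchema(SchemaEntry *entries, size_t n, int version, int rowType) {
    for (size_t i = 0; i < n; i++) {
        if (entries[i].version == version) return entries[i].schema;
    }
    if (rowType == ROW_KV && n > 0) return entries[n - 1].schema;  /* latest entry */
    return NULL;                                                   /* invalid schema version */
}

int main(void) {
    SchemaEntry history[] = {{1, "schema v1"}, {2, "schema v2"}, {3, "schema v3"}};
    printf("%s\n", findSchema(history, 3, 2, -1));      /* exact match: schema v2 */
    printf("%s\n", findSchema(history, 3, 9, ROW_KV));  /* fallback: schema v3 */
    return 0;
}
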
...@@ -866,7 +866,7 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) { ...@@ -866,7 +866,7 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) {
} }
static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) {
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
pCommith->pTable = pTable; pCommith->pTable = pTable;
...@@ -1283,7 +1283,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ...@@ -1283,7 +1283,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
(*iter)++; (*iter)++;
} else if (key1 > key2) { } else if (key1 > key2) {
if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); pSchema =
tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row), (int8_t)memRowType(row));
ASSERT(pSchema != NULL); ASSERT(pSchema != NULL);
} }
...@@ -1304,7 +1305,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ...@@ -1304,7 +1305,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
if (update != TD_ROW_DISCARD_UPDATE) { if (update != TD_ROW_DISCARD_UPDATE) {
//copy mem data //copy mem data
if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); pSchema =
tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row), (int8_t)memRowType(row));
ASSERT(pSchema != NULL); ASSERT(pSchema != NULL);
} }
......
...@@ -431,7 +431,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { ...@@ -431,7 +431,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) {
if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue; if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue;
pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1); pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1, -1);
taosArrayClear(pComph->aSupBlk); taosArrayClear(pComph->aSupBlk);
if ((tdInitDataCols(pComph->pDataCols, pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) || if ((tdInitDataCols(pComph->pDataCols, pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) ||
(tdInitDataCols(pReadh->pDCols[1], pSchema) < 0)) { (tdInitDataCols(pReadh->pDCols[1], pSchema) < 0)) {
......
...@@ -617,7 +617,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) { ...@@ -617,7 +617,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
STable *pTable = pMeta->tables[i]; STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) { if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1), 0); tsdbGetTableSchemaImpl(pTable, false, false, -1, -1), 0);
} }
} }
} }
......
...@@ -582,7 +582,7 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ...@@ -582,7 +582,7 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row) { static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row) {
if (pCols) { if (pCols) {
if (*ppSchema == NULL || schemaVersion(*ppSchema) != memRowVersion(row)) { if (*ppSchema == NULL || schemaVersion(*ppSchema) != memRowVersion(row)) {
*ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row)); *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row), (int8_t)memRowType(row));
if (*ppSchema == NULL) { if (*ppSchema == NULL) {
ASSERT(false); ASSERT(false);
return -1; return -1;
...@@ -730,7 +730,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep ...@@ -730,7 +730,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep
if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) { if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) {
*ppSchema1 = pSchema2; *ppSchema1 = pSchema2;
} else { } else {
*ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1)); *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1), (int8_t)memRowType(row1));
} }
pSchema1 = *ppSchema1; pSchema1 = *ppSchema1;
} }
...@@ -739,7 +739,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep ...@@ -739,7 +739,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep
if(schemaVersion(pSchema1) == dv2) { if(schemaVersion(pSchema1) == dv2) {
pSchema2 = pSchema1; pSchema2 = pSchema1;
} else { } else {
*ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2)); *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2), (int8_t)memRowType(row2));
pSchema2 = *ppSchema2; pSchema2 = *ppSchema2;
} }
} }
...@@ -847,7 +847,7 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t * ...@@ -847,7 +847,7 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *
} }
} }
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion, -1);
pRepo->stat.pointsWritten += points * schemaNCols(pSchema); pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
pRepo->stat.totalStorage += points * schemaVLen(pSchema); pRepo->stat.totalStorage += points * schemaVLen(pSchema);
...@@ -894,7 +894,7 @@ static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { ...@@ -894,7 +894,7 @@ static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
ASSERT(pTable != NULL); ASSERT(pTable != NULL);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
int sversion = schemaVersion(pSchema); int sversion = schemaVersion(pSchema);
if (pBlock->sversion == sversion) { if (pBlock->sversion == sversion) {
...@@ -951,7 +951,7 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT ...@@ -951,7 +951,7 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
} }
} else { } else {
ASSERT(pBlock->sversion >= 0); ASSERT(pBlock->sversion >= 0);
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion, -1) == NULL) {
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
...@@ -972,7 +972,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro ...@@ -972,7 +972,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
return; return;
} }
pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row), (int8_t)memRowType(row));
if (pSchema == NULL) { if (pSchema == NULL) {
return; return;
} }
......
...@@ -534,8 +534,8 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { ...@@ -534,8 +534,8 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
return *(STable **)ptr; return *(STable **)ptr;
} }
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version) { STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version, int8_t rowType) {
return tsdbGetTableSchemaImpl(pTable, true, false, _version); return tsdbGetTableSchemaImpl(pTable, true, false, _version, rowType);
} }
int tsdbWLockRepoMeta(STsdbRepo *pRepo) { int tsdbWLockRepoMeta(STsdbRepo *pRepo) {
...@@ -652,7 +652,7 @@ int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) { ...@@ -652,7 +652,7 @@ int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) {
} }
STSchema* tsdbGetTableLatestSchema(STable *pTable) { STSchema* tsdbGetTableLatestSchema(STable *pTable) {
return tsdbGetTableSchemaByVersion(pTable, -1); return tsdbGetTableSchemaByVersion(pTable, -1, -1);
} }
int tsdbUpdateLastColSchema(STable *pTable, STSchema *pNewSchema) { int tsdbUpdateLastColSchema(STable *pTable, STSchema *pNewSchema) {
...@@ -957,7 +957,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo ...@@ -957,7 +957,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
} }
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
} }
...@@ -965,7 +965,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo ...@@ -965,7 +965,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1;
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql,
tsdbGetTableSchemaImpl(pTable, false, false, -1), 1); tsdbGetTableSchemaImpl(pTable, false, false, -1, -1), 1);
} }
tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
...@@ -984,7 +984,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro ...@@ -984,7 +984,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
SListNode *pNode = NULL; SListNode *pNode = NULL;
STable * tTable = NULL; STable * tTable = NULL;
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
int maxCols = schemaNCols(pSchema); int maxCols = schemaNCols(pSchema);
int maxRowBytes = schemaTLen(pSchema); int maxRowBytes = schemaTLen(pSchema);
...@@ -1018,7 +1018,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro ...@@ -1018,7 +1018,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
for (int i = 0; i < pMeta->maxTables; i++) { for (int i = 0; i < pMeta->maxTables; i++) {
STable *_pTable = pMeta->tables[i]; STable *_pTable = pMeta->tables[i];
if (_pTable != NULL) { if (_pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1); pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1, -1);
maxCols = MAX(maxCols, schemaNCols(pSchema)); maxCols = MAX(maxCols, schemaNCols(pSchema));
maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema)); maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema));
} }
......
...@@ -1582,7 +1582,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, ...@@ -1582,7 +1582,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int32_t numOfColsOfRow1 = 0; int32_t numOfColsOfRow1 = 0;
if (pSchema1 == NULL) { if (pSchema1 == NULL) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1));
} }
if(isRow1DataRow) { if(isRow1DataRow) {
numOfColsOfRow1 = schemaNCols(pSchema1); numOfColsOfRow1 = schemaNCols(pSchema1);
...@@ -1594,7 +1594,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, ...@@ -1594,7 +1594,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
if(row2) { if(row2) {
isRow2DataRow = isDataRow(row2); isRow2DataRow = isDataRow(row2);
if (pSchema2 == NULL) { if (pSchema2 == NULL) {
pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), (int8_t)memRowType(row2));
} }
if(isRow2DataRow) { if(isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2); numOfColsOfRow2 = schemaNCols(pSchema2);
...@@ -1961,11 +1961,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* ...@@ -1961,11 +1961,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (rv1 != memRowVersion(row1)) { if (rv1 != memRowVersion(row1)) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1));
rv1 = memRowVersion(row1); rv1 = memRowVersion(row1);
} }
if(row2 && rv2 != memRowVersion(row2)) { if(row2 && rv2 != memRowVersion(row2)) {
pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), (int8_t)memRowType(row2));
rv2 = memRowVersion(row2); rv2 = memRowVersion(row2);
} }
...@@ -1986,11 +1986,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* ...@@ -1986,11 +1986,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos); doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos);
} }
if (rv1 != memRowVersion(row1)) { if (rv1 != memRowVersion(row1)) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1));
rv1 = memRowVersion(row1); rv1 = memRowVersion(row1);
} }
if(row2 && rv2 != memRowVersion(row2)) { if(row2 && rv2 != memRowVersion(row2)) {
pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), (int8_t)memRowType(row2));
rv2 = memRowVersion(row2); rv2 = memRowVersion(row2);
} }
...@@ -2654,7 +2654,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int ...@@ -2654,7 +2654,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
win->ekey = key; win->ekey = key;
if (rv != memRowVersion(row)) { if (rv != memRowVersion(row)) {
pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row), (int8_t)memRowType(row));
rv = memRowVersion(row); rv = memRowVersion(row);
} }
mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true); mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true);
......
...@@ -153,7 +153,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { ...@@ -153,7 +153,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) {
} }
int tsdbSetReadTable(SReadH *pReadh, STable *pTable) { int tsdbSetReadTable(SReadH *pReadh, STable *pTable) {
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1);
pReadh->pTable = pTable; pReadh->pTable = pTable;
......
...@@ -336,6 +336,9 @@ void taosReadGlobalLogCfg() { ...@@ -336,6 +336,9 @@ void taosReadGlobalLogCfg() {
#elif (_TD_TQ_ == true) #elif (_TD_TQ_ == true)
printf("configDir:%s not there, use default value: /etc/tq", configDir); printf("configDir:%s not there, use default value: /etc/tq", configDir);
strcpy(configDir, "/etc/tq"); strcpy(configDir, "/etc/tq");
#elif (_TD_PRO_ == true)
printf("configDir:%s not there, use default value: /etc/ProDB", configDir);
strcpy(configDir, "/etc/ProDB");
#else #else
printf("configDir:%s not there, use default value: /etc/taos", configDir); printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos"); strcpy(configDir, "/etc/taos");
......
...@@ -85,6 +85,8 @@ int64_t dbgWSize = 0; ...@@ -85,6 +85,8 @@ int64_t dbgWSize = 0;
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
#elif (_TD_TQ_ == true) #elif (_TD_TQ_ == true)
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq";
#elif (_TD_PRO_ == true)
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB";
#else #else
char tsLogDir[PATH_MAX] = "/var/log/taos"; char tsLogDir[PATH_MAX] = "/var/log/taos";
#endif #endif
......
#!/bin/bash
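# Sweep the taos network speed test over the -N, -l and -S values below
# (presumably packet count, packet length in bytes, and transport protocol),
# targeting host BCC-2 on port 6030; each run's output is appended to result.txt.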
for N in -1 0 1 10000 10001
do
for l in 1023 1024 1073741824 1073741825
do
for S in udp tcp
do
taos -n speed -h BCC-2 -P 6030 -N $N -l $l -S $S 2>&1 | tee -a result.txt
done
done
done
...@@ -18177,4 +18177,40 @@ ...@@ -18177,4 +18177,40 @@
fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8 obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalFrameDefault
}
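# The two suppressions below hide "definite" leaks reported once at import time
# by the cffi-generated _openssl and _constant_time modules under
# /usr/bin/python3.8 (see the PyInit__openssl / PyInit__constant_time frames).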
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__openssl
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_PyObject_GC_New
fun:ffi_internal_new
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__constant_time
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
} }
\ No newline at end of file
...@@ -178,7 +178,7 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns ...@@ -178,7 +178,7 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py python3 test.py -f tools/taosdumpTestNanoSupport.py
# update # update
python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update.py
......
...@@ -1128,9 +1128,8 @@ class TDTestCase: ...@@ -1128,9 +1128,8 @@ class TDTestCase:
self.td3690() self.td3690()
self.td4082() self.td4082()
self.td4288() self.td4288()
# self.td4724() self.td4724()
# self.td5798() self.td5935()
# self.td5935()
self.td6068() self.td6068()
# develop branch # develop branch
...@@ -1138,6 +1137,7 @@ class TDTestCase: ...@@ -1138,6 +1137,7 @@ class TDTestCase:
# self.td4889() # self.td4889()
# self.td5168() # self.td5168()
# self.td5433() # self.td5433()
# self.td5798()
def stop(self): def stop(self):
tdSql.close() tdSql.close()
......
...@@ -49,10 +49,11 @@ class TDTestCase: ...@@ -49,10 +49,11 @@ class TDTestCase:
# select as cname with cname_list # select as cname with cname_list
sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check' sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check'
sql_seq_no_as = sql_seq.replace('as ', '') sql_seq_no_as = sql_seq.replace(' as ', ' ')
print(sql_seq)
print(sql_seq_no_as)
res = tdSql.getColNameList(sql_seq) res = tdSql.getColNameList(sql_seq)
res_no_as = tdSql.getColNameList(sql_seq_no_as) res_no_as = tdSql.getColNameList(sql_seq_no_as)
# cname[1] > 64, it is expected to be equal to 64 # cname[1] > 64, it is expected to be equal to 64
cname_list_1_expected = cname_list[1][:-1] cname_list_1_expected = cname_list[1][:-1]
cname_list[1] = cname_list_1_expected cname_list[1] = cname_list_1_expected
...@@ -79,7 +80,7 @@ class TDTestCase: ...@@ -79,7 +80,7 @@ class TDTestCase:
# select as cname with cname_list # select as cname with cname_list
sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check'
sql_seq_no_as = sql_seq.replace('as ', '') sql_seq_no_as = sql_seq.replace(' as ', ' ')
res = tdSql.getColNameList(sql_seq) res = tdSql.getColNameList(sql_seq)
res_no_as = tdSql.getColNameList(sql_seq_no_as) res_no_as = tdSql.getColNameList(sql_seq_no_as)
......
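The alias-stripping change in the hunk above (replace('as ', '') becoming replace(' as ', ' ')) matters whenever a generated alias happens to end in "as". A minimal Python sketch with a hypothetical alias, not one taken from cname_list:

sql = "select min(pi3) as bias from regular_table_cname_check"
old_style = sql.replace('as ', '')    # also consumes the trailing "as " of the alias "bias "
new_style = sql.replace(' as ', ' ')  # strips only the standalone AS keyword
print(old_style)  # select min(pi3) bifrom regular_table_cname_check  (alias mangled, glued to FROM)
print(new_style)  # select min(pi3) bias from regular_table_cname_check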
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00"
self.numberOfTables = 10
self.numberOfRecords = 100
def checkCommunity(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
return False
else:
return True
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def createdb(self, precision="ns"):
tb_nums = self.numberOfTables
per_tb_rows = self.numberOfRecords
def build_db(precision, start_time):
tdSql.execute("drop database if exists timedb1")
tdSql.execute(
"create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
tdSql.execute("use timedb1")
tdSql.execute(
"create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
for tb in range(tb_nums):
tbname = "t"+str(tb)
tdSql.execute("create table " + tbname +
" using st tags(1, 'beijing')")
sql = "insert into " + tbname + " values"
currts = start_time
if precision == "ns":
ts_seed = 1000000000
elif precision == "us":
ts_seed = 1000000
else:
ts_seed = 1000
for i in range(per_tb_rows):
sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i %
100, i % 100, currts + i*100) # currts +1000ms (1000000000ns)
tdSql.execute(sql)
if precision == "ns":
start_time = 1625068800000000000
build_db(precision, start_time)
elif precision == "us":
start_time = 1625068800000000
build_db(precision, start_time)
elif precision == "ms":
start_time = 1625068800000
build_db(precision, start_time)
else:
print("other time precision not valid , please check! ")
def run(self):
# clear envs
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exist!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
# create nano second database
self.createdb(precision="ns")
# dump all data
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
# dump part data with -S -E
os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
# replace strings to dump in databases
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
# dump data and check for taosdump
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
# check data
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test nano second : dump check data pass for all data!" )
else:
tdLog.info("test nano second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " )
else:
tdLog.info(" test nano second : dump check data failed for data !" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " )
else:
tdLog.info(" test nano second : dump check data failed for data !" )
# us second support test case
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exits!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
self.createdb(precision="us")
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test us second : dump check data pass for all data!" )
else:
tdLog.info("test us second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " )
else:
tdLog.info(" test us second : dump check data failed for data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " )
else:
tdLog.info(" test us second : dump check data failed for data! " )
# ms second support test case
os.system("rm -rf ./taosdumptest/")
tdSql.execute("drop database if exists dumptmp1")
tdSql.execute("drop database if exists dumptmp2")
tdSql.execute("drop database if exists dumptmp3")
if not os.path.exists("./taosdumptest/tmp1"):
os.makedirs("./taosdumptest/dumptmp1")
else:
print("path exits!")
if not os.path.exists("./taosdumptest/dumptmp2"):
os.makedirs("./taosdumptest/dumptmp2")
if not os.path.exists("./taosdumptest/dumptmp3"):
os.makedirs("./taosdumptest/dumptmp3")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
self.createdb(precision="ms")
os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
'%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system(
"sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
os.system(
"sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000)
tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510)
tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900)
origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res:
tdLog.info("test ms second : dump check data pass for all data!" )
else:
tdLog.info("test ms second : dump check data failed for all data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000")
dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " )
else:
tdLog.info(" test ms second : dump check data failed for data!" )
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " )
else:
tdLog.info(" test ms second : dump check data failed for data! " )
os.system("rm -rf ./taosdumptest/")
os.system("rm -rf ./dump_result.txt")
os.system("rm -rf *.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
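For readers checking the hard-coded expectations above, the arithmetic behind tdSql.checkData(0, 0, 1000/510/900) can be sketched in a few lines of Python. It assumes the layout built by createdb (10 tables, 100 rows per table, one row per second from the start timestamp) and that taosdump treats the -S/-E window as inclusive at both ends:

# Expected row counts for the three dump directories (same in the ns/us/ms runs).
tables, rows_per_table = 10, 100
offsets = range(rows_per_table)  # seconds after the start timestamp

full_dump   = tables * len(offsets)                              # dumptmp1 -> 1000
ranged_dump = tables * sum(1 for i in offsets if 10 <= i <= 60)  # dumptmp2: -S +10s, -E +60s -> 510
open_ended  = tables * sum(1 for i in offsets if i >= 10)        # dumptmp3: -S +10s only -> 900

assert (full_dump, ranged_dump, open_ended) == (1000, 510, 900)

This is only the arithmetic behind the checks; the actual counts come from restoring the taosdump output and querying the restored databases.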
...@@ -26,6 +26,9 @@ class TDTestCase: ...@@ -26,6 +26,9 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
now = time.time() now = time.time()
print(int(round(now * 1000)))
self.ts = int(round(now * 1000)) self.ts = int(round(now * 1000))
def getBuildPath(self): def getBuildPath(self):
...@@ -54,6 +57,7 @@ class TDTestCase: ...@@ -54,6 +57,7 @@ class TDTestCase:
# insert: create one or mutiple tables per sql and insert multiple rows per sql # insert: create one or mutiple tables per sql and insert multiple rows per sql
# test case for https://jira.taosdata.com:18080/browse/TD-4985 # test case for https://jira.taosdata.com:18080/browse/TD-4985
os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")
os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath)
tdSql.execute("use db") tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0") tdSql.query("select count (tbname) from stb0")
...@@ -82,6 +86,7 @@ class TDTestCase: ...@@ -82,6 +86,7 @@ class TDTestCase:
% (self.ts + i, i, -10000+i, i)) % (self.ts + i, i, -10000+i, i))
tdSql.query("select * from stb0 where c2 like 'test99%' ") tdSql.query("select * from stb0 where c2 like 'test99%' ")
tdSql.checkRows(1000) tdSql.checkRows(1000)
tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" )
tdSql.checkData(0, 1, 0) tdSql.checkData(0, 1, 0)
tdSql.checkData(1, 1, 1) tdSql.checkData(1, 1, 1)
......
...@@ -44,14 +44,12 @@ class TDTestCase: ...@@ -44,14 +44,12 @@ class TDTestCase:
projPath = selfPath[:selfPath.find("tests")] projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath): for root, dirs, files in os.walk(projPath):
if ("taosdump" in files): if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root)) rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath): if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")] buildPath = root[:len(root) - len("/build/bin")]
break break
return buildPath return buildPath
def createdb(self, precision="ns"): def createdb(self, precision="ns"):
tb_nums = self.numberOfTables tb_nums = self.numberOfTables
...@@ -60,13 +58,16 @@ class TDTestCase: ...@@ -60,13 +58,16 @@ class TDTestCase:
def build_db(precision, start_time): def build_db(precision, start_time):
tdSql.execute("drop database if exists timedb1") tdSql.execute("drop database if exists timedb1")
tdSql.execute( tdSql.execute(
"create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") "create database timedb1 days 10 keep 365 blocks 8 precision " +
"\"" +
precision +
"\"")
tdSql.execute("use timedb1") tdSql.execute("use timedb1")
tdSql.execute( tdSql.execute(
"create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
for tb in range(tb_nums): for tb in range(tb_nums):
tbname = "t"+str(tb) tbname = "t" + str(tb)
tdSql.execute("create table " + tbname + tdSql.execute("create table " + tbname +
" using st tags(1, 'beijing')") " using st tags(1, 'beijing')")
sql = "insert into " + tbname + " values" sql = "insert into " + tbname + " values"
...@@ -79,8 +80,8 @@ class TDTestCase: ...@@ -79,8 +80,8 @@ class TDTestCase:
ts_seed = 1000 ts_seed = 1000
for i in range(per_tb_rows): for i in range(per_tb_rows):
sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % sql += "(%d, %d, 'nchar%d',%d)" % (currts + i * ts_seed, i %
100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) 100, i % 100, currts + i * 100) # currts +1000ms (1000000000ns)
tdSql.execute(sql) tdSql.execute(sql)
if precision == "ns": if precision == "ns":
...@@ -97,7 +98,6 @@ class TDTestCase: ...@@ -97,7 +98,6 @@ class TDTestCase:
else: else:
print("other time precision not valid , please check! ") print("other time precision not valid , please check! ")
def run(self): def run(self):
...@@ -132,11 +132,12 @@ class TDTestCase: ...@@ -132,11 +132,12 @@ class TDTestCase:
# dump all data # dump all data
os.system( os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
# dump part data with -S -E # dump part data with -S -E
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
binPath) binPath)
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
...@@ -150,42 +151,44 @@ class TDTestCase: ...@@ -150,42 +151,44 @@ class TDTestCase:
os.system( os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)
# dump data and check for taosdump # dump data and check for taosdump
tdSql.query("select count(*) from dumptmp1.st") tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000) tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from dumptmp2.st") tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510) tdSql.checkData(0, 0, 510)
tdSql.query("select count(*) from dumptmp3.st") tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900) tdSql.checkData(0, 0, 900)
# check data # check data
origin_res = tdSql.getResult("select * from timedb1.st") origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st") dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info("test nano second : dump check data pass for all data!" ) tdLog.info("test nano second : dump check data pass for all data!")
else: else:
tdLog.info("test nano second : dump check data failed for all data!" ) tdLog.info(
"test nano second : dump check data failed for all data!")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
dump_res = tdSql.getResult("select * from dumptmp2.st") dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " ) tdLog.info(" test nano second : dump check data pass for data! ")
else: else:
tdLog.info(" test nano second : dump check data failed for data !" ) tdLog.info(" test nano second : dump check data failed for data !")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st") dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test nano second : dump check data pass for data! " ) tdLog.info(" test nano second : dump check data pass for data! ")
else: else:
tdLog.info(" test nano second : dump check data failed for data !" ) tdLog.info(" test nano second : dump check data failed for data !")
# us second support test case # us second support test case
...@@ -215,10 +218,11 @@ class TDTestCase: ...@@ -215,10 +218,11 @@ class TDTestCase:
self.createdb(precision="us") self.createdb(precision="us")
os.system( os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
binPath) binPath)
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
...@@ -231,43 +235,42 @@ class TDTestCase: ...@@ -231,43 +235,42 @@ class TDTestCase:
os.system( os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)
tdSql.query("select count(*) from dumptmp1.st") tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000) tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from dumptmp2.st") tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510) tdSql.checkData(0, 0, 510)
tdSql.query("select count(*) from dumptmp3.st") tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900) tdSql.checkData(0, 0, 900)
origin_res = tdSql.getResult("select * from timedb1.st") origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st") dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info("test us second : dump check data pass for all data!" ) tdLog.info("test us second : dump check data pass for all data!")
else: else:
tdLog.info("test us second : dump check data failed for all data!" ) tdLog.info("test us second : dump check data failed for all data!")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000")
dump_res = tdSql.getResult("select * from dumptmp2.st") dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " ) tdLog.info(" test us second : dump check data pass for data! ")
else: else:
tdLog.info(" test us second : dump check data failed for data!" ) tdLog.info(" test us second : dump check data failed for data!")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st") dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test us second : dump check data pass for data! " ) tdLog.info(" test us second : dump check data pass for data! ")
else: else:
tdLog.info(" test us second : dump check data failed for data! " ) tdLog.info(" test us second : dump check data failed for data! ")
# ms second support test case # ms second support test case
os.system("rm -rf ./taosdumptest/") os.system("rm -rf ./taosdumptest/")
...@@ -296,10 +299,11 @@ class TDTestCase: ...@@ -296,10 +299,11 @@ class TDTestCase:
self.createdb(precision="ms") self.createdb(precision="ms")
os.system( os.system(
"%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
binPath) binPath)
os.system( os.system(
'%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
...@@ -312,43 +316,42 @@ class TDTestCase: ...@@ -312,43 +316,42 @@ class TDTestCase:
os.system( os.system(
"sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)
tdSql.query("select count(*) from dumptmp1.st") tdSql.query("select count(*) from dumptmp1.st")
tdSql.checkData(0,0,1000) tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from dumptmp2.st") tdSql.query("select count(*) from dumptmp2.st")
tdSql.checkData(0,0,510) tdSql.checkData(0, 0, 510)
tdSql.query("select count(*) from dumptmp3.st") tdSql.query("select count(*) from dumptmp3.st")
tdSql.checkData(0,0,900) tdSql.checkData(0, 0, 900)
origin_res = tdSql.getResult("select * from timedb1.st") origin_res = tdSql.getResult("select * from timedb1.st")
dump_res = tdSql.getResult("select * from dumptmp1.st") dump_res = tdSql.getResult("select * from dumptmp1.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info("test ms second : dump check data pass for all data!" ) tdLog.info("test ms second : dump check data pass for all data!")
else: else:
tdLog.info("test ms second : dump check data failed for all data!" ) tdLog.info("test ms second : dump check data failed for all data!")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000")
dump_res = tdSql.getResult("select * from dumptmp2.st") dump_res = tdSql.getResult("select * from dumptmp2.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " ) tdLog.info(" test ms second : dump check data pass for data! ")
else: else:
tdLog.info(" test ms second : dump check data failed for data!" ) tdLog.info(" test ms second : dump check data failed for data!")
origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") origin_res = tdSql.getResult(
"select * from timedb1.st where ts >=1625068810000 ")
dump_res = tdSql.getResult("select * from dumptmp3.st") dump_res = tdSql.getResult("select * from dumptmp3.st")
if origin_res == dump_res: if origin_res == dump_res:
tdLog.info(" test ms second : dump check data pass for data! " ) tdLog.info(" test ms second : dump check data pass for data! ")
else: else:
tdLog.info(" test ms second : dump check data failed for data! " ) tdLog.info(" test ms second : dump check data failed for data! ")
os.system("rm -rf ./taosdumptest/") os.system("rm -rf ./taosdumptest/")
os.system("rm -rf ./dump_result.txt") os.system("rm -rf ./dump_result.txt")
os.system("rm -rf *.py.sql") os.system("rm -rf *.py.sql")
......