diff --git a/cmake/define.inc b/cmake/define.inc index 6c466fee026097b0bdeb89c7a4fc54fc382c2726..87c88b35a9e9f68ce9d30e340f5b13570ce00231 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -45,6 +45,10 @@ IF (TD_TQ) ADD_DEFINITIONS(-D_TD_TQ_) ENDIF () +IF (TD_PRO) + ADD_DEFINITIONS(-D_TD_PRO_) +ENDIF () + IF (TD_MEM_CHECK) ADD_DEFINITIONS(-DTAOS_MEM_CHECK) ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index 9d716e1e7345955f7b6b844c85ace7e7bd5c6080..d746cf52f6eb016795d6fa6d01f408925159c710 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -49,6 +49,9 @@ IF (${DBNAME} MATCHES "power") ELSEIF (${DBNAME} MATCHES "tq") SET(TD_TQ TRUE) MESSAGE(STATUS "tq is true") +ELSEIF (${DBNAME} MATCHES "pro") + SET(TD_PRO TRUE) + MESSAGE(STATUS "pro is true") ENDIF () IF (${DLLTYPE} MATCHES "go") diff --git a/cmake/version.inc b/cmake/version.inc index 77e0479169809b9302e544ec3343024136ed2f36..baba08d748ea59df3ed3a4eb27343ba159c074eb 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.2.0.1") + SET(TD_VER_NUMBER "2.2.0.2") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/packaging/release.sh b/packaging/release.sh index 5ba6c01a0bd5689278bdb5c86b538b3c447f086a..44887c6cf749ecfecdef46799311de38dbbbed23 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -22,7 +22,7 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] pagMode=full # [full | lite] soMode=dynamic # [static | dynamic] -dbName=taos # [taos | power | tq] +dbName=taos # [taos | power | tq | pro] allocator=glibc # [glibc | jemalloc] verNumber="" verNumberComp="1.0.0.0" @@ -78,7 +78,7 @@ do echo " -l [full | lite] " echo " -a [glibc | jemalloc] " echo " -s [static | dynamic] " - echo " -d [taos | power | tq ] " + echo " -d [taos | power | tq | pro] " echo " -n [version number] " echo " -m [compatible version number] " exit 0 @@ -253,6 +253,10 @@ if [ "$osType" != "Darwin" ]; then ${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + elif [[ "$dbName" == "pro" ]]; then + ${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} + ${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} + ${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} else ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} @@ -262,4 +266,3 @@ else cd ${script_dir}/tools ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName} fi - diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh new file mode 100755 
index 0000000000000000000000000000000000000000..11165dbdd8bdf6afb4659250499cf1d9184c2395 --- /dev/null +++ b/packaging/tools/install_arbi_pro.sh @@ -0,0 +1,293 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) + +bin_link_dir="/usr/bin" +#inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/tarbitrator" + +# old bin dir +bin_dir="/usr/local/tarbitrator/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact hanatech.com.cn for support." 
+ os_type=1 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/bin + #${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi_prodb.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_prodb.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function clean_service_on_sysvinit() { + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install prodbs service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function install_service_on_systemd() { + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo} systemctl enable tarbitratord +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop taosd + kill_tarbitrator + fi +} + +function update_prodb() { + # Start to update + echo -e "${GREEN}Start to update ProDB's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop tarbitratord || : + elif ((${service_mod}==1)); then + ${csudo} service tarbitratord stop || : + else + kill_tarbitrator + fi + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + + echo + #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mProDB's arbitrator is updated successfully!${NC}" +} + +function install_prodb() { + # Start to install + echo -e "${GREEN}Start to install ProDB's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + echo + #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} 
systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi + + echo -e "\033[44;32;1mProDB's arbitrator is installed successfully!${NC}" + echo +} + + +## ==============================Main program starts from here============================ +# Install server and client +if [ -x ${bin_dir}/tarbitrator ]; then + update_flag=1 + update_prodb +else + install_prodb +fi + diff --git a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..fff8ae31200669ee3ab918a873e33fc32ece37c8 --- /dev/null +++ b/packaging/tools/install_client_pro.sh @@ -0,0 +1,248 @@ +#!/bin/bash +# +# This file is used to install ProDB client on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/ProDB" + log_dir="/var/log/ProDB" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/ProDB" + log_dir="~/ProDB/log" +fi + +log_link_dir="/usr/local/ProDB/log" + +cfg_install_dir="/etc/ProDB" + +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi + +#install main path +install_main_dir="/usr/local/ProDB" + +# old bin dir +bin_dir="/usr/local/ProDB/bin" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "prodbc" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/prodbc || : + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/prodemo || : + ${csudo} rm -f ${bin_link_dir}/prodump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmprodb || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/prodbc ] && ${csudo} ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || : + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || : + [ -x ${install_main_dir}/bin/prodump ] && ${csudo} ln -s ${install_main_dir}/bin/prodump ${bin_link_dir}/prodump || : + fi + [ -x ${install_main_dir}/bin/remove_client_prodb.sh ] && 
${csudo} ln -s ${install_main_dir}/bin/remove_client_prodb.sh ${bin_link_dir}/rmprodb || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi + + ${csudo} ldconfig +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : + + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update_prodb() { + # Start to update + if [ ! -e prodb.tar.gz ]; then + echo "File prodb.tar.gz does not exist" + exit 1 + fi + tar -zxf prodb.tar.gz + + echo -e "${GREEN}Start to update ProDB client...${NC}" + # Stop the client shell if running + if pidof prodbc &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mProDB client is updated successfully!${NC}" + + rm -rf $(tar -tf prodb.tar.gz) +} + +function install_prodb() { + # Start to install + if [ ! 
-e prodb.tar.gz ]; then + echo "File prodb.tar.gz does not exist" + exit 1 + fi + tar -zxf prodb.tar.gz + + echo -e "${GREEN}Start to install ProDB client...${NC}" + + install_main_path + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mProDB client is installed successfully!${NC}" + + rm -rf $(tar -tf prodb.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or update the client +# if the server is already installed, don't install the client + if [ -e ${bin_dir}/prodbs ]; then + echo -e "\033[44;32;1mProDB server is already installed, so there is no need to install the client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/prodbc ]; then + update_flag=1 + update_prodb + else + install_prodb + fi diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..564561441646d4bd27f22c5abd9250a9c3377002 --- /dev/null +++ b/packaging/tools/install_pro.sh @@ -0,0 +1,948 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) +# Dynamic directory +data_dir="/var/lib/ProDB" +log_dir="/var/log/ProDB" + +data_link_dir="/usr/local/ProDB/data" +log_link_dir="/usr/local/ProDB/log" + +cfg_install_dir="/etc/ProDB" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/ProDB" + +# old bin dir +bin_dir="/usr/local/ProDB/bin" + +service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are
any problems with the installation and operation, " + echo " please feel free to contact hanatech.com.cn for support." + os_type=1 +fi + + +# ============================= get input parameters ================================================= + +# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] + +# set parameters by default value +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] + +while getopts "hv:e:i:" arg +do + case $arg in + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$( echo $OPTARG ) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) #unknown option + echo "unknown argument" + exit 1 + ;; + esac +done + +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/prodbc || : + ${csudo} rm -f ${bin_link_dir}/prodbs || : + ${csudo} rm -f ${bin_link_dir}/prodemo || : + ${csudo} rm -f ${bin_link_dir}/rmprodb || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/prodbc ] && ${csudo} ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || : + [ -x ${install_main_dir}/bin/prodbs ] && ${csudo} ln -s ${install_main_dir}/bin/prodbs ${bin_link_dir}/prodbs || : + [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || : + [ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d ${lib64_link_dir} && !
-e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi +} + +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + [ ! 
-z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" + + # first full-qualified domain name (FQDN) for ProDB cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.hanatech.com.cn:6030) of an existing ProDB cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + ${csudo} mkdir -p ${data_dir} + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof prodbs &> /dev/null; then + ${csudo} service prodbs stop || : + fi + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} chkconfig --del prodbs || : + fi + + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} insserv -r prodbs || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} update-rc.d -f prodbs remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/prodbs || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install prodbs service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/prodbs.deb ${install_main_dir}/init.d/prodbs + ${csudo} cp ${script_dir}/init.d/prodbs.deb ${service_config_dir}/prodbs && ${csudo} chmod a+x ${service_config_dir}/prodbs + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/prodbs.rpm ${install_main_dir}/init.d/prodbs + ${csudo} cp ${script_dir}/init.d/prodbs.rpm ${service_config_dir}/prodbs && ${csudo} chmod a+x ${service_config_dir}/prodbs + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm 
${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add prodbs || : + ${csudo} chkconfig --level 2345 prodbs on || : + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv prodbs || : + ${csudo} insserv -d prodbs || : + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d prodbs defaults || : + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + prodbs_service_config="${service_config_dir}/prodbs.service" + if systemctl is-active --quiet prodbs; then + echo "ProDB is running, stopping it..." + ${csudo} systemctl stop prodbs &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable prodbs &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${prodbs_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for ProDB is running, stopping it..." + ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${nginx_service_config} + fi +} + +function install_service_on_systemd() { + clean_service_on_systemd + + prodbs_service_config="${service_config_dir}/prodbs.service" + ${csudo} bash -c "echo '[Unit]' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'Description=ProDB server service' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${prodbs_service_config}" + ${csudo} bash -c "echo >> ${prodbs_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/prodbs' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'ExecStartPre=/usr/local/ProDB/bin/startPre.sh' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${prodbs_service_config}" + ${csudo} bash -c "echo >> ${prodbs_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${prodbs_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> 
${prodbs_service_config}" + ${csudo} systemctl enable prodbs + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> 
${nginx_service_config}" + if ! ${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop prodbs + kill_process prodbs + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` + else + min_compatible_version=$(${script_dir}/bin/prodbs -V | head -1 | cut -d ' ' -f 5) + fi + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update_prodb() { + # Start to update + if [ ! -e prodb.tar.gz ]; then + echo "File prodb.tar.gz does not exist" + exit 1 + fi + tar -zxf prodb.tar.gz + install_jemalloc + + # Check if version compatible + if ! is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update ProDB...${NC}" + # Stop the service if running + if pidof prodbs &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop prodbs || : + elif ((${service_mod}==1)); then + ${csudo} service prodbs stop || : + else + kill_process prodbs + fi + sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for ProDB is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for ProDB does not work! 
Please try again!\033[0m" + fi + fi + fi + + #echo + #echo -e "\033[44;32;1mProDB is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} service prodbs start${NC}" + else + echo -e "${GREEN_DARK}To start ProDB ${NC}: ./prodbs${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell${NC}" + fi + + echo + echo -e "\033[44;32;1mProDB is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1mProDB client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf prodb.tar.gz) +} + +function install_prodb() { + # Start to install + if [ ! -e prodb.tar.gz ]; then + echo "File prodb.tar.gz does not exist" + exit 1 + fi + tar -zxf prodb.tar.gz + + echo -e "${GREEN}Start to install ProDB...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for ProDB is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for ProDB does not work! Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask if to start the service + #echo + #echo -e "\033[44;32;1mProDB is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} service prodbs start${NC}" + else + echo -e "${GREEN_DARK}To start ProDB ${NC}: prodbs${NC}" + fi + + if [ ! -z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! 
-z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $serverFqdn${GREEN_DARK} to login into ProDB server${NC}" + echo + fi + echo -e "\033[44;32;1mProDB is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mProDB client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf prodb.tar.gz) +} + + +## ==============================Main program starts from here============================ +serverFqdn=$(hostname) +if [ "$verType" == "server" ]; then + # Install server and client + if [ -x ${bin_dir}/prodbs ]; then + update_flag=1 + update_prodb + else + install_prodb + fi +elif [ "$verType" == "client" ]; then + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/prodbc ]; then + update_flag=1 + update_prodb client + else + install_prodb client + fi +else + echo "please input correct verType" +fi diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..6ce3765e44acc408ced9730c54b793338eb37b38 --- /dev/null +++ b/packaging/tools/makearbi_pro.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Generate arbitrator's tar.gz setup package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/ProDB-enterprise-arbitrator-${version}" +else + install_dir="${release_dir}/ProDB-arbitrator-${version}" +fi + +# Directories and files. +bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh" +install_files="${script_dir}/install_arbi_pro.sh" + +#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord +init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord + +# make directories. +mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_pro.sh || : +#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" 
+ exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..599c91fbf082955887c677b750aa12f946c0890b --- /dev/null +++ b/packaging/tools/makeclient_pro.sh @@ -0,0 +1,225 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client in all os system +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/ProDB-enterprise-client-${version}" +else + install_dir="${release_dir}/ProDB-client-${version}" +fi + +# Directories and files. + +if [ "$osType" != "Darwin" ]; then + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_pro.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + +install_files="${script_dir}/install_client_pro.sh" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg + +sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg +sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg +sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg + +mkdir -p ${install_dir}/bin +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc + cp ${script_dir}/remove_pro.sh ${install_dir}/bin + else + cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc + cp ${script_dir}/remove_pro.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + fi +else + cp ${bin_files} ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp 
${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + +cd ${install_dir} + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f prodb.tar.gz * --remove-files || : +else + tar -zcv -f prodb.tar.gz * || : + mv prodb.tar.gz .. + rm -rf ./* + mv ../prodb.tar.gz . +fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh + mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh + mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh +fi +chmod a+x ${install_dir}/install_client_pro.sh + +# Copy example code +mkdir -p ${install_dir}/examples +examples_dir="${top_dir}/tests/examples" +cp -r ${examples_dir}/c ${install_dir}/examples +sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c +sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c + +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + cp -r ${examples_dir}/JDBC ${install_dir}/examples + cp -r ${examples_dir}/matlab ${install_dir}/examples + mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m + sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m + cp -r ${examples_dir}/python ${install_dir}/examples + sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py + cp -r ${examples_dir}/R ${install_dir}/examples + sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt + cp -r ${examples_dir}/go ${install_dir}/examples + mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go + sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go +fi +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +# Copy connector +connector_dir="${code_dir}/connector" +mkdir -p ${install_dir}/connector + +if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then + if [ "$osType" != "Darwin" ]; then + cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: + fi + if [ -d "${connector_dir}/grafanaplugin/dist" ]; then + cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin + else + echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" + fi + if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then + cp -r ${connector_dir}/go ${install_dir}/connector + else + echo "WARNING: go connector not found, please check if want to use it!" 
+ fi + cp -r ${connector_dir}/python ${install_dir}/connector + mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb + sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py + sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py + sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py +fi + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi + +cd ${curr_dir} diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..ffe4566b42017a7bffa6166ae28e18ca29bd03cd --- /dev/null +++ b/packaging/tools/makepkg_pro.sh @@ -0,0 +1,193 @@ +#!/bin/bash +# +# Generate tar.gz package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 +versionComp=$9 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/ProDB-enterprise-server-${version}" +else + install_dir="${release_dir}/ProDB-server-${version}" +fi + +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi +install_files="${script_dir}/install_pro.sh" +nginx_dir="${code_dir}/../../enterprise/src/plugins/web" + +# make directories. 
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg + +#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/bin +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc + cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs + cp ${script_dir}/remove_pro.sh ${install_dir}/bin +else + cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc + cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs + cp ${script_dir}/remove_pro.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump + cp ${build_dir}/bin/tarbitrator ${install_dir}/bin + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/startPre.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_pro.sh >> remove_prodb_temp.sh + mv remove_prodb_temp.sh ${install_dir}/bin/remove_pro.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/*.html + sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/js/*.js + + sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg + sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg + sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + +cd ${install_dir} +tar -zcv -f prodb.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar prodb.tar.gz error !!!" 
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
+ mv install_prodb_temp.sh ${install_dir}/install_pro.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
+ mv install_prodb_temp.sh ${install_dir}/install_pro.sh
+fi
+chmod a+x ${install_dir}/install_pro.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
+ sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if you want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
+
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
+fi
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" + exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/remove_arbi_pro.sh b/packaging/tools/remove_arbi_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..ff10478881628bdaf027c618a1b89f204ebbdb35 --- /dev/null +++ b/packaging/tools/remove_arbi_pro.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Script to stop the service and uninstall ProDB's arbitrator + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/tarbitrator" +bin_link_dir="/usr/bin" + +service_config_dir="/etc/systemd/system" +tarbitrator_service_name="tarbitratord" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf /arbitrator.log || : +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "ProDB tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + echo "ProDB's tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. 
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}ProDB's arbitrator is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_client_pro.sh b/packaging/tools/remove_client_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..59e4e8997620af035821df5a975fe58f1357c9dc
--- /dev/null
+++ b/packaging/tools/remove_client_pro.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Script to stop the client and uninstall the ProDB client, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/ProDB"
+
+log_link_dir="/usr/local/ProDB/log"
+cfg_link_dir="/usr/local/ProDB/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+ if [ -n "$(pidof prodbc)" ]; then
+ ${csudo} kill -9 $(pidof prodbc) || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/prodbc || :
+ ${csudo} rm -f ${bin_link_dir}/prodemo || :
+ ${csudo} rm -f ${bin_link_dir}/prodump || :
+ ${csudo} rm -f ${bin_link_dir}/rmprodb || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}ProDB client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f6dad22bc21b02a9d717d530c50bc19c5a718478
--- /dev/null
+++ b/packaging/tools/remove_pro.sh
@@ -0,0 +1,210 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall ProDB, but retain the config, data and log files.
+ +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/ProDB" +data_link_dir="/usr/local/ProDB/data" +log_link_dir="/usr/local/ProDB/log" +cfg_link_dir="/usr/local/ProDB/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +service_config_dir="/etc/systemd/system" +prodb_service_name="prodbs" +tarbitrator_service_name="tarbitratord" +nginx_service_name="nginxd" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_prodbs() { + pid=$(ps -ef | grep "prodbs" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/prodbc || : + ${csudo} rm -f ${bin_link_dir}/prodbs || : + ${csudo} rm -f ${bin_link_dir}/prodemo || : + ${csudo} rm -f ${bin_link_dir}/prodump || : + ${csudo} rm -f ${bin_link_dir}/rmprodb || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +function clean_service_on_systemd() { + prodb_service_config="${service_config_dir}/${prodb_service_name}.service" + if systemctl is-active --quiet ${prodb_service_name}; then + echo "ProDB prodbs is running, stopping it..." + ${csudo} systemctl stop ${prodb_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${prodb_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${prodb_service_config} + + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "ProDB tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for ProDB is running, stopping it..." 
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi +} + +function clean_service_on_sysvinit() { + if pidof prodbs &> /dev/null; then + echo "ProDB prodbs is running, stopping it..." + ${csudo} service prodbs stop || : + fi + + if pidof tarbitrator &> /dev/null; then + echo "ProDB tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} chkconfig --del prodbs || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} insserv -r prodbs || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/prodbs ]; then + ${csudo} update-rc.d -f prodbs remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/prodbs || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop taosd + kill_prodbs + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config +# Remove data link directory +${csudo} rm -rf ${data_link_dir} || : + +${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} +if [[ -e /etc/os-release ]]; then + osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +else + osinfo="" +fi + +echo -e "${GREEN}ProDB is removed successfully!${NC}" +echo diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 11bdfe868710541539bf7e2df2af75d8c2f0fec6..80a538b1a79dd6870b95d2a746050bc366606fa3 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.2.0.1' +version: '2.2.0.2' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.2.0.1 + - usr/lib/libtaos.so.2.2.0.2 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b9aaea8469795771854919a2584d28d5c3f4e9e3..3ab9e2f66faafc8d4bda0b6993c0070e0ec14ea6 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -580,7 +580,7 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc SKVRow kvRow = memRowKvBody(dest); memRowSetType(dest, SMEM_ROW_KV); - memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); + memRowSetKvVersion(dest, dataRowVersion(dataRow)); kvRowSetNCols(kvRow, nBoundCols); kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); diff --git a/src/client/src/taos.def b/src/client/src/taos.def index 7d3b8e80c20226c4a509c95ab5728f41852110f5..d64abb2ba858c30d252a2d92b7709ebc33618836 100644 --- a/src/client/src/taos.def +++ b/src/client/src/taos.def @@ -43,3 +43,10 @@ taos_unsubscribe taos_open_stream taos_close_stream taos_load_table_info +taos_data_type +taos_stmt_set_sub_tbname +taos_stmt_get_param +taos_stmt_bind_param_batch +taos_stmt_bind_single_param_batch +taos_is_null +taos_insert_lines diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 1041034011001e590c373e0bae174e251b3ea234..82be24cff3aa983cf9d1235e07962f0cc04275d2 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -77,6 +77,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); executeQuery(pSql, pQueryInfo); + taosReleaseRef(tscObjRef, pSql->self); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 015265b505f430d500285387403beca01684d8f8..7af5a7df617e4b304736c6cb73b99785af39be25 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2752,7 +2752,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaMap); + if (pSql->res.pRsp == NULL) { + tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name); + return 0; + } + return tscProcessTableMetaRsp(pSql); } return 0; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 8ab24bdde2a2cc7063f0c70efe56ede811fc139d..876eb4fa92bada1d22e0e43b6d6532d31a0b913c 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -663,16 +663,6 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_MS; taosInitConfigOption(cfg); - cfg.option = "rpcForceTcp"; - cfg.ptr = &tsRpcForceTcp; - cfg.valType = TAOS_CFG_VTYPE_INT32; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; - cfg.minValue = 0; - cfg.maxValue = 1; - cfg.ptrLength = 0; - cfg.unitType = TAOS_CFG_UTYPE_NONE; - taosInitConfigOption(cfg); - cfg.option = "rpcMaxTime"; cfg.ptr = &tsRpcMaxTime; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -683,6 +673,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_SECOND; taosInitConfigOption(cfg); + cfg.option = "rpcForceTcp"; + cfg.ptr = &tsRpcForceTcp; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + 
taosInitConfigOption(cfg); + cfg.option = "statusInterval"; cfg.ptr = &tsStatusInterval; cfg.valType = TAOS_CFG_VTYPE_INT32; diff --git a/src/connector/python/taos/field.py b/src/connector/python/taos/field.py index 445cd8afdba6f2512c73be95c9b0dbd8dc00da8a..b0bec58b932f2136b868739bb28fca04de759e3f 100644 --- a/src/connector/python/taos/field.py +++ b/src/connector/python/taos/field.py @@ -165,12 +165,14 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=Field assert nbytes is not None res = [] for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: + rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() + chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte)) + buffer = create_string_buffer(rbyte + 1) + buffer[:rbyte] = chars[0][:rbyte] + if rbyte == 1 and buffer[0] == b'\xff': res.append(None) + else: + res.append(cast(buffer, c_char_p).value.decode()) return res @@ -179,11 +181,14 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldT assert nbytes is not None res = [] for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: + rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() + chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte)) + buffer = create_string_buffer(rbyte + 1) + buffer[:rbyte] = chars[0][:rbyte] + if rbyte == 4 and buffer[:4] == b'\xff'*4: res.append(None) + else: + res.append(cast(buffer, c_char_p).value.decode()) return res diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index bc4ddbe067b0997695ef22bbcc21228df9e92199..8fd6fd29ed7875e7b17ecf627755259ebe3fe163 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -83,6 +83,8 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DEFAULT_PASS "powerdb" #elif (_TD_TQ_ == true) #define TSDB_DEFAULT_PASS "tqueue" +#elif (_TD_PRO_ == true) +#define TSDB_DEFAULT_PASS "prodb" #else #define TSDB_DEFAULT_PASS "taosdata" #endif diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index ef3a2458a07ba7ab3ad516566e1a38a32526146b..55a854066953e2bf2f155b4b461811fd781982ed 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -44,6 +44,13 @@ char PROMPT_HEADER[] = "tq> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 4; +#elif (_TD_PRO_ == true) +char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "ProDB> "; + +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 7; #else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9b3be1556a47b52142f41b84ca385108088a9018..5adf9f342a41f9b3886c9c9654e0ef9dd7571c54 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -104,6 +104,7 @@ extern char configDir[]; #define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3) #define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16) +#define DEFAULT_NTHREADS 8 #define DEFAULT_TIMESTAMP_STEP 1 #define DEFAULT_INTERLACE_ROWS 0 #define DEFAULT_DATATYPE_NUM 1 @@ -227,7 +228,7 @@ typedef struct SArguments_S { char * sqlFile; bool use_metric; bool drop_database; - bool insert_only; + bool aggr_func; bool answer_yes; bool debug_print; bool verbose_print; @@ -375,8 +376,7 @@ typedef struct SDbs_S { char password[SHELL_MAX_PASSWORD_LEN]; char resultFile[MAX_FILE_NAME_LEN]; bool use_metric; - bool insert_only; - bool do_aggreFunc; + bool aggr_func; bool asyncMode; uint32_t threadCount; @@ -605,6 +605,9 @@ char *g_rand_current_buff = NULL; char *g_rand_phase_buff = NULL; char *g_randdouble_buff = NULL; +char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)", + "max(current)", "min(current)", "first(current)", "last(current)"}; + char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; @@ -619,6 +622,8 @@ SArguments g_args = { "powerdb", // password #elif (_TD_TQ_ == true) "tqueue", // password +#elif (_TD_PRO_ == true) + "prodb", // password #else "taosdata", // password #endif @@ -628,7 +633,7 @@ SArguments g_args = { NULL, // sqlFile true, // use_metric true, // drop_database - true, // insert_only + false, // aggr_func false, // debug_print false, // verbose_print false, // performance statistic print @@ -646,7 +651,7 @@ SArguments g_args = { 64, // binwidth 4, // columnCount, timestamp + float + int + float 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow - 8, // num_of_connections/thread + DEFAULT_NTHREADS,// nthreads 0, // insert_interval DEFAULT_TIMESTAMP_STEP, // timestamp_step 1, // query_times @@ -748,19 +753,24 @@ static void printHelp() { char indent[10] = " "; printf("%s\n\n", "Usage: taosdemo [OPTION...]"); printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", - "The meta file to the execution procedure. Default is './meta.json'."); + "The meta file to the execution procedure."); printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", "The user name to use when connecting to the server."); #ifdef _TD_POWER_ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. Default is 'powerdb'"); + "The password to use when connecting to the server. By default is 'powerdb'"); printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. Default is '/etc/power/'."); + "Configuration directory. By default is '/etc/power/'."); #elif (_TD_TQ_ == true) printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. Default is 'tqueue'"); + "The password to use when connecting to the server. By default is 'tqueue'"); + printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", + "Configuration directory. By default is '/etc/tq/'."); +#elif (_TD_PRO_ == true) + printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", + "The password to use when connecting to the server. By default is 'prodb'"); printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. 
Default is '/etc/tq/'."); + "Configuration directory. By default is '/etc/ProDB/'."); #else printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", "The password to use when connecting to the server."); @@ -772,24 +782,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", "The TCP/IP port number to use for the connection."); printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", - "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); + "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'."); printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", - "Destination database. Default is 'test'."); + "Destination database. By default is 'test'."); printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", - "Set the replica parameters of the database, Default 1, min: 1, max: 3."); + "Set the replica parameters of the database, By default use 1, min: 1, max: 3."); printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", - "Table prefix name. Default is 'd'."); + "Table prefix name. By default use 'd'."); printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", "The select sql file."); printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", - "Direct output to the named file. Default is './output.txt'."); + "Direct output to the named file. By default use './output.txt'."); printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", - "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); + "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", - "The data_type of columns, default: FLOAT, INT, FLOAT."); + "The data_type of columns, By default use: FLOAT, INT, FLOAT."); printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", - "The width of data_type 'BINARY' or 'NCHAR'. Default is ", + "The width of data_type 'BINARY' or 'NCHAR'. By default use ", g_args.binwidth); printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", "The number of columns per record. Demo mode by default is ", @@ -798,32 +808,32 @@ static void printHelp() { MAX_NUM_COLUMNS); printf("%s%s%s%s\n", indent, indent, indent, "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); - printf("%s%s%s%s\n", indent, "-T, --threads=NUMBER", "\t\t", - "The number of threads. Default is 10."); + printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", + "The number of threads. By default use ", DEFAULT_NTHREADS); printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", - "The sleep time (ms) between insertion. Default is 0."); + "The sleep time (ms) between insertion. By default is 0."); printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", - "The timestamp step between insertion. Default is ", + "The timestamp step between insertion. By default is ", DEFAULT_TIMESTAMP_STEP); printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", - "The interlace rows of insertion. Default is ", + "The interlace rows of insertion. By default is ", DEFAULT_INTERLACE_ROWS); printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", - "The number of records per request. Default is 30000."); + "The number of records per request. By default is 30000."); printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", - "The number of tables. Default is 10000."); + "The number of tables. 
By default is 10000."); printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", - "The number of records per table. Default is 10000."); + "The number of records per table. By default is 10000."); printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tThe default is to simulate power equipment scenario."); - printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t", - "No-insert flag."); - printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt."); + printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); + printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", + "Test aggregation functions after insertion."); + printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt."); printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", - "Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order."); + "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order."); printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", - "Out of order data's range, ms, default is 1000."); + "Out of order data's range. Unit is ms. By default is 1000."); printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", "Print debug info."); printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", @@ -1712,13 +1722,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } } else if ((strcmp(argv[i], "-N") == 0) || (0 == strcmp(argv[i], "--normal-table"))) { + arguments->demo_mode = false; arguments->use_metric = false; } else if ((strcmp(argv[i], "-M") == 0) || (0 == strcmp(argv[i], "--random"))) { arguments->demo_mode = false; } else if ((strcmp(argv[i], "-x") == 0) - || (0 == strcmp(argv[i], "--no-insert"))) { - arguments->insert_only = false; + || (0 == strcmp(argv[i], "--aggr-func"))) { + arguments->aggr_func = true; } else if ((strcmp(argv[i], "-y") == 0) || (0 == strcmp(argv[i], "--answer-yes"))) { arguments->answer_yes = true; @@ -2429,10 +2440,11 @@ static void init_rand_data() { static int printfInsertMeta() { SHOW_PARSE_RESULT_START(); - if (g_args.demo_mode) - printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n"); - else + if (g_args.demo_mode) { + printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n"); + } else { printf("\ntaosdemo is simulating random data as you request..\n\n"); + } if (g_args.iface != INTERFACE_BUT) { // first time if no iface specified @@ -10065,11 +10077,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, free(infos); } -static void *readTable(void *sarg) { -#if 1 +static void *queryNtableAggrFunc(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; - setThreadName("readTable"); + setThreadName("queryNtableAggrFunc"); char *command = calloc(1, BUFFER_SIZE); assert(command); @@ -10092,10 +10103,20 @@ static void *readTable(void *sarg) { int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t totalData = insertRows * ntables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; + bool aggr_func = g_Dbs.aggr_func; - int n = do_aggreFunc ? 
(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - if (!do_aggreFunc) { + char **aggreFunc; + int n; + + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } + + if (!aggr_func) { printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); } printf("%"PRId64" records:\n", totalData); @@ -10106,9 +10127,11 @@ static void *readTable(void *sarg) { uint64_t count = 0; for (int64_t i = 0; i < ntables; i++) { sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, - g_aggreFunc[j], tb_prefix, i, startTime); + aggreFunc[j], tb_prefix, i, startTime); - double t = taosGetTimestampMs(); + double t = taosGetTimestampUs(); + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -10125,29 +10148,27 @@ static void *readTable(void *sarg) { count++; } - t = taosGetTimestampMs() - t; + t = taosGetTimestampUs() - t; totalT += t; taos_free_result(pSql); } fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", - g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData, - (double)(ntables * insertRows) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000); + aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, + (double)(ntables * insertRows) / totalT, totalT / 1000000); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000); } fprintf(fp, "\n"); fclose(fp); free(command); -#endif return NULL; } -static void *readMetric(void *sarg) { -#if 1 +static void *queryStableAggrFunc(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; - setThreadName("readMetric"); + setThreadName("queryStableAggrFunc"); char *command = calloc(1, BUFFER_SIZE); assert(command); @@ -10161,12 +10182,23 @@ static void *readMetric(void *sarg) { int64_t insertRows = pThreadInfo->stbInfo->insertRows; int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t totalData = insertRows * ntables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; + bool aggr_func = g_Dbs.aggr_func; + + char **aggreFunc; + int n; - int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - if (!do_aggreFunc) { + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } + + if (!aggr_func) { printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); } + printf("%"PRId64" records:\n", totalData); fprintf(fp, "Querying On %"PRId64" records:\n", totalData); @@ -10178,18 +10210,29 @@ static void *readMetric(void *sarg) { for (int64_t i = 1; i <= m; i++) { if (i == 1) { - sprintf(tempS, "t1 = %"PRId64"", i); + if (g_args.demo_mode) { + sprintf(tempS, "groupid = %"PRId64"", i); + } else { + sprintf(tempS, "t0 = %"PRId64"", i); + } } else { - sprintf(tempS, " or t1 = %"PRId64" ", i); + if (g_args.demo_mode) { + sprintf(tempS, " or groupid = %"PRId64" ", i); + } else { + sprintf(tempS, " or t0 = %"PRId64" ", i); + } } strncat(condition, tempS, COND_BUF_LEN - 1); - sprintf(command, "SELECT %s FROM meters WHERE %s", g_aggreFunc[j], condition); + sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition); printf("Where condition: %s\n", condition); + + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); fprintf(fp, "%s\n", command); - double t = taosGetTimestampMs(); + double t = taosGetTimestampUs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -10206,11 +10249,11 @@ static void *readMetric(void *sarg) { while(taos_fetch_row(pSql) != NULL) { count++; } - t = taosGetTimestampMs() - t; + t = taosGetTimestampUs() - t; fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - ntables * insertRows / (t * 1000.0), t); - printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0); + ntables * insertRows / (t / 1000), t); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000); taos_free_result(pSql); } @@ -10218,7 +10261,7 @@ static void *readMetric(void *sarg) { } fclose(fp); free(command); -#endif + return NULL; } @@ -11225,9 +11268,8 @@ static void setParaFromArg() { tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); g_Dbs.use_metric = g_args.use_metric; - g_Dbs.insert_only = g_args.insert_only; - g_Dbs.do_aggreFunc = true; + g_Dbs.aggr_func = g_args.aggr_func; char dataString[TSDB_MAX_BYTES_PER_ROW]; char *data_type = g_args.data_type; @@ -11238,7 +11280,7 @@ static void setParaFromArg() { if ((data_type[0] == TSDB_DATA_TYPE_BINARY) || (data_type[0] == TSDB_DATA_TYPE_BOOL) || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { - g_Dbs.do_aggreFunc = false; + g_Dbs.aggr_func = false; } if (g_args.use_metric) { @@ -11420,7 +11462,7 @@ static void testMetaFile() { } } -static void queryResult() { +static void queryAggrFunc() { // query data pthread_t read_id; @@ -11429,7 +11471,6 @@ static void queryResult() { pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 pThreadInfo->start_table_from = 0; - //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc; if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; @@ -11458,9 +11499,9 @@ static void queryResult() { tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, pThreadInfo); + pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); } else { - pthread_create(&read_id, NULL, readMetric, pThreadInfo); + pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); } pthread_join(read_id, NULL); taos_close(pThreadInfo->taos); @@ -11482,8 +11523,9 @@ static void testCmdLine() { g_args.test_mode = INSERT_TEST; insertTestProcess(); - if (false == g_Dbs.insert_only) - queryResult(); + if (g_Dbs.aggr_func) { + queryAggrFunc(); + } } 
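For reference on the flag change above: -x (--aggr-func) now opts in to the post-insert aggregation query pass (queryAggrFunc), and plain insertion is the default behavior. A minimal usage sketch, assuming a reachable server and only the flags defined in this file:

# insert into 100 tables, 1000 rows each, no aggregation queries (new default)
taosdemo -t 100 -n 1000 -y
# same insertion, then run the count/avg/sum/max/min/first/last query pass
taosdemo -t 100 -n 1000 -y -x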
int main(int argc, char *argv[]) { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 683562be32e29d0588fedfd420fbd0151129fb22..133719dad62f913d7132f03fcc1f51fe4e8e8b97 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -181,6 +181,7 @@ typedef struct { int32_t threadIndex; int32_t totalThreads; char dbName[TSDB_DB_NAME_LEN]; + int precision; void *taosCon; int64_t rowsOfDumpOut; int64_t tablesOfDumpOut; @@ -246,11 +247,6 @@ static struct argp_option options[] = { {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, -#if TSDB_SUPPORT_NANOSECOND == 1 - {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6}, -#else - {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6}, -#endif {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, @@ -281,8 +277,11 @@ typedef struct arguments { bool with_property; bool avro; int64_t start_time; + char humanStartTime[28]; int64_t end_time; + char humanEndTime[28]; char precision[8]; + int32_t data_batch; int32_t max_sql_len; int32_t table_batch; // num of table which will be dump into one output file. @@ -296,6 +295,8 @@ typedef struct arguments { bool debug_print; bool verbose_print; bool performance_print; + + int dbCount; } SArguments; /* Our argp parser. 
*/ @@ -318,13 +319,17 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); static int32_t taosDumpTable(char *tbName, char *metric, - FILE *fp, TAOS* taosCon, char* dbName); + FILE *fp, TAOS* taosCon, char* dbName, int precision); static int taosDumpTableData(FILE *fp, char *tbName, TAOS* taosCon, char* dbName, + int precision, char *jsonAvroSchema); static int taosCheckParam(struct arguments *arguments); static void taosFreeDbInfos(); -static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName); +static void taosStartDumpOutWorkThreads( + int32_t numOfThread, + char *dbName, + int precision); struct arguments g_args = { // connection option @@ -349,8 +354,10 @@ struct arguments g_args = { false, // schemeonly true, // with_property false, // avro format - -INT64_MAX, // start_time + -INT64_MAX + 1, // start_time + {0}, // humanStartTime INT64_MAX, // end_time + {0}, // humanEndTime "ms", // precision 1, // data_batch TSDB_MAX_SQL_LEN, // max_sql_len @@ -364,7 +371,8 @@ struct arguments g_args = { false, // isDumpIn false, // debug_print false, // verbose_print - false // performance_print + false, // performance_print + 0, // dbCount }; static void errorPrintReqArg2(char *program, char *wrong_arg) @@ -472,12 +480,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; case 'S': // parse time here. - g_args.start_time = atol(arg); break; case 'E': - g_args.end_time = atol(arg); - break; - case 'C': break; case 'B': g_args.data_batch = atoi(arg); @@ -550,7 +554,7 @@ static int queryDbImpl(TAOS *taos, char *command) { return 0; } -static void parse_precision_first( +UNUSED_FUNC static void parse_precision_first( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-C") == 0) { @@ -616,6 +620,73 @@ static void parse_args( } } +static void copyHumanTimeToArg(char *timeStr, bool isStartTime) +{ + if (isStartTime) + strcpy(g_args.humanStartTime, timeStr); + else + strcpy(g_args.humanEndTime, timeStr); +} + +static void copyTimestampToArg(char *timeStr, bool isStartTime) +{ + if (isStartTime) + g_args.start_time = atol(timeStr); + else + g_args.end_time = atol(timeStr); +} + +static void parse_timestamp( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + char *tmp; + bool isStartTime = false; + bool isEndTime = false; + + if (strcmp(argv[i], "-S") == 0) { + isStartTime = true; + } else if (strcmp(argv[i], "-E") == 0) { + isEndTime = true; + } + + if (isStartTime || isEndTime) { + if (NULL == argv[i+1]) { + errorPrint("%s need a valid value following!\n", argv[i]); + exit(-1); + } + tmp = strdup(argv[i+1]); + + if (strchr(tmp, ':') && strchr(tmp, '-')) { + copyHumanTimeToArg(tmp, isStartTime); + } else { + copyTimestampToArg(tmp, isStartTime); + } + } + } +} + +static int getPrecisionByString(char *precision) +{ + if (0 == strncasecmp(precision, + "ms", 2)) { + return TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(precision, + "us", 2)) { + return TSDB_TIME_PRECISION_MICRO; +#if TSDB_SUPPORT_NANOSECOND == 1 + } else if (0 == strncasecmp(precision, + "ns", 2)) { + return TSDB_TIME_PRECISION_NANO; +#endif + } else { + errorPrint("Invalid time precision: %s", + precision); + } + + return -1; +} + +/* static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -634,6 +705,7 
@@ static void parse_timestamp( int64_t tmpEpoch; if (strchr(tmp, ':') && strchr(tmp, '-')) { + strcpy(g_args.humanStartTime, tmp) int32_t timePrec; if (0 == strncasecmp(arguments->precision, "ms", strlen("ms"))) { @@ -672,6 +744,7 @@ static void parse_timestamp( } } } +*/ int main(int argc, char *argv[]) { static char verType[32] = {0}; @@ -682,7 +755,7 @@ int main(int argc, char *argv[]) { /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. */ if (argc > 1) { - parse_precision_first(argc, argv, &g_args); +// parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); parse_args(argc, argv, &g_args); } @@ -714,7 +787,9 @@ int main(int argc, char *argv[]) { printf("with_property: %s\n", g_args.with_property?"true":"false"); printf("avro format: %s\n", g_args.avro?"true":"false"); printf("start_time: %" PRId64 "\n", g_args.start_time); + printf("human readable start time: %s \n", g_args.humanStartTime); printf("end_time: %" PRId64 "\n", g_args.end_time); + printf("human readable end time: %s \n", g_args.humanEndTime); printf("precision: %s\n", g_args.precision); printf("data_batch: %d\n", g_args.data_batch); printf("max_sql_len: %d\n", g_args.max_sql_len); @@ -759,7 +834,9 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); + fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); + fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime); fprintf(g_fpOfResult, "precision: %s\n", g_args.precision); fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); @@ -816,7 +893,8 @@ int main(int argc, char *argv[]) { static void taosFreeDbInfos() { if (g_dbInfos == NULL) return; - for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]); + for (int i = 0; i < g_args.dbCount; i++) + tfree(g_dbInfos[i]); tfree(g_dbInfos); } @@ -1046,6 +1124,88 @@ static int32_t taosSaveTableOfMetricToTempFile( return 0; } +static int getDbCount() +{ + int count; + + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + TAOS_ROW row; + + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + errorPrint("%s() LN%d, failed to allocate command buffer\n", __func__, __LINE__); + return 0; + } + + /* Connect to server */ + taos = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (NULL == taos) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + free(command); + return 0; + } + + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); + + if (0 != code) { + errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n", + __func__, __LINE__, command, taos_errstr(result)); + free(command); + return 0; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'log', but subsequent version changed to 'log' + if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + && (!g_args.allow_sys)) { + continue; + } + + if (g_args.databases) { // input multi dbs + for (int i = 0; g_args.arg_list[i]; i++) { + if (strncasecmp(g_args.arg_list[i], 
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!g_args.all_databases) { // only input one db + if (strncasecmp(g_args.arg_list[0], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } + +_dump_db_point: + + count++; + + if (g_args.databases) { + if (count > g_args.arg_list_len) break; + + } else if (!g_args.all_databases) { + if (count >= 1) break; + } + } + + if (count == 0) { + errorPrint("%d databases valid to dump\n", count); + } + + free(command); + return count; +} + static int taosDumpOut() { TAOS *taos = NULL; TAOS_RES *result = NULL; @@ -1070,7 +1230,14 @@ static int taosDumpOut() { return -1; } - g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); + g_args.dbCount = getDbCount(); + + if (0 == g_args.dbCount) { + errorPrint("%d databases valid to dump\n", g_args.dbCount); + return -1; + } + + g_dbInfos = (SDbInfo **)calloc(g_args.dbCount, sizeof(SDbInfo *)); if (g_dbInfos == NULL) { errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); @@ -1165,9 +1332,9 @@ _dump_db_point: g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes + 1)); - //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + tstrncpy(g_dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + DB_PRECISION_LEN); g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } count++; @@ -1263,8 +1430,10 @@ _dump_db_point: } // start multi threads to dumpout + taosStartDumpOutWorkThreads(totalNumOfThread, - g_dbInfos[0]->name); + g_dbInfos[0]->name, + getPrecisionByString(g_dbInfos[0]->precision)); char tmpFileName[MAX_FILE_NAME_LEN]; _clean_tmp_file: @@ -1465,7 +1634,7 @@ static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema) static int32_t taosDumpTable( char *tbName, char *metric, - FILE *fp, TAOS* taosCon, char* dbName) { + FILE *fp, TAOS* taosCon, char* dbName, int precision) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) @@ -1516,7 +1685,7 @@ static int32_t taosDumpTable( int32_t ret = 0; if (!g_args.schemaonly) { - ret = taosDumpTableData(fp, tbName, taosCon, dbName, + ret = taosDumpTableData(fp, tbName, taosCon, dbName, precision, jsonAvroSchema); } @@ -1607,7 +1776,8 @@ static void* taosDumpOutWorkThreadFp(void *arg) int ret = taosDumpTable( tableRecord.name, tableRecord.metric, - fp, pThread->taosCon, pThread->dbName); + fp, pThread->taosCon, pThread->dbName, + pThread->precision); if (ret >= 0) { // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; @@ -1656,7 +1826,7 @@ static void* taosDumpOutWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) +static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName, int precision) { pthread_attr_t thattr; SThreadParaObj *threadObj = @@ -1675,6 +1845,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) pThread->threadIndex = t; pThread->totalThreads = numOfThread; tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); + pThread->precision = precision; pThread->taosCon = taos_connect(g_args.host, 
g_args.user, g_args.password, NULL, g_args.port); if (pThread->taosCon == NULL) { @@ -1924,7 +2095,8 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) { } // start multi threads to dumpout - taosStartDumpOutWorkThreads(numOfThread, dbInfo->name); + taosStartDumpOutWorkThreads(numOfThread, dbInfo->name, + getPrecisionByString(dbInfo->precision)); for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); (void)remove(tmpBuf); @@ -2215,14 +2387,38 @@ static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbN } static int taosDumpTableData(FILE *fp, char *tbName, - TAOS* taosCon, char* dbName, + TAOS* taosCon, char* dbName, int precision, char *jsonAvroSchema) { int64_t totalRows = 0; char sqlstr[1024] = {0}; + + int64_t start_time, end_time; + if (strlen(g_args.humanStartTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanStartTime); + return -1; + } + } else { + start_time = g_args.start_time; + } + + if (strlen(g_args.humanEndTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanEndTime); + return -1; + } + } else { + end_time = g_args.end_time; + } + sprintf(sqlstr, "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", - dbName, tbName, g_args.start_time, g_args.end_time); + dbName, tbName, start_time, end_time); TAOS_RES* res = taos_query(taosCon, sqlstr); int32_t code = taos_errno(res); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 68529ab8a240c2313ae9417bef9f4112759b0c9f..e3c1af9bc01fddd8c7df78ace6c5c2b6ce13576c 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code), pStable->numOfTags); - + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1321,6 +1326,10 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } + return code; } @@ -1376,6 +1385,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1444,6 +1456,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj 
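/*
 * With the precision in hand, taosDumpTableData() (above) converts a
 * human-readable start/end time supplied via g_args.humanStartTime /
 * g_args.humanEndTime with taosParseTime() before building the range query,
 * and falls back to the numeric g_args.start_time / g_args.end_time when no
 * human-readable value was given. A minimal equivalent sketch of the start
 * bound, assuming taosParseTime() behaves as in the hunk above:
 *
 *     int64_t start_time = g_args.start_time;
 *     if (strlen(g_args.humanStartTime) != 0 &&
 *         TSDB_CODE_SUCCESS != taosParseTime(g_args.humanStartTime, &start_time,
 *                                            strlen(g_args.humanStartTime),
 *                                            precision, 0)) {
 *         errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
 *         return -1;
 *     }
 */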
*pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1489,6 +1504,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c index 650a45aae42c8d2dfba63d8f4e7e6ec35b385ae8..35ca64d79f8b7a883014fd6ca980300ede22d6e2 100644 --- a/src/os/src/linux/linuxEnv.c +++ b/src/os/src/linux/linuxEnv.c @@ -32,6 +32,13 @@ void osInit() { strcpy(tsDataDir, "/var/lib/tq"); strcpy(tsLogDir, "/var/log/tq"); strcpy(tsScriptDir, "/etc/tq"); +#elif (_TD_PRO_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/ProDB"); + } + strcpy(tsDataDir, "/var/lib/ProDB"); + strcpy(tsLogDir, "/var/log/ProDB"); + strcpy(tsScriptDir, "/etc/ProDB"); #else if (configDir[0] == 0) { strcpy(configDir, "/etc/taos"); diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c index b35cb8f040aec5ff4b4fb12665d0842e72958ba1..6f46bb43c75ff2c9735fc53a11bce585c1c213f6 100644 --- a/src/os/src/windows/wEnv.c +++ b/src/os/src/windows/wEnv.c @@ -39,6 +39,14 @@ void osInit() { strcpy(tsDataDir, "C:/TQ/data"); strcpy(tsLogDir, "C:/TQ/log"); strcpy(tsScriptDir, "C:/TQ/script"); +#elif (_TD_PRO_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "C:/ProDB/cfg"); + } + strcpy(tsVnodeDir, "C:/ProDB/data"); + strcpy(tsDataDir, "C:/ProDB/data"); + strcpy(tsLogDir, "C:/ProDB/log"); + strcpy(tsScriptDir, "C:/ProDB/script"); #else if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index e958a8e5ec5b6542d609028ee052d21a9a84d397..2c5bf953a4be1b83cdcfa0d366e49d3b17124dc4 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -399,7 +399,8 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 pContext->oldInUse = pEpSet->inUse; pContext->connType = RPC_CONN_UDPC; - if (contLen > tsRpcMaxUdpSize || tsRpcForceTcp ) pContext->connType = RPC_CONN_TCPC; + + if (contLen > tsRpcMaxUdpSize || tsRpcForceTcp) pContext->connType = RPC_CONN_TCPC; // connection type is application specific. 
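/*
 * The mnodeTable.c hunks above all follow one pattern: after a successful
 * add/drop/modify of a super table tag or column, the callback now calls
 * mnodeGetSuperTableMeta(pMsg), so a successful ALTER TABLE appears to return
 * the refreshed super table meta rather than a bare status code. The shared
 * shape of each callback, as introduced in this diff:
 *
 *     if (code == TSDB_CODE_SUCCESS) {
 *         code = mnodeGetSuperTableMeta(pMsg);
 *     }
 *     return code;
 */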
// for TDengine, all the query, show commands shall have TCP connection @@ -407,7 +408,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META - || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS) + || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS ||type == TSDB_MSG_TYPE_CM_ALTER_TABLE) pContext->connType = RPC_CONN_TCPC; pContext->rid = taosAddRef(tsRpcRefId, pContext); diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 8ce5e7ade80b2006ac8c39fec178994073c5a26d..a13b522eece382fc9c8f0d1de471e2f1c9421840 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -74,7 +74,7 @@ void tsdbFreeMeta(STsdbMeta* pMeta); int tsdbOpenMeta(STsdbRepo* pRepo); int tsdbCloseMeta(STsdbRepo* pRepo); STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid); -STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version); +STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version, int8_t rowType); int tsdbWLockRepoMeta(STsdbRepo* pRepo); int tsdbRLockRepoMeta(STsdbRepo* pRepo); int tsdbUnlockRepoMeta(STsdbRepo* pRepo); @@ -99,7 +99,9 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k } } -static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) { +// set rowType to -1 at default if have no relationship with row +static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version, + int8_t rowType) { STable* pDTable = (pTable->pSuper != NULL) ? 
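/*
 * rpcSendRequest() (above) adds TSDB_MSG_TYPE_CM_ALTER_TABLE to the message
 * types that are always sent over TCP rather than UDP, presumably because the
 * alter-table reply now includes the refreshed super table meta (see the
 * mnodeTable.c hunks) and may exceed a single UDP datagram. The condition
 * after this change, restated for readability:
 *
 *     if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE ||
 *         type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP ||
 *         type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META ||
 *         type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS ||
 *         type == TSDB_MSG_TYPE_CM_ALTER_TABLE) {
 *       pContext->connType = RPC_CONN_TCPC;
 *     }
 */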
pTable->pSuper : pTable; // for performance purpose STSchema* pSchema = NULL; STSchema* pTSchema = NULL; @@ -110,8 +112,12 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, } else { // get the schema with version void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); if (ptr == NULL) { - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - goto _exit; + if (rowType == SMEM_ROW_KV) { + ptr = taosArrayGetLast(pDTable->schema); + } else { + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + goto _exit; + } } pTSchema = *(STSchema**)ptr; } @@ -130,7 +136,7 @@ _exit: } static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) { - return tsdbGetTableSchemaImpl(pTable, false, false, -1); + return tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); } static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6c41571c2d2274b0bbda57460e1fe93505038c97..2c36e1b67ac7efd845b0e818d9cfa45b214795b3 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -866,7 +866,7 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) { } static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); pCommith->pTable = pTable; @@ -1283,7 +1283,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt (*iter)++; } else if (key1 > key2) { if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + pSchema = + tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row), (int8_t)memRowType(row)); ASSERT(pSchema != NULL); } @@ -1304,7 +1305,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt if (update != TD_ROW_DISCARD_UPDATE) { //copy mem data if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + pSchema = + tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row), (int8_t)memRowType(row)); ASSERT(pSchema != NULL); } diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 5ccb9e90f2407561709d36a85ac3e992e5d5a8ba..9b890ec113600420c179f4d13159706d79dd322a 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -431,7 +431,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue; - pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1); + pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1, -1); taosArrayClear(pComph->aSupBlk); if ((tdInitDataCols(pComph->pDataCols, pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[1], pSchema) < 0)) { diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index b2e6fe89161d0e9bceaf74a46807f51ec402fb2a..a22f9265e0a829f91292f427cdb81b20c8172f98 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -617,7 +617,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) { STable *pTable = pMeta->tables[i]; if (pTable && pTable->type == TSDB_STREAM_TABLE) { pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, 
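/*
 * tsdbGetTableSchemaImpl() now takes a rowType argument (passed as -1 whenever
 * the lookup is unrelated to a specific row). When a schema with the requested
 * version cannot be found and the row is a key-value row (SMEM_ROW_KV), the
 * implementation falls back to the newest schema in the table's schema array
 * instead of failing with TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION. The decisive
 * branch from this hunk, restated:
 *
 *     void *ptr = taosArraySearch(pDTable->schema, &_version,
 *                                 tsdbCompareSchemaVersion, TD_EQ);
 *     if (ptr == NULL) {
 *         if (rowType == SMEM_ROW_KV) {
 *             ptr = taosArrayGetLast(pDTable->schema);   // use latest schema
 *         } else {
 *             terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
 *             goto _exit;
 *         }
 *     }
 */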
- tsdbGetTableSchemaImpl(pTable, false, false, -1), 0); + tsdbGetTableSchemaImpl(pTable, false, false, -1, -1), 0); } } } diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index e766d97a97a5905db87691426d282a219eef9d68..c82490c3e8aa0c5ffabda78a9fc3222c958fd79e 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -582,7 +582,7 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row) { if (pCols) { if (*ppSchema == NULL || schemaVersion(*ppSchema) != memRowVersion(row)) { - *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row)); + *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row), (int8_t)memRowType(row)); if (*ppSchema == NULL) { ASSERT(false); return -1; @@ -730,7 +730,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) { *ppSchema1 = pSchema2; } else { - *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1)); + *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1), (int8_t)memRowType(row1)); } pSchema1 = *ppSchema1; } @@ -739,7 +739,7 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep if(schemaVersion(pSchema1) == dv2) { pSchema2 = pSchema1; } else { - *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2)); + *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2), (int8_t)memRowType(row2)); pSchema2 = *ppSchema2; } } @@ -847,7 +847,7 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t * } } - STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); + STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion, -1); pRepo->stat.pointsWritten += points * schemaNCols(pSchema); pRepo->stat.totalStorage += points * schemaVLen(pSchema); @@ -894,7 +894,7 @@ static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { ASSERT(pTable != NULL); - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); int sversion = schemaVersion(pSchema); if (pBlock->sversion == sversion) { @@ -951,7 +951,7 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT } } else { ASSERT(pBlock->sversion >= 0); - if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { + if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion, -1) == NULL) { tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; @@ -972,7 +972,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro return; } - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row), (int8_t)memRowType(row)); if (pSchema == NULL) { return; } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 8407a0519a5595b750e0fd45cc82dcd9d1f6b5b1..e604b07cc1dd0d37574235263a018baecbe670c1 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -534,8 +534,8 @@ 
STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) { return *(STable **)ptr; } -STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version) { - return tsdbGetTableSchemaImpl(pTable, true, false, _version); +STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version, int8_t rowType) { + return tsdbGetTableSchemaImpl(pTable, true, false, _version, rowType); } int tsdbWLockRepoMeta(STsdbRepo *pRepo) { @@ -652,7 +652,7 @@ int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) { } STSchema* tsdbGetTableLatestSchema(STable *pTable) { - return tsdbGetTableSchemaByVersion(pTable, -1); + return tsdbGetTableSchemaByVersion(pTable, -1, -1); } int tsdbUpdateLastColSchema(STable *pTable, STSchema *pNewSchema) { @@ -957,7 +957,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo } if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); } @@ -965,7 +965,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, - tsdbGetTableSchemaImpl(pTable, false, false, -1), 1); + tsdbGetTableSchemaImpl(pTable, false, false, -1, -1), 1); } tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), @@ -984,7 +984,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro SListNode *pNode = NULL; STable * tTable = NULL; - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); int maxCols = schemaNCols(pSchema); int maxRowBytes = schemaTLen(pSchema); @@ -1018,7 +1018,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro for (int i = 0; i < pMeta->maxTables; i++) { STable *_pTable = pMeta->tables[i]; if (_pTable != NULL) { - pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1); + pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1, -1); maxCols = MAX(maxCols, schemaNCols(pSchema)); maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema)); } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0f1554a3f35007caad7a3047cc7631836bdb7365..02dbe00bf7d5647ca410683647c69e62b248b758 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1582,7 +1582,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfColsOfRow1 = 0; if (pSchema1 == NULL) { - pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1)); } if(isRow1DataRow) { numOfColsOfRow1 = schemaNCols(pSchema1); @@ -1594,7 +1594,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, if(row2) { isRow2DataRow = isDataRow(row2); if (pSchema2 == NULL) { - pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), 
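/*
 * The same convention is threaded through the read and merge paths in
 * tsdbRead.c (this and the following hunks): wherever a schema is looked up
 * for an in-memory row, the call now passes (int8_t)memRowType(row) so that
 * KV rows keep resolving to a usable schema, while lookups not tied to a row
 * keep passing -1. The recurring call shape:
 *
 *     pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row),
 *                                           (int8_t)memRowType(row));
 */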
(int8_t)memRowType(row2)); } if(isRow2DataRow) { numOfColsOfRow2 = schemaNCols(pSchema2); @@ -1961,11 +1961,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { if (rv1 != memRowVersion(row1)) { - pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1)); rv1 = memRowVersion(row1); } if(row2 && rv2 != memRowVersion(row2)) { - pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), (int8_t)memRowType(row2)); rv2 = memRowVersion(row2); } @@ -1986,11 +1986,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos); } if (rv1 != memRowVersion(row1)) { - pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1)); rv1 = memRowVersion(row1); } if(row2 && rv2 != memRowVersion(row2)) { - pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2), (int8_t)memRowType(row2)); rv2 = memRowVersion(row2); } @@ -2654,7 +2654,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int win->ekey = key; if (rv != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row), (int8_t)memRowType(row)); rv = memRowVersion(row); } mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true); diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 74d41cce194f9921ee0c521de9e329bad5eeb3f9..8ab0130218b8e1c52877a3072d7a5ae1f773dd17 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -153,7 +153,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { } int tsdbSetReadTable(SReadH *pReadh, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1, -1); pReadh->pTable = pTable; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 5a3dc3f9bcdee41f974e48f22b27beb2a1eb5a35..131d05155283375fefcb5c8b0851c8b2236e83af 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -336,6 +336,9 @@ void taosReadGlobalLogCfg() { #elif (_TD_TQ_ == true) printf("configDir:%s not there, use default value: /etc/tq", configDir); strcpy(configDir, "/etc/tq"); + #elif (_TD_PRO_ == true) + printf("configDir:%s not there, use default value: /etc/ProDB", configDir); + strcpy(configDir, "/etc/ProDB"); #else printf("configDir:%s not there, use default value: /etc/taos", configDir); strcpy(configDir, "/etc/taos"); diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 1ce3eadf58432337511d0d600848ad334b96fc91..0d335ca2664ffee75a79144b97181a5b625df66d 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -85,6 +85,8 @@ int64_t dbgWSize = 0; char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; #elif (_TD_TQ_ == true) char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; +#elif (_TD_PRO_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB"; #else char 
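/*
 * The _TD_PRO_ build flag gets its own set of defaults in tlog.c here and in
 * linuxEnv.c, wEnv.c and tconfig.c above, mirroring the existing power/tq
 * branches: configuration under /etc/ProDB, data under /var/lib/ProDB and
 * logs under /var/log/ProDB (C:/ProDB/... on Windows). The preprocessor
 * pattern shared by these files, as it reads after this change:
 *
 *     #elif (_TD_PRO_ == true)
 *     char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB";
 *     #else
 *     char tsLogDir[PATH_MAX] = "/var/log/taos";
 *     #endif
 */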
tsLogDir[PATH_MAX] = "/var/log/taos"; #endif diff --git a/tests/nettest/TCPUDP.sh b/tests/nettest/TCPUDP.sh deleted file mode 100755 index 3a4b5d77a4f26862b03194488380c8dad172bb42..0000000000000000000000000000000000000000 --- a/tests/nettest/TCPUDP.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -for N in -1 0 1 10000 10001 -do - for l in 1023 1024 1073741824 1073741825 - do - for S in udp tcp - do - taos -n speed -h BCC-2 -P 6030 -N $N -l $l -S $S 2>&1 | tee -a result.txt - done - done -done diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 344ad5dde5f9fc58b760691b94f112e9b458f1d7..8c35778018b9c34789f862f6a728e487694357f4 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18177,4 +18177,40 @@ fun:_PyEval_EvalFrameDefault obj:/usr/bin/python3.8 fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_my_Py_InitModule + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__openssl + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_PyObject_GC_New + fun:ffi_internal_new + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__constant_time + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault } \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 522ce678a16e26ca12340df91fd6ffc212bf0c3e..03a5681c9230f27eb59794aa1090ac3ab6be09dd 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -178,7 +178,7 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +python3 test.py -f tools/taosdumpTestNanoSupport.py # update python3 ./test.py -f update/allow_update.py diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index e69cb8f4682b130d6c2182b005544afaf41c16c5..e8023546c446ff76b5c1976de83c01d4db8f6751 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -1128,9 +1128,8 @@ class TDTestCase: self.td3690() self.td4082() self.td4288() - # self.td4724() - # self.td5798() - # self.td5935() + self.td4724() + self.td5935() self.td6068() # develop branch @@ -1138,6 +1137,7 @@ class TDTestCase: # self.td4889() # self.td5168() # self.td5433() + # self.td5798() def stop(self): tdSql.close() diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py index 8864c0e37621c72ad39fb4249749244b1fbe8367..66a7f85120fe13293996d1bd3153b6fe9b1d6a72 100644 --- a/tests/pytest/query/queryCnameDisplay.py +++ b/tests/pytest/query/queryCnameDisplay.py @@ -49,10 +49,11 @@ class TDTestCase: # select as cname with cname_list sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as 
{cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check' - sql_seq_no_as = sql_seq.replace('as ', '') + sql_seq_no_as = sql_seq.replace(' as ', ' ') + print(sql_seq) + print(sql_seq_no_as) res = tdSql.getColNameList(sql_seq) res_no_as = tdSql.getColNameList(sql_seq_no_as) - # cname[1] > 64, it is expected to be equal to 64 cname_list_1_expected = cname_list[1][:-1] cname_list[1] = cname_list_1_expected @@ -79,7 +80,7 @@ class TDTestCase: # select as cname with cname_list sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' - sql_seq_no_as = sql_seq.replace('as ', '') + sql_seq_no_as = sql_seq.replace(' as ', ' ') res = tdSql.getColNameList(sql_seq) res_no_as = tdSql.getColNameList(sql_seq_no_as) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py deleted file mode 100644 index a2059ec924ad1e2239c2709bc99dd58fbafa1337..0000000000000000000000000000000000000000 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +++ /dev/null @@ -1,362 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import os -from util.log import * -from util.cases import * -from util.sql import * -from util.dnodes import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" - self.numberOfTables = 10 - self.numberOfRecords = 100 - - def checkCommunity(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - return False - else: - return True - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root) - len("/build/bin")] - break - return buildPath - - - - def createdb(self, precision="ns"): - tb_nums = self.numberOfTables - per_tb_rows = self.numberOfRecords - - def build_db(precision, start_time): - tdSql.execute("drop database if exists timedb1") - tdSql.execute( - "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") - - tdSql.execute("use timedb1") - tdSql.execute( - "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") - for tb in range(tb_nums): - tbname = "t"+str(tb) - tdSql.execute("create table " + tbname + - " using st tags(1, 'beijing')") - sql = "insert into " + tbname + " values" - currts = start_time - if precision == "ns": - ts_seed = 1000000000 - elif precision == "us": - ts_seed = 1000000 - else: - ts_seed = 1000 - - for i in range(per_tb_rows): - sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % - 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) - tdSql.execute(sql) - - if precision == "ns": - start_time = 1625068800000000000 - build_db(precision, start_time) - - elif precision == "us": - start_time = 1625068800000000 - build_db(precision, start_time) - - elif precision == "ms": - start_time = 1625068800000 - build_db(precision, start_time) - - else: - print("other time precision not valid , please check! 
") - - - def run(self): - - # clear envs - os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") - - if not os.path.exists("./taosdumptest/tmp1"): - os.makedirs("./taosdumptest/dumptmp1") - else: - print("path exist!") - - if not os.path.exists("./taosdumptest/dumptmp2"): - os.makedirs("./taosdumptest/dumptmp2") - - if not os.path.exists("./taosdumptest/dumptmp3"): - os.makedirs("./taosdumptest/dumptmp3") - - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosdump not found!") - else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" - - # create nano second database - - self.createdb(precision="ns") - - # dump all data - - os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) - - # dump part data with -S -E - os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % - binPath) - os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % - binPath) - - # replace strings to dump in databases - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) - - # dump data and check for taosdump - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) - - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) - - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) - - # check data - origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") - if origin_res == dump_res: - tdLog.info("test nano second : dump check data pass for all data!" ) - else: - tdLog.info("test nano second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) - else: - tdLog.info(" test nano second : dump check data failed for data !" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) - else: - tdLog.info(" test nano second : dump check data failed for data !" 
) - - - # us second support test case - - os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") - - if not os.path.exists("./taosdumptest/tmp1"): - os.makedirs("./taosdumptest/dumptmp1") - else: - print("path exits!") - - if not os.path.exists("./taosdumptest/dumptmp2"): - os.makedirs("./taosdumptest/dumptmp2") - - if not os.path.exists("./taosdumptest/dumptmp3"): - os.makedirs("./taosdumptest/dumptmp3") - - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosdump not found!") - else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" - - self.createdb(precision="us") - - os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) - - os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % - binPath) - os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % - binPath) - - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) - - - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) - - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) - - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) - - - origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") - if origin_res == dump_res: - tdLog.info("test us second : dump check data pass for all data!" ) - else: - tdLog.info("test us second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) - else: - tdLog.info(" test us second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) - else: - tdLog.info(" test us second : dump check data failed for data! 
" ) - - - # ms second support test case - - os.system("rm -rf ./taosdumptest/") - tdSql.execute("drop database if exists dumptmp1") - tdSql.execute("drop database if exists dumptmp2") - tdSql.execute("drop database if exists dumptmp3") - - if not os.path.exists("./taosdumptest/tmp1"): - os.makedirs("./taosdumptest/dumptmp1") - else: - print("path exits!") - - if not os.path.exists("./taosdumptest/dumptmp2"): - os.makedirs("./taosdumptest/dumptmp2") - - if not os.path.exists("./taosdumptest/dumptmp3"): - os.makedirs("./taosdumptest/dumptmp3") - - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosdump not found!") - else: - tdLog.info("taosdump found in %s" % buildPath) - binPath = buildPath + "/build/bin/" - - self.createdb(precision="ms") - - os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) - - os.system( - '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % - binPath) - os.system( - '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % - binPath) - - os.system( - "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") - os.system( - "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") - os.system( - "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) - - - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) - - tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) - - tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) - - - origin_res = tdSql.getResult("select * from timedb1.st") - dump_res = tdSql.getResult("select * from dumptmp1.st") - if origin_res == dump_res: - tdLog.info("test ms second : dump check data pass for all data!" ) - else: - tdLog.info("test ms second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") - dump_res = tdSql.getResult("select * from dumptmp2.st") - if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) - else: - tdLog.info(" test ms second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") - dump_res = tdSql.getResult("select * from dumptmp3.st") - if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) - else: - tdLog.info(" test ms second : dump check data failed for data! 
" ) - - - os.system("rm -rf ./taosdumptest/") - os.system("rm -rf ./dump_result.txt") - os.system("rm -rf *.py.sql") - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py index 46674203ff7bc22283ac479fd10d9df1083d0112..aad28fee8188281fc94d8e20f728007630c3ab4b 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -26,6 +26,9 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) now = time.time() + + print(int(round(now * 1000))) + self.ts = int(round(now * 1000)) def getBuildPath(self): @@ -54,6 +57,7 @@ class TDTestCase: # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-4985 + os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -82,6 +86,7 @@ class TDTestCase: % (self.ts + i, i, -10000+i, i)) tdSql.query("select * from stb0 where c2 like 'test99%' ") tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) tdSql.checkData(0, 1, 0) tdSql.checkData(1, 1, 1) diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py index ca8832170b7706621f5ef9d3225fe2cf16141c34..727690c6e629217997bd5ecbf085116be4a7e347 100644 --- a/tests/pytest/tools/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -44,14 +44,12 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] break return buildPath - - def createdb(self, precision="ns"): tb_nums = self.numberOfTables @@ -60,13 +58,16 @@ class TDTestCase: def build_db(precision, start_time): tdSql.execute("drop database if exists timedb1") tdSql.execute( - "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") + "create database timedb1 days 10 keep 365 blocks 8 precision " + + "\"" + + precision + + "\"") tdSql.execute("use timedb1") tdSql.execute( "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") for tb in range(tb_nums): - tbname = "t"+str(tb) + tbname = "t" + str(tb) tdSql.execute("create table " + tbname + " using st tags(1, 'beijing')") sql = "insert into " + tbname + " values" @@ -79,8 +80,8 @@ class TDTestCase: ts_seed = 1000 for i in range(per_tb_rows): - sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % - 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i * ts_seed, i % + 100, i % 100, currts + i * 100) # currts +1000ms (1000000000ns) tdSql.execute(sql) if precision == "ns": @@ -97,7 +98,6 @@ class TDTestCase: else: print("other time precision not valid , please check! 
") - def run(self): @@ -132,11 +132,12 @@ class TDTestCase: # dump all data os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % + binPath) # dump part data with -S -E os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % @@ -150,42 +151,44 @@ class TDTestCase: os.system( "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) # dump data and check for taosdump tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.checkData(0, 0, 1000) tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.checkData(0, 0, 510) tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.checkData(0, 0, 900) # check data origin_res = tdSql.getResult("select * from timedb1.st") dump_res = tdSql.getResult("select * from dumptmp1.st") if origin_res == dump_res: - tdLog.info("test nano second : dump check data pass for all data!" ) + tdLog.info("test nano second : dump check data pass for all data!") else: - tdLog.info("test nano second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") + tdLog.info( + "test nano second : dump check data failed for all data!") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") dump_res = tdSql.getResult("select * from dumptmp2.st") if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) + tdLog.info(" test nano second : dump check data pass for data! ") else: - tdLog.info(" test nano second : dump check data failed for data !" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") + tdLog.info(" test nano second : dump check data failed for data !") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000000000 ") dump_res = tdSql.getResult("select * from dumptmp3.st") if origin_res == dump_res: - tdLog.info(" test nano second : dump check data pass for data! " ) + tdLog.info(" test nano second : dump check data pass for data! ") else: - tdLog.info(" test nano second : dump check data failed for data !" 
) - + tdLog.info(" test nano second : dump check data failed for data !") # us second support test case @@ -215,10 +218,11 @@ class TDTestCase: self.createdb(precision="us") os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % + binPath) os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % @@ -231,43 +235,42 @@ class TDTestCase: os.system( "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.checkData(0, 0, 1000) tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.checkData(0, 0, 510) tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.checkData(0, 0, 900) - origin_res = tdSql.getResult("select * from timedb1.st") dump_res = tdSql.getResult("select * from dumptmp1.st") if origin_res == dump_res: - tdLog.info("test us second : dump check data pass for all data!" ) + tdLog.info("test us second : dump check data pass for all data!") else: - tdLog.info("test us second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") + tdLog.info("test us second : dump check data failed for all data!") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") dump_res = tdSql.getResult("select * from dumptmp2.st") if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) + tdLog.info(" test us second : dump check data pass for data! ") else: - tdLog.info(" test us second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") + tdLog.info(" test us second : dump check data failed for data!") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000000 ") dump_res = tdSql.getResult("select * from dumptmp3.st") if origin_res == dump_res: - tdLog.info(" test us second : dump check data pass for data! " ) + tdLog.info(" test us second : dump check data pass for data! ") else: - tdLog.info(" test us second : dump check data failed for data! " ) + tdLog.info(" test us second : dump check data failed for data! 
") - # ms second support test case os.system("rm -rf ./taosdumptest/") @@ -296,10 +299,11 @@ class TDTestCase: self.createdb(precision="ms") os.system( - "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % + binPath) os.system( - '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % @@ -312,43 +316,42 @@ class TDTestCase: os.system( "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") - os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) - os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath) + os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath) - tdSql.query("select count(*) from dumptmp1.st") - tdSql.checkData(0,0,1000) + tdSql.checkData(0, 0, 1000) tdSql.query("select count(*) from dumptmp2.st") - tdSql.checkData(0,0,510) + tdSql.checkData(0, 0, 510) tdSql.query("select count(*) from dumptmp3.st") - tdSql.checkData(0,0,900) + tdSql.checkData(0, 0, 900) - origin_res = tdSql.getResult("select * from timedb1.st") dump_res = tdSql.getResult("select * from dumptmp1.st") if origin_res == dump_res: - tdLog.info("test ms second : dump check data pass for all data!" ) + tdLog.info("test ms second : dump check data pass for all data!") else: - tdLog.info("test ms second : dump check data failed for all data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") + tdLog.info("test ms second : dump check data failed for all data!") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") dump_res = tdSql.getResult("select * from dumptmp2.st") if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) + tdLog.info(" test ms second : dump check data pass for data! ") else: - tdLog.info(" test ms second : dump check data failed for data!" ) - - origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") + tdLog.info(" test ms second : dump check data failed for data!") + + origin_res = tdSql.getResult( + "select * from timedb1.st where ts >=1625068810000 ") dump_res = tdSql.getResult("select * from dumptmp3.st") if origin_res == dump_res: - tdLog.info(" test ms second : dump check data pass for data! " ) + tdLog.info(" test ms second : dump check data pass for data! ") else: - tdLog.info(" test ms second : dump check data failed for data! " ) + tdLog.info(" test ms second : dump check data failed for data! ") - os.system("rm -rf ./taosdumptest/") os.system("rm -rf ./dump_result.txt") os.system("rm -rf *.py.sql")