未验证 提交 4bca360a 编写于 作者: H Hui Li 提交者: GitHub

Merge pull request #9792 from taosdata/test/td-11552

[TD-11552]<test>(other): add stability scripts and no need CI
# How to build image
## eg:
cd ./collectd_docker
docker build -t "taosadapter_collectd:v1" .
# How to run single container
## eg:
cd ./collectd_docker
./run_collectd.sh -h
#Usage:
#1st arg: agent_count
#2nd arg: container_hostname prefix
#3rd arg: TaosadapterIp
#4th arg: TaosadapterPort
#5th arg: CollectdInterval
#eg: ./run_collectd.sh 1 collectd_agent1 172.26.10.86 6047 1
#eg: ./run_collectd.sh 2 collectd_agent* 172.26.10.86 6047 1
#rm all: ./run_collectd.sh rm collectd_agent*
# How to run all container
## You need to edit run_all.sh to set the taosadapter ip/port manually, but the count of each agent can be passed as command-line arguments
./run_all.sh -h
#Usage:
#1st arg: collectd_count
#2nd arg: icinga2_count
#3rd arg: statsd_count
#4th arg: tcollector_count
#5th arg: telegraf_count
#6th arg: node_exporter port range
#eg: ./run_all.sh 10 10 1 10 50 10000:10020
# Dockerfile for a collectd agent container used in taosadapter stability tests.
# The CollectdHostname/TaosadapterIp/TaosadapterPort/CollectdInterval values are
# substituted into collectd.conf by entrypoint.sh at container start.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-04
WORKDIR /root
# Build-time only: suppress debconf prompts without polluting the runtime env.
ARG DEBIAN_FRONTEND=noninteractive
# Use apt-get (not apt) in scripts (hadolint DL3027) and clean the package
# lists in the same layer so they never persist in the image.
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends collectd && \
    rm -rf /var/lib/apt/lists/*
COPY collectd.conf /etc/collectd/collectd.conf
COPY entrypoint.sh /entrypoint.sh
# Defaults, overridden via `docker run -e ...` by run_collectd.sh.
# (key=value form; the legacy space-separated ENV form is deprecated.)
ENV CollectdHostname=localhost \
    TaosadapterIp=127.0.0.1 \
    TaosadapterPort=6047 \
    CollectdInterval=10
ENTRYPOINT ["/entrypoint.sh"]
#!/bin/bash
# Container entrypoint: substitute runtime env values into the collectd
# template config, start collectd, then block forever to keep the container up.
# The sed script is double-quoted as a whole so an empty/unset variable cannot
# split the expression and corrupt the substitution.
# NOTE(review): the hostname placeholder is filled from $HOSTNAME, not the
# CollectdHostname env the Dockerfile declares — confirm that is intended.
sed -i "s/CollectdHostname/${HOSTNAME}/g;s/TaosadapterIp/${TaosadapterIp}/g;s/TaosadapterPort/${TaosadapterPort}/g;s/CollectdInterval/${CollectdInterval}/g;" /etc/collectd/collectd.conf
/etc/init.d/collectd start
tail -f /dev/null
#!/bin/bash
# Launch (or tear down) collectd agent containers that feed taosadapter.
#
# Usage:
#   run_collectd.sh <agent_count> <hostname_prefix> <TaosadapterIp> <TaosadapterPort> <CollectdInterval>
#   run_collectd.sh rm <hostname_prefix>   # stop & remove matching containers
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: agent_count"
        echo "2nd arg: container_hostname prefix"
        echo "3rd arg: TaosadapterIp"
        echo "4th arg: TaosadapterPort"
        echo "5th arg: CollectdInterval"
        echo "eg: ./run_collectd.sh 1 collectd_agent1 172.26.10.86 6047 1"
        echo "eg: ./run_collectd.sh 2 collectd_agent* 172.26.10.86 6047 1"
        echo "rm all: ./run_collectd.sh rm collectd_agent*"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
# `docker stop` echoes the ids it stopped, so they can be piped on to rm;
# xargs -r avoids invoking docker with an empty argument list.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
if [ -z "$3" ]; then
    echo "please input 3rd arg"
    exit 1
fi
if [ -z "$4" ]; then
    echo "please input 4th arg"
    exit 1
fi
if [ -z "$5" ]; then
    echo "please input 5th arg"
    exit 1
fi
if [ "$1" -eq 1 ]; then
    # Single container: $2 is used verbatim as the container name and hostname.
    if docker ps | grep "$2"; then
        docker stop "$2" && docker rm "$2"
    fi
    docker run -itd --name "$2" -h "$2" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e CollectdInterval="$5" taosadapter_collectd:v1 /bin/bash
else
    # Multiple containers: strip the trailing '*' from $2 and append 1..$1.
    prefix=$(echo "$2" | cut -d '*' -f 1)
    for i in $(seq 1 "$1"); do
        if docker ps | grep "${prefix}${i}"; then
            docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
        fi
        docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e CollectdInterval="$5" taosadapter_collectd:v1 /bin/bash
    done
fi
#docker run -itd --name collectd_agent1 -h collectd_agent1 -e CollectdHostname=collectd_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6047 -e CollectdInterval=1 taosadapter_collectd:v1 /bin/bash
# Dockerfile for an icinga2 agent container used in taosadapter stability tests.
# Its OpenTSDB writer is pointed at taosadapter by entrypoint.sh at startup.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-05
ARG DEBIAN_FRONTEND=noninteractive
WORKDIR /root
# gnupg is needed only so apt-key can import the repository key below.
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends gnupg
COPY icinga-focal.list /etc/apt/sources.list.d/icinga-focal.list
COPY icinga.key /root/icinga.key
# NOTE(review): apt-key is deprecated; prefer a keyring file referenced with
# signed-by= in icinga-focal.list when this image is next reworked.
RUN set -ex; \
    apt-key add icinga.key && \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends icinga2 monitoring-plugins systemctl && \
    icinga2 feature enable opentsdb && \
    rm -rf /var/lib/apt/lists/*
COPY opentsdb.conf /etc/icinga2/features-available/opentsdb.conf
COPY entrypoint.sh /entrypoint.sh
COPY templates.conf /etc/icinga2/conf.d/templates.conf
# Defaults, overridden via `docker run -e ...` by run_icinga2.sh.
ENV Icinga2Interval=10s \
    TaosadapterIp=127.0.0.1 \
    TaosadapterPort=6048
ENTRYPOINT ["/entrypoint.sh"]
#!/bin/bash
# Container entrypoint: point the OpenTSDB writer at taosadapter, apply the
# check interval, restart icinga2, then block to keep the container alive.
# Quote each sed script as a whole so empty/unset variables cannot split it.
sed -i "s/TaosadapterIp/${TaosadapterIp}/g;s/TaosadapterPort/${TaosadapterPort}/g;" /etc/icinga2/features-available/opentsdb.conf
sed -i "s/Icinga2Interval/${Icinga2Interval}/g;" /etc/icinga2/conf.d/templates.conf
systemctl restart icinga2
tail -f /dev/null
deb http://packages.icinga.com/ubuntu icinga-focal main
deb-src http://packages.icinga.com/ubuntu icinga-focal main
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v2.0.19 (GNU/Linux)
mQGiBFKHzk4RBACSHMIFTtfw4ZsNKAA03Gf5t7ovsKWnS7kcMYleAidypqhOmkGg
0petiYsMPYT+MOepCJFGNzwQwJhZrdLUxxMSWay4Xj0ArgpD9vbvU+gj8Tb02l+x
SqNGP8jXMV5UnK4gZsrYGLUPvx47uNNYRIRJAGOPYTvohhnFJiG402dzlwCg4u5I
1RdFplkp9JM6vNM9VBIAmcED/2jr7UQGsPs8YOiPkskGHLh/zXgO8SvcNAxCLgbp
BjGcF4Iso/A2TAI/2KGJW6kBW/Paf722ltU6s/6mutdXJppgNAz5nfpEt4uZKZyu
oSWf77179B2B/Wl1BsX/Oc3chscAgQb2pD/qPF/VYRJU+hvdQkq1zfi6cVsxyREV
k+IwA/46nXh51CQxE29ayuy1BoIOxezvuXFUXZ8rP6aCh4KaiN9AJoy7pBieCzsq
d7rPEeGIzBjI+yhEu8p92W6KWzL0xduWfYg9I7a2GTk8CaLX2OCLuwnKd7RVDyyZ
yzRjWs0T5U7SRAWspLStYxMdKert9lLyQiRHtLwmlgBPqa0gh7Q+SWNpbmdhIE9w
ZW4gU291cmNlIE1vbml0b3JpbmcgKEJ1aWxkIHNlcnZlcikgPGluZm9AaWNpbmdh
Lm9yZz6IYAQTEQIAIAUCUofOTgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJ
EMbjGcM0QQaCgSQAnRjXdbsyqziqhmxfAKffNJYuMPwdAKCS/IRCVyQzApFBtIBQ
1xuoym/4C7kCDQRSh85OEAgAvPwjlURCi8z6+7i60no4n16dNcSzd6AT8Kizpv2r
9BmNBff/GNYGnHyob/DMtmO2esEuVG8w62rO9m1wzzXzjbtmtU7NZ1Tg+C+reU2I
GNVu3SYtEVK/UTJHAhLcgry9yD99610tYPN2Fx33Efse94mXOreBfCvDsmFGSc7j
GVNCWXpMR3jTYyGj1igYd5ztOzG63D8gPyOucTTl+RWN/G9EoGBv6sWqk5eCd1Fs
JlWyQX4BJn3YsCZx3uj1DWL0dAl2zqcn6m1M4oj1ozW47MqM/efKOcV6VvCs9SL8
F/NFvZcH4LKzeupCQ5jEONqcTlVlnLlIqId95Z4DI4AV9wADBQf/S6sKA4oH49tD
Yb5xAfUyEp5ben05TzUJbXs0Z7hfRQzy9+vQbWGamWLgg3QRUVPx1e4IT+W5vEm5
dggNTMEwlLMI7izCPDcD32B5oxNVxlfj428KGllYWCFj+edY+xKTvw/PHnn+drKs
LE65Gwx4BPHm9EqWHIBX6aPzbgbJZZ06f6jWVBi/N7e/5n8lkxXqS23DBKemapyu
S1i56sH7mQSMaRZP/iiOroAJemPNxv1IQkykxw2woWMmTLKLMCD/i+4DxejE50tK
dxaOLTc4HDCsattw/RVJO6fwE414IXHMv330z4HKWJevMQ+CmQGfswvCwgeBP9n8
PItLjBQAXIhJBBgRAgAJBQJSh85OAhsMAAoJEMbjGcM0QQaCzpAAmwUNoRyySf9p
5G3/2UD1PMueIwOtAKDVVDXEq5LJPVg4iafNu0SRMwgP0Q==
=icbY
-----END PGP PUBLIC KEY BLOCK-----
/**
* The OpenTsdbWriter type writes check result metrics and
* performance data to a OpenTSDB tcp socket.
*/
// TaosadapterIp / TaosadapterPort below are literal placeholder tokens: the
// container's entrypoint.sh rewrites them (via sed) with the real env values
// before icinga2 starts. Do not rename them.
object OpenTsdbWriter "opentsdb" {
  host = "TaosadapterIp"
  port = TaosadapterPort
  //enable_generic_metrics = false
  // Custom Tagging, refer to Icinga object type documentation for
  // OpenTsdbWriter
  //host_template = {
  //  metric = "icinga.host"
  //  tags = {
  //    zone = "$host.zone$"
  //  }
  //}
  //service_template = {
  //  metric = "icinga.service.$service.check_command$"
  //  tags = {
  //    zone = "$service.zone$"
  //  }
  //}
}
#!/bin/bash
# Launch (or tear down) icinga2 agent containers that feed taosadapter.
#
# Usage:
#   run_icinga2.sh <agent_count> <hostname_prefix> <TaosadapterIp> <TaosadapterPort> <Icinga2Interval>
#   run_icinga2.sh rm <hostname_prefix>   # stop & remove matching containers
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: agent_count"
        echo "2nd arg: container_hostname prefix"
        echo "3rd arg: TaosadapterIp"
        echo "4th arg: TaosadapterPort"
        echo "5th arg: Icinga2Interval"
        echo "eg: ./run_icinga2.sh 1 icinga2_agent1 172.26.10.86 6048 1"
        echo "eg: ./run_icinga2.sh 2 icinga2_agent* 172.26.10.86 6048 1"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
if [ -z "$3" ]; then
    echo "please input 3rd arg"
    exit 1
fi
if [ -z "$4" ]; then
    echo "please input 4th arg"
    exit 1
fi
if [ -z "$5" ]; then
    echo "please input 5th arg"
    exit 1
fi
if [ "$1" -eq 1 ]; then
    # Single container: $2 is used verbatim as the container name and hostname.
    if docker ps | grep "$2"; then
        docker stop "$2" && docker rm "$2"
    fi
    docker run -itd --name "$2" -h "$2" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e Icinga2Interval="$5" taosadapter_icinga2:v1 /bin/bash
else
    # Multiple containers: strip the trailing '*' from $2 and append 1..$1.
    prefix=$(echo "$2" | cut -d '*' -f 1)
    for i in $(seq 1 "$1"); do
        if docker ps | grep "${prefix}${i}"; then
            docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
        fi
        docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e Icinga2Interval="$5" taosadapter_icinga2:v1 /bin/bash
    done
fi
#docker run -itd --name icinga2_agent1 -h icinga2_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6048 -e Icinga2Interval=1s taosadapter_icinga2:v1 /bin/bash
/*
* Generic template examples.
*/
/**
* Provides default settings for hosts. By convention
* all hosts should import this template.
*
* The CheckCommand object `hostalive` is provided by
* the plugin check command templates.
* Check the documentation for details.
*/
template Host "generic-host" {
  max_check_attempts = 3
  // Icinga2Interval is a literal placeholder token rewritten by the
  // container's entrypoint.sh (sed) with the $Icinga2Interval env value
  // before icinga2 starts. Do not rename it.
  check_interval = Icinga2Interval
  retry_interval = 30s
  check_command = "hostalive"
}
/**
 * Provides default settings for services. By convention
 * all services should import this template.
 */
template Service "generic-service" {
  max_check_attempts = 5
  // Same sed-substituted Icinga2Interval placeholder as above.
  check_interval = Icinga2Interval
  retry_interval = 30s
}
/**
* Provides default settings for users. By convention
* all users should inherit from this template.
*/
template User "generic-user" {
}
/**
* Provides default settings for host notifications.
* By convention all host notifications should import
* this template.
*/
template Notification "mail-host-notification" {
command = "mail-host-notification"
states = [ Up, Down ]
types = [ Problem, Acknowledgement, Recovery, Custom,
FlappingStart, FlappingEnd,
DowntimeStart, DowntimeEnd, DowntimeRemoved ]
vars += {
// notification_icingaweb2url = "https://www.example.com/icingaweb2"
// notification_from = "Icinga 2 Host Monitoring <icinga@example.com>"
notification_logtosyslog = false
}
period = "24x7"
}
/**
* Provides default settings for service notifications.
* By convention all service notifications should import
* this template.
*/
template Notification "mail-service-notification" {
command = "mail-service-notification"
states = [ OK, Warning, Critical, Unknown ]
types = [ Problem, Acknowledgement, Recovery, Custom,
FlappingStart, FlappingEnd,
DowntimeStart, DowntimeEnd, DowntimeRemoved ]
vars += {
// notification_icingaweb2url = "https://www.example.com/icingaweb2"
// notification_from = "Icinga 2 Service Monitoring <icinga@example.com>"
notification_logtosyslog = false
}
period = "24x7"
}
# Dockerfile for a node_exporter container used in taosadapter stability tests.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-05
WORKDIR /root
ARG DEBIAN_FRONTEND=noninteractive
# SECURITY(review): the node_exporter tarball is fetched over plain HTTP from a
# bare IP with no checksum; verify the artifact (e.g. sha256sum) or use an
# HTTPS mirror when this image is next reworked.
# Download, install and clean up in one layer so neither the tarball nor wget's
# package lists persist in the image.
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends wget && \
    wget http://39.105.163.10:9000/node_exporter-1.3.0.linux-amd64.tar.gz && \
    tar -xvf node_exporter-1.3.0.linux-amd64.tar.gz && \
    mv node_exporter-1.3.0.linux-amd64/node_exporter /usr/bin/node_exporter && \
    rm -rf node_exporter-1.3.0.linux-amd64 node_exporter-1.3.0.linux-amd64.tar.gz && \
    apt-get remove -y wget && \
    rm -rf /var/lib/apt/lists/*
COPY entrypoint.sh /entrypoint.sh
# Defaults, overridden via `docker run -e ...`.
ENV NodeExporterHostname=localhost \
    NodeExporterInterval=10
ENTRYPOINT ["/entrypoint.sh"]
## eg: python3 gen_taosadapter_url.py 172.26.10.87 10000:10050
import sys


def build_urls(ip, port_range):
    """Return taosadapter base URLs for every port in an inclusive range.

    Args:
        ip: taosadapter host IP, e.g. "172.26.10.87".
        port_range: "start:end" string; both ends are inclusive.

    Returns:
        List of "http://<ip>:<port>" strings, one per port.

    Raises:
        ValueError: if port_range is not two ':'-separated integers.
    """
    start_str, end_str = port_range.split(":")
    start_port, end_port = int(start_str), int(end_str)
    return [f"http://{ip}:{port}" for port in range(start_port, end_port + 1)]


if __name__ == "__main__":
    # Guarded so the module can be imported (and the helper reused/tested)
    # without requiring command-line arguments; same output as before when
    # run as a script.
    print(build_urls(sys.argv[1], sys.argv[2]))
#!/bin/bash
# Launch (or tear down) node_exporter containers, publishing host ports.
#
# Usage:
#   run_node_exporter.sh <port> <name>                # one container on <port>
#   run_node_exporter.sh <start:end> <name_prefix*>   # one container per port
#   run_node_exporter.sh rm <name_prefix>             # stop & remove matching
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: port range"
        echo "2nd arg: container_hostname prefix"
        echo "eg: ./run_node_exporter.sh 10000 node_exporter_agent1"
        echo "eg: ./run_node_exporter.sh 10000:10010 node_exporter_agent*"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
# A bare port launches a single container; "start:end" launches a fleet, one
# container per port, each mapping host port -> container port 9100.
case "$1" in
    *:*)
        prefix=$(echo "$2" | cut -d '*' -f 1)
        start_port=$(echo "$1" | cut -d ':' -f 1)
        end_port=$(echo "$1" | cut -d ':' -f 2)
        for i in $(seq "$start_port" "$end_port"); do
            if docker ps | grep "${prefix}${i}"; then
                docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
            fi
            docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -p "${i}:9100" taosadapter_node_exporter:v1 /bin/bash
        done
        ;;
    *)
        if docker ps | grep "$2"; then
            docker stop "$2" && docker rm "$2"
        fi
        docker run -itd --name "$2" -h "$2" -p "$1:9100" taosadapter_node_exporter:v1 /bin/bash
        ;;
esac
#docker run -itd --name node_exporter_agent1 -h node_exporter_agent1 -p 10000:9100 taosadapter_node_exporter:v1 /bin/bash
#!/bin/bash
# Stop and remove every stability-test agent container, one run_*.sh per agent
# type. The prefix is quoted so the trailing '*' is passed to the child script
# literally instead of being glob-expanded against files in the current dir.
for agent in collectd icinga2 statsd tcollector telegraf node_exporter; do
    "./${agent}_docker/run_${agent}.sh" rm "${agent}_agent*"
done
#!/bin/bash
# Launch the full stability-test fleet against three taosadapter hosts.
# The taosadapter*_ip values below must be edited by hand; per-agent-type
# counts come from the command-line arguments.
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: collectd_count"
        echo "2nd arg: icinga2_count"
        echo "3rd arg: statsd_count"
        echo "4th arg: tcollector_count"
        echo "5th arg: telegraf_count"
        echo "6th arg: node_exporter port range"
        echo "eg: ./run_all.sh 10 10 1 10 50 10000:10010"
        exit 0
        ;;
esac
# Name the positional args once and use the names below (previously these
# variables were assigned but never used and $1..$6 were repeated inline).
collectd_count=$1
icinga2_count=$2
statsd_count=$3
tcollector_count=$4
telegraf_count=$5
node_exporter_count=$6
taosadapter1_ip=172.26.10.86
taosadapter2_ip=172.26.10.85
taosadapter3_ip=172.26.10.84
# One identical batch of agents per taosadapter host; ${!ip_var} resolves
# taosadapter<n>_ip indirectly so the batch body is written only once.
for n in 1 2 3; do
    ip_var="taosadapter${n}_ip"
    ip=${!ip_var}
    ./collectd_docker/run_collectd.sh "$collectd_count" "taosadapter${n}_collectd_agent*" "$ip" 6047 1
    ./icinga2_docker/run_icinga2.sh "$icinga2_count" "taosadapter${n}_icinga2_agent*" "$ip" 6048 1
    ./statsd_docker/run_statsd.sh "$statsd_count" "taosadapter${n}_statsd_agent" "$ip" 6044
    ./tcollector_docker/run_tcollector.sh "$tcollector_count" "taosadapter${n}_tcollector_agent*" "$ip" 6049
    ./telegraf_docker/run_telegraf.sh "$telegraf_count" "taosadapter${n}_telegraf_agent*" "$ip" 6041 10s "taosadapter${n}_telegraf"
done
./node_exporter_docker/run_node_exporter.sh "$node_exporter_count" "node_exporter_agent*"
# Dockerfile for a statsd agent container used in taosadapter stability tests.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-06
WORKDIR /root
ARG DEBIAN_FRONTEND=noninteractive
# SECURITY(review): statsd.tar.gz is fetched over plain HTTP with no checksum;
# pin a checksum or use an HTTPS mirror when this image is next reworked.
# BUG FIX: the tarball is now removed before `cd statsd` — previously
# `rm -rf statsd.tar.gz` ran inside the statsd directory, so the archive in
# /root was never deleted and stayed in the image layer.
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends nodejs devscripts debhelper wget netcat-traditional npm && \
    wget http://39.105.163.10:9000/statsd.tar.gz && \
    tar -xvf statsd.tar.gz && \
    rm -rf /root/statsd.tar.gz && \
    cd statsd && \
    npm install && \
    npm audit fix && \
    apt-get remove -y wget && \
    rm -rf /var/lib/apt/lists/*
COPY config.js /root/statsd/config.js
COPY entrypoint.sh /entrypoint.sh
# Defaults, overridden via `docker run -e ...` by run_statsd.sh.
ENV TaosadapterIp=127.0.0.1 \
    TaosadapterPort=6044
ENTRYPOINT ["/entrypoint.sh"]
/*
Graphite Required Variable:
(Leave this unset to avoid sending stats to Graphite.
Set debug flag and leave this unset to run in 'dry' debug mode -
useful for testing statsd clients without a Graphite server.)
graphiteHost: hostname or IP of Graphite server
Optional Variables:
graphitePort: port for the graphite text collector [default: 2003]
graphitePicklePort: port for the graphite pickle collector [default: 2004]
graphiteProtocol: either 'text' or 'pickle' [default: 'text']
backends: an array of backends to load. Each backend must exist
by name in the directory backends/. If not specified,
the default graphite backend will be loaded.
* example for console and graphite:
[ "./backends/console", "./backends/graphite" ]
servers: an array of server configurations.
If not specified, the server, address,
address_ipv6, and port top-level configuration
options are used to configure a single server for
backwards-compatibility
Each server configuration supports the following keys:
server: the server to load. The server must exist by name in the directory
servers/. If not specified, the default udp server will be loaded.
* example for tcp server:
"./servers/tcp"
address: address to listen on [default: 0.0.0.0]
address_ipv6: defines if the address is an IPv4 or IPv6 address [true or false, default: false]
port: port to listen for messages on [default: 8125]
socket: (only for tcp servers) path to unix domain socket which will be used to receive
metrics [default: undefined]
socket_mod: (only for tcp servers) file mode which should be applied to unix domain socket, relevant
only if socket option is used [default: undefined]
debug: debug flag [default: false]
mgmt_address: address to run the management TCP interface on
[default: 0.0.0.0]
mgmt_port: port to run the management TCP interface on [default: 8126]
title: Allows for overriding the process title. [default: statsd]
if set to false, will not override the process title and let the OS set it.
The length of the title has to be less than or equal to the binary name + cli arguments
NOTE: This does not work on Mac's with node versions prior to v0.10
healthStatus: default health status to be returned and statsd process starts ['up' or 'down', default: 'up']
dumpMessages: log all incoming messages
flushInterval: interval (in ms) to flush metrics to each backend
percentThreshold: for time information, calculate the Nth percentile(s)
(can be a single value or list of floating-point values)
negative values mean to use "top" Nth percentile(s) values
[%, default: 90]
flush_counts: send stats_counts metrics [default: true]
keyFlush: log the most frequently sent keys [object, default: undefined]
interval: how often to log frequent keys [ms, default: 0]
percent: percentage of frequent keys to log [%, default: 100]
log: location of log file for frequent keys [default: STDOUT]
deleteIdleStats: don't send values to graphite for inactive counters, sets, gauges, or timers
as opposed to sending 0. For gauges, this unsets the gauge (instead of sending
the previous value). Can be individually overridden. [default: false]
deleteGauges: don't send values to graphite for inactive gauges, as opposed to sending the previous value [default: false]
gaugesMaxTTL: number of flush cycles to wait before the gauge is marked as inactive, to use in combination with deleteGauges [default: 1]
deleteTimers: don't send values to graphite for inactive timers, as opposed to sending 0 [default: false]
deleteSets: don't send values to graphite for inactive sets, as opposed to sending 0 [default: false]
deleteCounters: don't send values to graphite for inactive counters, as opposed to sending 0 [default: false]
prefixStats: prefix to use for the statsd statistics data for this running instance of statsd [default: statsd]
applies to both legacy and new namespacing
keyNameSanitize: sanitize all stat names on ingress [default: true]
If disabled, it is up to the backends to sanitize keynames
as appropriate per their storage requirements.
calculatedTimerMetrics: List of timer metrics that will be sent. Default will send all metrics.
To filter on percents and top percents: append '_percent' to the metric name.
Example: calculatedTimerMetrics: ['count', 'median', 'upper_percent', 'histogram']
console:
prettyprint: whether to prettyprint the console backend
output [true or false, default: true]
log: log settings [object, default: undefined]
backend: where to log: stdout or syslog [string, default: stdout]
application: name of the application for syslog [string, default: statsd]
level: log level for [node-]syslog [string, default: LOG_INFO]
graphite:
legacyNamespace: use the legacy namespace [default: true]
globalPrefix: global prefix to use for sending stats to graphite [default: "stats"]
prefixCounter: graphite prefix for counter metrics [default: "counters"]
prefixTimer: graphite prefix for timer metrics [default: "timers"]
prefixGauge: graphite prefix for gauge metrics [default: "gauges"]
prefixSet: graphite prefix for set metrics [default: "sets"]
globalSuffix: global suffix to use for sending stats to graphite [default: ""]
This is particularly useful for sending per host stats by
settings this value to: require('os').hostname().split('.')[0]
repeater: an array of hashes of the form host: and port:
that details other statsd servers to which the received
packets should be "repeated" (duplicated to).
e.g. [ { host: '10.10.10.10', port: 8125 },
{ host: 'observer', port: 88125 } ]
repeaterProtocol: whether to use udp4, udp6, or tcp for repeaters.
["udp4", "udp6", or "tcp" default: "udp4"]
histogram: for timers, an array of mappings of strings (to match metrics) and
corresponding ordered non-inclusive upper limits of bins.
For all matching metrics, histograms are maintained over
time by writing the frequencies for all bins.
'inf' means infinity. A lower limit of 0 is assumed.
default: [], meaning no histograms for any timer.
First match wins. examples:
* histogram to only track render durations, with unequal
class intervals and catchall for outliers:
[ { metric: 'render', bins: [ 0.01, 0.1, 1, 10, 'inf'] } ]
* histogram for all timers except 'foo' related,
equal class interval and catchall for outliers:
[ { metric: 'foo', bins: [] },
{ metric: '', bins: [ 50, 100, 150, 200, 'inf'] } ]
automaticConfigReload: whether to watch the config file and reload it when it
changes. The default is true. Set this to false to disable.
*/
{
  // statsd runtime config. TaosadapterIp / TaosadapterPort below are literal
  // placeholder tokens: the container's entrypoint.sh rewrites them (sed)
  // with the real env values before statsd starts. Do not rename them.
  graphitePort: 2003
, graphiteHost: "127.0.0.1"
, port: 8125
  // console + graphite backends give local visibility; the repeater backend
  // duplicates every received packet to each target in `repeater`.
, backends: [ "./backends/console", "./backends/graphite", "./backends/repeater" ]
, repeater: [ { host: '127.0.0.1', port: 8125 }, { host:'TaosadapterIp', port: TaosadapterPort } ]
}
#!/bin/bash
# Container entrypoint: point statsd's repeater at taosadapter, start statsd,
# push 100 seed counters over UDP so data flows immediately, then block to
# keep the container alive.
# Quote the whole sed script so empty/unset variables cannot split it.
sed -i "s/TaosadapterIp/${TaosadapterIp}/g;s/TaosadapterPort/${TaosadapterPort}/g;" /root/statsd/config.js
nohup node /root/statsd/stats.js /root/statsd/config.js &
# Give statsd time to bind its UDP port before sending metrics.
sleep 10
for i in $(seq 1 100); do
    echo "${HOSTNAME}.count${i}:55|c" | nc -w 1 -u 127.0.0.1 8125
done
tail -f /dev/null
#!/bin/bash
# Launch (or tear down) statsd agent containers that feed taosadapter.
#
# Usage:
#   run_statsd.sh <agent_count> <hostname_prefix> <TaosadapterIp> <TaosadapterPort>
#   run_statsd.sh rm <hostname_prefix>   # stop & remove matching containers
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: agent_count"
        echo "2nd arg: container_hostname prefix"
        echo "3rd arg: TaosadapterIp"
        echo "4th arg: TaosadapterPort"
        echo "eg: ./run_statsd.sh 1 statsd_agent1 172.26.10.86 6044"
        echo "eg: ./run_statsd.sh 2 statsd_agent* 172.26.10.86 6044"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
if [ -z "$3" ]; then
    echo "please input 3rd arg"
    exit 1
fi
if [ -z "$4" ]; then
    echo "please input 4th arg"
    exit 1
fi
if [ "$1" -eq 1 ]; then
    # Single container: $2 is used verbatim as the container name and hostname.
    if docker ps | grep "$2"; then
        docker stop "$2" && docker rm "$2"
    fi
    docker run -itd --name "$2" -h "$2" -e TaosadapterIp="$3" -e TaosadapterPort="$4" taosadapter_statsd:v1 /bin/bash
else
    # Multiple containers: strip the trailing '*' from $2 and append 1..$1.
    prefix=$(echo "$2" | cut -d '*' -f 1)
    for i in $(seq 1 "$1"); do
        if docker ps | grep "${prefix}${i}"; then
            docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
        fi
        docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -e TaosadapterIp="$3" -e TaosadapterPort="$4" taosadapter_statsd:v1 /bin/bash
    done
fi
#docker run -itd --name statsd_agent1 -h statsd_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6044 taosadapter_statsd:v1 /bin/bash
# Dockerfile for a tcollector agent container used in taosadapter stability tests.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-06
WORKDIR /root
ARG DEBIAN_FRONTEND=noninteractive
# BUG FIX: clone over HTTPS — GitHub permanently disabled the unauthenticated
# git:// protocol, so `git clone git://github.com/...` fails. ca-certificates
# is added explicitly because --no-install-recommends suppresses it and HTTPS
# cloning needs it.
# NOTE(review): the `python` package may be unavailable on Ubuntu 20.04
# (focal ships python2 / python-is-python3 instead) — confirm which
# interpreter tcollector's shebang expects and pin that package.
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends git ca-certificates python && \
    git clone https://github.com/OpenTSDB/tcollector.git && \
    apt-get remove -y git && \
    rm -rf /var/lib/apt/lists/*
COPY config.py /root/tcollector/collectors/etc/config.py
COPY entrypoint.sh /entrypoint.sh
# Defaults, overridden via `docker run -e ...` by run_tcollector.sh.
ENV TaosadapterIp=127.0.0.1 \
    TaosadapterPort=6049
ENTRYPOINT ["/entrypoint.sh"]
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2010 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
# This 'onload' function will be called by tcollector when it starts up.
# You can put any code here that you want to load inside the tcollector.
# This also gives you a chance to override the options from the command
# line or to add custom sanity checks on their values.
# You can also use this to change the global tags that will be added to
# every single data point. For instance if you have multiple different
# pools or clusters of machines, you might wanna lookup the name of the
# pool or cluster the current host belongs to and add it to the tags.
# Throwing an exception here will cause the tcollector to die before it
# starts doing any work.
# Python files in this directory that don't have an "onload" function
# will be imported by tcollector too, but no function will be called.
# When this file executes, you can assume that its directory is in
# sys.path, so you can import other Python modules from this directory
# or its subdirectories.
import os
import sys
def onload(options, tags):
    """Startup hook invoked by tcollector; intentionally a no-op here.

    Args:
        options: the options object produced by the OptionParser.
        tags: dict mapping global tag names to tag values; mutations here
            would apply to every data point tcollector emits.
    """
def get_defaults():
    """Configuration values to use as defaults in the code

    This is called by the OptionParser.
    """
    # Collectors live next to the tcollector entry script.
    default_cdir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'collectors')
    # NOTE: TaosadapterPort and 'TaosadapterIp' below are literal placeholder
    # tokens; the container's entrypoint.sh rewrites them (sed) with real
    # values before tcollector runs. Until that substitution happens this
    # file is NOT valid importable Python (TaosadapterPort is a bare name).
    defaults = {
        'verbose': False,
        'no_tcollector_stats': False,
        'evictinterval': 6000,
        'dedupinterval': 300,
        'deduponlyzero': False,
        'allowed_inactivity_time': 600,
        'dryrun': False,
        'maxtags': 8,
        'http_password': False,
        'reconnectinterval': 0,
        'http_username': False,
        'port': TaosadapterPort,
        'pidfile': '/var/run/tcollector.pid',
        'http': False,
        'http_api_path': "api/put",
        'tags': [],
        'remove_inactive_collectors': False,
        'host': 'TaosadapterIp',
        'logfile': '/var/log/tcollector.log',
        'cdir': default_cdir,
        'ssl': False,
        'stdin': False,
        'daemonize': False,
        'hosts': False,
        "monitoring_interface": None,
        "monitoring_port": 13280,
        "namespace_prefix": "",
    }
    return defaults
#!/bin/bash
# Container entrypoint: point tcollector at taosadapter, start it, then block
# to keep the container alive.
# Quote the whole sed script so empty/unset variables cannot split it.
sed -i "s/TaosadapterIp/${TaosadapterIp}/g;s/TaosadapterPort/${TaosadapterPort}/g;" /root/tcollector/collectors/etc/config.py
/root/tcollector/tcollector start
tail -f /dev/null
#!/bin/bash
# Launch (or tear down) tcollector agent containers that feed taosadapter.
#
# Usage:
#   run_tcollector.sh <agent_count> <hostname_prefix> <TaosadapterIp> <TaosadapterPort>
#   run_tcollector.sh rm <hostname_prefix>   # stop & remove matching containers
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: agent_count"
        echo "2nd arg: container_hostname prefix"
        echo "3rd arg: TaosadapterIp"
        echo "4th arg: TaosadapterPort"
        echo "eg: ./run_tcollector.sh 1 tcollector_agent1 172.26.10.86 6049"
        echo "eg: ./run_tcollector.sh 2 tcollector_agent* 172.26.10.86 6049"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
if [ -z "$3" ]; then
    echo "please input 3rd arg"
    exit 1
fi
if [ -z "$4" ]; then
    echo "please input 4th arg"
    exit 1
fi
if [ "$1" -eq 1 ]; then
    # Single container: $2 is used verbatim as the container name and hostname.
    if docker ps | grep "$2"; then
        docker stop "$2" && docker rm "$2"
    fi
    docker run -itd --name "$2" -h "$2" -e TaosadapterIp="$3" -e TaosadapterPort="$4" taosadapter_tcollector:v1 /bin/bash
else
    # Multiple containers: strip the trailing '*' from $2 and append 1..$1.
    prefix=$(echo "$2" | cut -d '*' -f 1)
    for i in $(seq 1 "$1"); do
        if docker ps | grep "${prefix}${i}"; then
            docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
        fi
        docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -e TaosadapterIp="$3" -e TaosadapterPort="$4" taosadapter_tcollector:v1 /bin/bash
    done
fi
#docker run -itd --name tcollector_agent1 -h tcollector_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6049 taosadapter_tcollector:v1 /bin/bash
# Dockerfile for a telegraf agent container used in taosadapter stability tests.
FROM ubuntu:20.04
ENV REFRESHED_AT=2021-12-06
ARG DEBIAN_FRONTEND=noninteractive
WORKDIR /root
# ca-certificates is added explicitly: --no-install-recommends suppresses it
# and both the HTTPS key download and the HTTPS apt repo below require it
# (so it must NOT be removed in the cleanup step).
RUN set -ex; \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends gnupg curl ca-certificates systemctl
# NOTE(review): apt-key is deprecated; prefer a keyring file referenced with
# signed-by= in the sources entry when this image is next reworked.
# The previously sourced /etc/lsb-release was dropped: its DISTRIB_* variables
# were never used ("focal" is hardcoded in the sources line).
RUN set -ex; \
    curl -fsSL https://repos.influxdata.com/influxdb.key | apt-key add - && \
    echo 'deb https://repos.influxdata.com/ubuntu focal stable' > /etc/apt/sources.list.d/influxdb.list && \
    apt-get update -y --fix-missing && \
    apt-get install -y --no-install-recommends telegraf && \
    apt-get remove -y gnupg curl && \
    rm -rf /var/lib/apt/lists/*
COPY entrypoint.sh /entrypoint.sh
COPY telegraf.conf /etc/telegraf/telegraf.conf
# Defaults, overridden via `docker run -e ...` by run_telegraf.sh.
ENV TelegrafInterval=1s \
    TaosadapterIp=127.0.0.1 \
    TaosadapterPort=6048 \
    Dbname=telegraf
ENTRYPOINT ["/entrypoint.sh"]
#!/bin/bash
# Container entrypoint: point telegraf's output at taosadapter, apply the
# interval and database name, restart telegraf, then block to stay alive.
# Quote the whole sed script so empty/unset variables cannot split it.
sed -i "s/TaosadapterIp/${TaosadapterIp}/g;s/TaosadapterPort/${TaosadapterPort}/g;s/TelegrafInterval/${TelegrafInterval}/g;s/Dbname/${Dbname}/g;" /etc/telegraf/telegraf.conf
systemctl restart telegraf
tail -f /dev/null
#!/bin/bash
# Launch (or tear down) telegraf agent containers that feed taosadapter.
#
# Usage:
#   run_telegraf.sh <agent_count> <hostname_prefix> <TaosadapterIp> <TaosadapterPort> <TelegrafInterval> <Dbname>
#   run_telegraf.sh rm <hostname_prefix>   # stop & remove matching containers
case "$1" in
    -h|--help)
        echo "Usage:"
        echo "1st arg: agent_count"
        echo "2nd arg: container_hostname prefix"
        echo "3rd arg: TaosadapterIp"
        echo "4th arg: TaosadapterPort"
        echo "5th arg: TelegrafInterval"
        echo "6th arg: Dbname"
        echo "eg: ./run_telegraf.sh 1 telegraf_agent1 172.26.10.86 6041 1s telegraf"
        echo "eg: ./run_telegraf.sh 2 telegraf_agent* 172.26.10.86 6041 1s telegraf"
        exit 0
        ;;
esac
# Removal mode: stop then remove every running container whose name matches $2.
if [ "$1" == "rm" ]; then
    docker ps | grep "$2" | awk '{print $1}' | xargs -r docker stop | xargs -r docker rm
    exit 0
fi
# Validate positional arguments; exit non-zero so callers can detect misuse.
if [ -z "$1" ]; then
    echo "please input 1st arg"
    exit 1
fi
if [ -z "$2" ]; then
    echo "please input 2nd arg"
    exit 1
fi
if [ -z "$3" ]; then
    echo "please input 3rd arg"
    exit 1
fi
if [ -z "$4" ]; then
    echo "please input 4th arg"
    exit 1
fi
if [ -z "$5" ]; then
    echo "please input 5th arg"
    exit 1
fi
if [ -z "$6" ]; then
    echo "please input 6th arg"
    exit 1
fi
if [ "$1" -eq 1 ]; then
    # Single container: $2 is used verbatim as the container name and hostname.
    if docker ps | grep "$2"; then
        docker stop "$2" && docker rm "$2"
    fi
    # BUG FIX: -e Dbname="$6" was missing in this branch (the multi-container
    # branch passed it), so single containers kept the image's default Dbname.
    docker run -itd --name "$2" -h "$2" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e TelegrafInterval="$5" -e Dbname="$6" taosadapter_telegraf:v1 /bin/bash
else
    # Multiple containers: strip the trailing '*' from $2 and append 1..$1.
    prefix=$(echo "$2" | cut -d '*' -f 1)
    for i in $(seq 1 "$1"); do
        if docker ps | grep "${prefix}${i}"; then
            docker stop "${prefix}${i}" && docker rm "${prefix}${i}"
        fi
        docker run -itd --name "${prefix}${i}" -h "${prefix}${i}" -e TaosadapterIp="$3" -e TaosadapterPort="$4" -e TelegrafInterval="$5" -e Dbname="$6" taosadapter_telegraf:v1 /bin/bash
    done
fi
#docker run -itd --name telegraf_agent1 -h telegraf_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6041 -e TelegrafInterval=1s -e Dbname=telegraf taosadapter_telegraf:v1 /bin/bash
"insert into db1.stb11 values (nahr,);"
"insert into db1.stb22 (now,1,1);"
"insert into db1.stb13 (now,1,1) (now, 1,1, 2);"
"insert into db1.tb1 (now, 1,2 ,3 ,4);"
"insert into db1.stb16 values (now, null);"
"insert into db1.stb15 values (1614530008000,a, b, %$, d, e, f, g);"
"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000"
"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068"
"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000"
stb15 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC
stb15 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
stb15 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
"{""metric"": ""stb16"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb16"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb16"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"insert into db2.stb21 values (nahr,);"
"insert into db2.stb32 (now,1,1);"
"insert into db2.stb23 (now,1,1) (now, 1,1, 2);"
"insert into db2.tb1 (now, 1,2 ,3 ,4);"
"insert into db2.stb26 values (now, null);"
"insert into db2.stb25 values (1614530008000,a, b, %$, d, e, f, g);"
"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000"
"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068"
"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000"
stb25 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC
stb25 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
stb25 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
"{""metric"": ""stb26"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb26"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb26"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"insert into db3.stb31 values (nahr,);"
"insert into db3.stb12 (now,1,1);"
"insert into db3.stb33 (now,1,1) (now, 1,1, 2);"
"insert into db3.tb1 (now, 1,2 ,3 ,4);"
"insert into db3.stb36 values (now, null);"
"insert into db3.stb35 values (1614530008000,a, b, %$, d, e, f, g);"
"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000"
"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068"
"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000"
stb35 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC
stb35 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
stb35 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
"{""metric"": ""stb36"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb36"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"{""metric"": ""stb36"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
"insert into long_db1.stb11 values (nahr,);"
"insert into long_db1.stb22 (now,1,1);"
"insert into long_db1.stb13 (now,1,1) (now, 1,1, 2);"
"insert into long_db1.tb1 (now, 1,2 ,3 ,4);"
"insert into long_db1.stb16 values (now, null);"
"insert into long_db1.stb15 values (1614530008000,a, b, %$, d, e, f, g);"
"insert into long_db2.stb21 values (nahr,);"
"insert into long_db2.stb32 (now,1,1);"
"insert into long_db2.stb23 (now,1,1) (now, 1,1, 2);"
"insert into long_db2.tb1 (now, 1,2 ,3 ,4);"
"insert into long_db2.stb26 values (now, null);"
"insert into long_db2.stb25 values (1614530008000,a, b, %$, d, e, f, g);"
"insert into long_db3.stb31 values (nahr,);"
"insert into long_db3.stb12 (now,1,1);"
"insert into long_db3.stb33 (now,1,1) (now, 1,1, 2);"
"insert into long_db3.tb1 (now, 1,2 ,3 ,4);"
"insert into long_db3.stb36 values (now, null);"
"insert into long_db3.stb35 values (1614530008000,a, b, %$, d, e, f, g);"
\ No newline at end of file
select * from db0.stb1 limit 1000;
select * from db0.stb3 limit 10000;
select * from db0.stb5 limit 100000;
"select avg(c1), max(c2), count(c3), sum(c4) from db0.stb1 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-08 00:00:00"" interval (1h);"
"select avg(c1), max(c2), count(c3), sum(c4) from db0.stb1 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-08 00:00:00"" group by c7;"
show db0.stables;
select count(tbname) from db1.tb1;
select count(*) from db2.tb2;
select * from db3.tb3 limit 10;
select * from db2.stb25 limit 10000;
select * from db3.stb31 limit 100000;
select * from db1.stb16 limit 100000;
"select avg(c1), max(c2), count(c3), sum(c4) from db3.stb31 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" group by c7;"
"select avg(c1), max(c2), count(c3), sum(c4) from db2.stb23 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (1s);"
"select avg(c1), max(c2), count(c3), sum(c4) from db1.stb13 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (10s);"
select count(tbname) from long_db1.tb1;
select count(*) from long_db2.tb2;
select * from long_db3.tb3 limit 10;
select * from long_db2.stb25 limit 10000;
select * from long_db3.stb31 limit 100000;
select * from long_db1.stb16 limit 100000;
"select avg(c1), max(c2), count(c3), sum(c4) from long_db3.stb31 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" group by c7;"
"select avg(c1), max(c2), count(c3), sum(c4) from long_db2.stb23 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (1s);"
"select avg(c1), max(c2), count(c3), sum(c4) from long_db1.stb13 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (10s);"
show taosadapter1_collectd.stables;
select count(*) from taosadapter1_collectd.`cpu.6.cpu.interrupt`;
select last(*) from taosadapter2_collectd.`cpu.2.cpu.interrupt`;
select * from taosadapter3_collectd.`cpu.2.cpu.system` limit 100;
select count(*) from taosadapter1_telegraf.mem;
select last(*) from taosadapter2_telegraf.cpu;
select * from taosadapter3_telegraf.kernel;
select count(*) from taosadapter1_tcollector.`net.stat.tcp.retransmit`;
select last(*) from taosadapter2_tcollector.`proc.meminfo.shmem`;
select * from taosadapter3_tcollector.`sys.numa.allocation`;
select count(*) from taosadapter1_icinga2.`icinga.host.rta_min`;
select last(*) from taosadapter2_icinga2.`icinga.host.acknowledgement`;
select * from taosadapter3_icinga2.`icinga.host.rta_crit`;
select count(*) from taosadapter1_node_exporter.`node_time_seconds`;
select last(*) from taosadapter2_node_exporter.`go_memstats_next_gc_bytes`;
select * from taosadapter3_node_exporter.`node_sockstat_TCP_mem`;
select count(*) from taosadapter2_statsd.taosadapter2_statsd_agent_count50;
select c199 from db0.stb2;
select null from db1.stb11;
select * from db2.tb0;
select blank from db3.stb31 limit 100000;
select null from long_db1.stb11;
select * from long_db2.tb0;
select blank from long_db3.stb31 limit 100000;
select count(*) from taosadapter1_collectd.cpu.6.cpu.interrupt;
select last(*) from taosadapter2_collectd.cpu.2.cpu.interr;
select * from taosadapter3_coll*.`cpu.2.cpu.system` limit 100;
select count(*) from taosadapter1_telegraf.`mem`;
select last(*) from `taosadapter2_telegraf`.cpu;
select * from taos_telegraf.kernel;
select count(*) from `taosadapter1_tcollector`.`net.stat.tcp.retransmit`;
select last(*) from taosadapter2_`tcollector`.`proc.meminfo.shmem`;
select * from taosadapter3_tcollector.sys.numa.`allocation`;
select count(*) from taosadapter1_icinga2.:icinga.host.rta_min;
"select last(*) from taosadapter2_icinga2.""""""`icinga.host.acknowledgement`;"
select * from taosadapter3_icinga2.```icinga.host.rta_crit`;
select count(*) from taosadapter1_node_exporter..`node_time_seconds`;
select last(*) from ..taosadapter2_node_exporter.`go_memstats_next_gc_bytes`;
select * from taosa%%dapter3___node_exporter.`node_sockstat_TCP_mem`;
select count(*) from taosadapter2_statsd%.taosadapter2_statsd_agent_count50;
\ No newline at end of file
#!/bin/bash
# Run the one-shot jmeter plans once, in order, each appending to its own
# log; then replay the short-insert plan forever.
for plan in createStaticData longInsertData error_insert query; do
jmeter -n -t "${plan}.jmx" >> "${plan}.log"
done
while true
do
jmeter -n -t shortInsertData.jmx >> ./shortInsertData.log
done
#!/bin/bash
# Continuously replay the short-insert jmeter plan, appending its output.
while :
do
jmeter -n -t shortInsertData.jmx >> ./shortInsertData.log
done
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册