Unverified commit 3adbf18b, authored by zhuwenxing, committed by GitHub

[test]Test rolling update for operator (#22268)

Signed-off-by: zhuwenxing <wenxing.zhu@zilliz.com>
Parent 8487f53d
# Exit immediately for non-zero status
set -e
release=${1:-"milvus-chaos"}
ns=${2:-"chaos-testing"}
kubectl delete milvus ${release} -n=${ns} || echo "delete milvus ${release} failed"
# uninstall helm release
helm_release_list=('minio' 'etcd' 'kafka' 'pulsar')
for helm_release in ${helm_release_list[*]}; do
echo "unistall helm release ${release}-${helm_release}"
helm uninstall ${release}-${helm_release} -n=${ns} || echo "delete helm release ${release}-${helm_release} failed"
done
# delete pvc for storage
pvc_list=('minio')
for pvc in ${pvc_list[*]}; do
echo "delete pvc with label release=${release}-${pvc}"
kubectl delete pvc -l release=${release}-${pvc} -n=${ns} || echo "delete pvc with label release=${release}-${pvc} failed"
done
# delete pvc of etcd and message queue
pvc_list=('etcd' 'kafka' 'pulsar')
for pvc in ${pvc_list[*]}; do
echo "delete pvc with label app.kubernetes.io/instance=${release}-${pvc}"
kubectl delete pvc -l app.kubernetes.io/instance=${release}-${pvc} -n=${ns} || echo "delete pvc with label release=${release}-${pvc} failed"
done
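A minimal usage sketch for this teardown script, assuming it is saved as uninstall_milvus.sh (the file name is not part of the diff); both positional arguments are optional and fall back to the defaults defined above:

# hypothetical invocation; the script name is an assumption, not taken from this commit
bash uninstall_milvus.sh milvus-chaos chaos-testing
# with no arguments the defaults "milvus-chaos" and "chaos-testing" are used
bash uninstall_milvus.sh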
# This is a sample to deploy a milvus cluster using kafka as the message stream with minimum cost of resources.
apiVersion: milvus.io/v1beta1
kind: Milvus
metadata:
  name: kafka-demo
  namespace: chaos-testing
  labels:
    app: milvus
spec:
  mode: standalone
  config:
    quotaAndLimits:
      enable: false
    milvus:
      log:
        level: debug
  components:
    enableRollingUpdate: true
    imageUpdateMode: rollingUpgrade
    image: milvusdb/milvus:2.2.0-20230208-2e4d64ec
    disableMetric: false
    dataNode:
      replicas: 2
    indexNode:
      replicas: 2
    queryNode:
      replicas: 2
    mixCoord:
      replicas: 1
  dependencies:
    msgStreamType: kafka
    etcd:
      inCluster:
        deletionPolicy: Retain
        pvcDeletion: false
        values:
          replicaCount: 3
    kafka:
      inCluster:
        deletionPolicy: Retain
        pvcDeletion: false
        values:
          replicaCount: 3
          defaultReplicationFactor: 2
          metrics:
            kafka:
              enabled: true
            serviceMonitor:
              enabled: true
            jmx:
              enabled: true
    pulsar:
      inCluster:
        deletionPolicy: Retain
        pvcDeletion: false
        values:
          components:
            autorecovery: false
            functions: false
            toolset: false
            pulsar_manager: false
          monitoring:
            prometheus: true
            grafana: false
            node_exporter: false
            alert_manager: false
          proxy:
            replicaCount: 1
            resources:
              requests:
                cpu: 0.01
                memory: 256Mi
            configData:
              PULSAR_MEM: >
                -Xms256m -Xmx256m
              PULSAR_GC: >
                -XX:MaxDirectMemorySize=256m
          bookkeeper:
            replicaCount: 2
            resources:
              requests:
                cpu: 0.01
                memory: 256Mi
            configData:
              PULSAR_MEM: >
                -Xms256m
                -Xmx256m
                -XX:MaxDirectMemorySize=256m
              PULSAR_GC: >
                -Dio.netty.leakDetectionLevel=disabled
                -Dio.netty.recycler.linkCapacity=1024
                -XX:+UseG1GC -XX:MaxGCPauseMillis=10
                -XX:+ParallelRefProcEnabled
                -XX:+UnlockExperimentalVMOptions
                -XX:+DoEscapeAnalysis
                -XX:ParallelGCThreads=32
                -XX:ConcGCThreads=32
                -XX:G1NewSizePercent=50
                -XX:+DisableExplicitGC
                -XX:-ResizePLAB
                -XX:+ExitOnOutOfMemoryError
                -XX:+PerfDisableSharedMem
                -XX:+PrintGCDetails
          zookeeper:
            replicaCount: 1
            resources:
              requests:
                cpu: 0.01
                memory: 256Mi
            configData:
              PULSAR_MEM: >
                -Xms256m
                -Xmx256m
              PULSAR_GC: >
                -Dcom.sun.management.jmxremote
                -Djute.maxbuffer=10485760
                -XX:+ParallelRefProcEnabled
                -XX:+UnlockExperimentalVMOptions
                -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC
                -XX:+PerfDisableSharedMem
                -Dzookeeper.forceSync=no
          broker:
            replicaCount: 1
            resources:
              requests:
                cpu: 0.01
                memory: 256Mi
            configData:
              PULSAR_MEM: >
                -Xms256m
                -Xmx256m
              PULSAR_GC: >
                -XX:MaxDirectMemorySize=256m
                -Dio.netty.leakDetectionLevel=disabled
                -Dio.netty.recycler.linkCapacity=1024
                -XX:+ParallelRefProcEnabled
                -XX:+UnlockExperimentalVMOptions
                -XX:+DoEscapeAnalysis
                -XX:ParallelGCThreads=32
                -XX:ConcGCThreads=32
                -XX:G1NewSizePercent=50
                -XX:+DisableExplicitGC
                -XX:-ResizePLAB
                -XX:+ExitOnOutOfMemoryError
    storage:
      inCluster:
        deletionPolicy: Retain
        pvcDeletion: false
        values:
          mode: distributed
\ No newline at end of file
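As a hedged sketch of how the rolling update exercised by this test could be driven by hand (the manifest file name, readiness condition name, and target image tag below are illustrative assumptions, not taken from this commit): apply the CR, wait for it to become ready, then change spec.components.image so the operator performs a rolling upgrade, which is what enableRollingUpdate: true and imageUpdateMode: rollingUpgrade above enable.

# assumed manifest file name
kubectl apply -f kafka-demo.yaml
# condition name is an assumption; adjust to whatever the operator actually reports
kubectl wait --for=condition=MilvusReady milvus/kafka-demo -n chaos-testing --timeout=20m
# bump the image to trigger the rolling upgrade; the target tag is illustrative
kubectl patch milvus kafka-demo -n chaos-testing --type merge \
  -p '{"spec": {"components": {"image": "milvusdb/milvus:2.2.0-latest"}}}'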
@@ -3,8 +3,9 @@
 # Exit immediately for non zero status
 set -e
-ns_name=$1
-instance_name=$2
+instance_name=$1
+ns_name=${2:-"chaos-testing"}
+log_dir=${3:-"k8s_logs"}
 #show proxy pod log
@@ -32,9 +33,10 @@ echo "check session done"
 array_1=($(kubectl get pod -n ${ns_name} -l "app.kubernetes.io/instance=${instance_name}"| awk 'NR == 1 {next} {print $1}'))
 array_2=($(kubectl get pod -n ${ns_name} -l "app.kubernetes.io/instance=${instance_name}-etcd"| awk 'NR == 1 {next} {print $1}'))
 array_3=($(kubectl get pod -n ${ns_name} -l "release=${instance_name}-minio"| awk 'NR == 1 {next} {print $1}'))
-array_4=($(kubectl get pod -n ${ns_name} -l "release=${instance_name}-pulsar"| awk 'NR == 1 {next} {print $1}'))
+array_4=($(kubectl get pod -n ${ns_name} -l "app.kubernetes.io/instance=${instance_name}-pulsar"| awk 'NR == 1 {next} {print $1}'))
+array_5=($(kubectl get pod -n ${ns_name} -l "app.kubernetes.io/instance=${instance_name}-kafka"| awk 'NR == 1 {next} {print $1}'))
-array=(${array_1[@]} ${array_2[@]} ${array_3[@]} ${array_4[@]})
+array=(${array_1[@]} ${array_2[@]} ${array_3[@]} ${array_4[@]} ${array_5[@]})
 echo ${array[@]}
 if [ ! -d $log_dir/pod_log ] || [ ! -d $log_dir/pod_describe ];
......
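After this change the log-collection script takes the instance name as its first argument, with the namespace and log directory falling back to "chaos-testing" and "k8s_logs". A hedged invocation sketch (the script name export_logs.sh is an assumption, not shown in the diff):

# hypothetical file name; arguments: <instance_name> [namespace] [log_dir]
bash export_logs.sh kafka-demo chaos-testing k8s_logs
# namespace and log_dir can be omitted thanks to the new defaults
bash export_logs.sh kafka-demo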