Unverified commit 27af87e4, authored by: Q quicksilver, committed by: GitHub

Used for local development or CI in Kubernetes in Docker (#5061)

* Used for local development or CI in Kubernetes in Docker
Parent c64f3a2a
......@@ -59,7 +59,7 @@ jobs:
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
hadolint_ignore: DL3008
hadolint_ignore: DL3008 SC1091 DL3013 DL3003
- name: Code Check
env:
CHECK_BUILDER: "1"
......
......@@ -6,12 +6,12 @@ on:
push:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/env/**'
- 'build/docker/builder/**'
- '.github/workflows/publish-builder.yaml'
pull_request:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/env/**'
- 'build/docker/builder/**'
- '.github/workflows/publish-builder.yaml'
jobs:
......@@ -33,7 +33,7 @@ jobs:
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
hadolint_ignore: DL3008
hadolint_ignore: DL3008 SC1091 DL3013 DL3003
- name: Get version from system time after release step
id: extracter
run: echo "::set-output name=version::$(date +%Y%m%d-%H%M%S)"
......
......@@ -6,13 +6,13 @@ on:
push:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/test/Dockerfile'
- 'tests/docker/Dockerfile'
- 'tests/python_test/requirements.txt'
- '.github/workflows/publish-test-images.yaml'
pull_request:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/test/Dockerfile'
- 'tests/docker/Dockerfile'
- 'tests/python_test/requirements.txt'
- '.github/workflows/publish-test-images.yaml'
......@@ -29,7 +29,7 @@ jobs:
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check # Default is github-pr-check
hadolint_ignore: DL3008
hadolint_ignore: DL3008 SC1091 DL3013 DL3003
- name: Get version from system time after release step
id: extracter
run: |
......@@ -37,10 +37,11 @@ jobs:
echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
- name: Docker Build
shell: bash
working-directory: tests/python_test
working-directory: tests/docker
run: |
docker build -t milvusdb/pytest:${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }} .
docker tag milvusdb/pytest:${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }} milvusdb/pytest:latest
docker-compose pull --ignore-pull-failures pytest
TAG=${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }} docker-compose build pytest
TAG=latest docker-compose build pytest
- name: Docker Push
if: success() && github.event_name == 'push' && github.repository == 'milvus-io/milvus'
continue-on-error: true
......@@ -48,6 +49,6 @@ jobs:
run: |
docker login -u ${{ secrets.DOCKERHUB_USER }} \
-p ${{ secrets.DOCKERHUB_TOKEN }}
docker push milvusdb/pytest:${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }}
docker push milvusdb/pytest:latest
echo "Push milvusdb/pytest:${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }} Succeeded"
TAG=${{ steps.extracter.outputs.version }}-${{ steps.extracter.outputs.sha_short }} docker-compose push pytest
TAG=latest docker-compose push pytest
echo "Push pytest image Succeeded"
......@@ -9,7 +9,7 @@ pushd "${toplevel}"
if [ "${1-}" = "pull" ]; then
docker-compose pull --ignore-pull-failures ubuntu
docker-compose pull --ignore-pull-failures gdbserver
# docker-compose pull --ignore-pull-failures gdbserver
exit 0
fi
......
apiVersion: v1
kind: Namespace
metadata:
name: logging
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cron-logging
namespace: logging
labels:
k8s-app: cron-logging
spec:
selector:
matchLabels:
k8s-app: cron-logging
template:
metadata:
labels:
k8s-app: cron-logging
spec:
containers:
- name: rsync
image: "eeacms/rsync:latest"
imagePullPolicy: IfNotPresent
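# Mirror pod logs into /var/log/history on the host so they survive pod deletion (see the rsync loop below)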
args:
- /bin/sh
- -c
- >
mkdir -p /var/log/history/;
while true;
do
rsync -ax /var/log/pods/ /var/log/history/;
sleep 1;
done
volumeMounts:
- mountPath: /var/log
name: varlog
volumes:
- hostPath:
path: /var/log
name: varlog
\ No newline at end of file
# from https://github.com/metallb/metallb/tree/v0.9.3/manifests namespace.yaml and metallb.yaml
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- update
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- pods
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: speaker
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: METALLB_ML_LABELS
value: "app=metallb,component=speaker"
- name: METALLB_ML_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: memberlist
key: secretkey
image: metallb/speaker:v0.9.3
imagePullPolicy: Always
name: speaker
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 2
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
image: metallb/controller:v0.9.3
imagePullPolicy: Always
name: controller
ports:
- containerPort: 7472
name: monitoring
resources:
limits:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
beta.kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
serviceAccountName: metrics-server
volumes:
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.2
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
---
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/name: "Metrics-server"
kubernetes.io/cluster-service: "true"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
[
{
"kind": "Kubernetes",
"clusterName": "primary",
"podSubnet": "10.10.0.0/16",
"svcSubnet": "10.255.10.0/24",
"network": "network-1"
},
{
"kind": "Kubernetes",
"clusterName": "remote",
"podSubnet": "10.20.0.0/16",
"svcSubnet": "10.255.20.0/24",
"network": "network-1",
"primaryClusterName": "primary",
"meta": {
"fakeVM": false
}
},
{
"kind": "Kubernetes",
"clusterName": "cross-network-primary",
"podSubnet": "10.30.0.0/16",
"svcSubnet": "10.255.30.0/24",
"network": "network-2"
}
]
# This configures KinD to spin up a k8s cluster with the trustworthy JWT (Service Account Token Volume Projection) feature.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
ipFamily: "ipv4"
kubeProxyMode: "iptables"
nodes:
- role: control-plane
- role: worker
- role: worker
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
metadata:
name: config
controllerManager:
extraArgs:
"v": "4"
scheduler:
extraArgs:
"v": "4"
etcd:
local:
# Run etcd in a tmpfs (in RAM) for performance improvements
dataDir: /tmp/kind-cluster-etcd
apiServer:
extraArgs:
"v": "4"
"service-account-issuer": "kubernetes.default.svc"
"service-account-signing-key-file": "/etc/kubernetes/pki/sa.key"
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
endpoint = ["http://kind-registry:5000"]
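# Note: this mirror assumes the local registry container started by tests/scripts/e2e-k8s.sh
# (KIND_REGISTRY_NAME=kind-registry, KIND_REGISTRY_PORT=5000).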
......@@ -60,7 +60,7 @@ RUN mkdir -p /home/milvus/.vscode-server/extensions \
/home/milvus/.vscode-server-insiders/extensions \
&& chmod -R 777 /home/milvus
COPY --chown=0:0 build/docker/env/entrypoint.sh /
COPY --chown=0:0 build/docker/builder/entrypoint.sh /
RUN wget -qO- "https://github.com/benesch/autouseradd/releases/download/1.2.0/autouseradd-1.2.0-amd64.tar.gz" | tar xz -C / --strip-components 1
......
FROM debian:buster
# arg that specifies the image name (for debugging)
ARG IMAGE_ARG
# arg that specifies the go version to install
ARG GO_VERSION
# add envs:
# - so we can debug with the image name:tag
# - adding gsutil etc. to path (where we will install them)
# - disabling prompts when installing gsutil etc.
# - hinting that we are in a docker container
ENV KRTE_IMAGE=${IMAGE_ARG} \
GOPATH=/home/go \
PATH=/home/go/bin:/usr/local/go/bin:${PATH} \
KIND_VERSION=0.10.0 \
DOCKER_COMPOSE_VERSION=1.29.1 \
CONTAINER=docker
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Install tools needed to:
# - install docker
# - build kind (dockerized)
# - build kubernetes (dockerized, or with bazel)
#
# TODO: the `sed` is a bit of a hack, look into alternatives.
# Why this exists: `service docker start` on debian runs a `cgroupfs_mount` method,
# We're already inside docker though so we can be sure these are already mounted.
# Trying to remount these makes for a very noisy error block in the beginning of
# the pod logs, so we just comment out the call to it... :shrug:
RUN echo "Installing Packages ..." \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
apt-transport-https \
build-essential \
ca-certificates \
curl \
file \
git \
gnupg2 \
kmod \
lsb-release \
mercurial \
pkg-config \
procps \
python3 \
python3-dev \
python3-pip \
python3-setuptools \
rsync \
software-properties-common \
unzip \
jq \
&& python3 -m pip install --no-cache-dir --upgrade pip \
&& rm -rf /var/lib/apt/lists/* \
&& echo "Installing Go ..." \
&& export GO_TARBALL="go${GO_VERSION}.linux-amd64.tar.gz"\
&& curl -fsSL "https://golang.org/dl/${GO_TARBALL}" --output "${GO_TARBALL}" \
&& tar xzf "${GO_TARBALL}" -C /usr/local \
&& rm "${GO_TARBALL}"\
&& mkdir -p "${GOPATH}/bin" \
&& echo "Installing kubectl, helm ..." \
&& curl -fsSL "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" --output /usr/local/bin/kubectl \
&& chmod 755 /usr/local/bin/kubectl \
&& ln -s /usr/local/bin/kubectl /usr/bin/kubectl \
&& curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 \
&& chmod 700 get_helm.sh \
&& ./get_helm.sh \
&& rm ./get_helm.sh \
&& echo "Installing Docker ..." \
&& curl -fsSL "https://download.docker.com/linux/$(. /etc/os-release; echo "$ID")/gpg" | apt-key add - \
&& add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \
$(lsb_release -cs) stable" \
&& apt-get update \
&& apt-get install -y --no-install-recommends docker-ce \
&& rm -rf /var/lib/apt/lists/* \
&& sed -i 's/cgroupfs_mount$/#cgroupfs_mount\n/' /etc/init.d/docker \
&& echo "Installing Docker Compose ..." \
&& curl -fsSL "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
&& chmod +x /usr/local/bin/docker-compose \
&& ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose \
&& echo "Installing KinD ..." \
&& curl -fsSL -o /usr/local/bin/kind "https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-linux-amd64" \
&& chmod +x /usr/local/bin/kind \
&& echo "Ensuring Legacy Iptables ..." \
&& update-alternatives --set iptables /usr/sbin/iptables-legacy \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
# copy in image utility scripts
COPY wrapper.sh /usr/local/bin/
# entrypoint is our wrapper script, in Prow you will need to explicitly re-specify this
ENTRYPOINT ["wrapper.sh"]
# volume for docker in docker, use an emptyDir in Prow
VOLUME ["/var/lib/docker"]
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# wrapper.sh handles setting up things before / after the test command $@
#
# usage: wrapper.sh my-test-command [my-test-args]
#
# Things wrapper.sh handles:
# - starting / stopping docker-in-docker
# -- configuring the docker daemon for IPv6
# - activating GCP service account credentials
# - ensuring GOPATH/bin is in PATH
#
# After handling these things / before cleanup, my-test-command will be invoked,
# and the exit code of my-test-command will be preserved by wrapper.sh
set -o errexit
set -o pipefail
set -o nounset
>&2 echo "wrapper.sh] [INFO] Wrapping Test Command: \`$*\`"
>&2 echo "wrapper.sh] [INFO] Running in: ${KRTE_IMAGE}"
>&2 echo "wrapper.sh] [INFO] See: https://github.com/kubernetes/test-infra/blob/master/images/krte/wrapper.sh"
printf '%0.s=' {1..80} >&2; echo >&2
>&2 echo "wrapper.sh] [SETUP] Performing pre-test setup ..."
cleanup(){
if [[ "${DOCKER_IN_DOCKER_ENABLED:-false}" == "true" ]]; then
>&2 echo "wrapper.sh] [CLEANUP] Cleaning up after Docker in Docker ..."
docker ps -aq | xargs -r docker rm -f || true
service docker stop || true
>&2 echo "wrapper.sh] [CLEANUP] Done cleaning up after Docker in Docker."
fi
}
early_exit_handler() {
>&2 echo "wrapper.sh] [EARLY EXIT] Interrupted, entering handler ..."
if [ -n "${EXIT_VALUE:-}" ]; then
>&2 echo "Original exit code was ${EXIT_VALUE}, not preserving due to interrupt signal"
fi
cleanup
>&2 echo "wrapper.sh] [EARLY EXIT] Completed handler ..."
exit 1
}
trap early_exit_handler TERM INT
# optionally enable ipv6 docker
export DOCKER_IN_DOCKER_IPV6_ENABLED=${DOCKER_IN_DOCKER_IPV6_ENABLED:-false}
if [[ "${DOCKER_IN_DOCKER_IPV6_ENABLED}" == "true" ]]; then
>&2 echo "wrapper.sh] [SETUP] Enabling IPv6 in Docker config ..."
# enable ipv6
sysctl net.ipv6.conf.all.disable_ipv6=0
sysctl net.ipv6.conf.all.forwarding=1
# enable ipv6 iptables
modprobe -v ip6table_nat
>&2 echo "wrapper.sh] [SETUP] Done enabling IPv6 in Docker config."
fi
# optionally enable iptables-nft
export DOCKER_IN_DOCKER_NFT_ENABLED=${DOCKER_IN_DOCKER_NFT_ENABLED:-false}
if [[ "${DOCKER_IN_DOCKER_NFT_ENABLED}" == "true" ]]; then
>&2 echo "wrapper.sh] [SETUP] Enabling iptables-nft ..."
# enable iptables-nft
update-alternatives --set iptables /usr/sbin/iptables-nft
update-alternatives --set ip6tables /usr/sbin/ip6tables-nft
# enable nft iptables module
modprobe -v nf_tables
>&2 echo "wrapper.sh] [SETUP] Done enabling iptables-nft by default."
fi
# Check if the job has opted-in to docker-in-docker
export DOCKER_IN_DOCKER_ENABLED=${DOCKER_IN_DOCKER_ENABLED:-false}
if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then
>&2 echo "wrapper.sh] [SETUP] Docker in Docker enabled, initializing ..."
# If we have opted in to docker in docker, start the docker daemon,
service docker start
# the service may have started before the docker socket is ready; wait until it is
WAIT_N=0
while true; do
# docker ps -q should only work if the daemon is ready
docker ps -q > /dev/null 2>&1 && break
if [[ ${WAIT_N} -lt 5 ]]; then
WAIT_N=$((WAIT_N+1))
echo "wrapper.sh] [SETUP] Waiting for Docker to be ready, sleeping for ${WAIT_N} seconds ..."
sleep ${WAIT_N}
else
echo "wrapper.sh] [SETUP] Reached maximum attempts, not waiting any longer ..."
break
fi
done
echo "wrapper.sh] [SETUP] Done setting up Docker in Docker."
fi
# add $GOPATH/bin to $PATH
export GOPATH="${GOPATH:-${HOME}/go}"
export PATH="${GOPATH}/bin:${PATH}"
mkdir -p "${GOPATH}/bin"
# Authenticate gcloud, allow failures
if [[ -n "${GOOGLE_APPLICATION_CREDENTIALS:-}" ]]; then
>&2 echo "wrapper.sh] activating service account from GOOGLE_APPLICATION_CREDENTIALS ..."
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" || true
fi
if git rev-parse --is-inside-work-tree >/dev/null; then
>&2 echo "wrapper.sh] [SETUP] Setting SOURCE_DATE_EPOCH for build reproducibility ..."
# Use a reproducible build date based on the most recent git commit timestamp.
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
export SOURCE_DATE_EPOCH
>&2 echo "wrapper.sh] [SETUP] exported SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH}"
fi
# actually run the user supplied command
printf '%0.s=' {1..80}; echo
>&2 echo "wrapper.sh] [TEST] Running Test Command: \`$*\` ..."
set +o errexit
"$@"
EXIT_VALUE=$?
set -o errexit
>&2 echo "wrapper.sh] [TEST] Test Command exit code: ${EXIT_VALUE}"
# cleanup
cleanup
# preserve exit value from user supplied command
printf '%0.s=' {1..80} >&2; echo >&2
>&2 echo "wrapper.sh] Exiting ${EXIT_VALUE}"
exit ${EXIT_VALUE}
FROM ubuntu:bionic-20200921
# pipefail is enabled for proper error detection in the `wget | apt-key add`
# step
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates gnupg2 \
g++ gcc gfortran git make && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/* && \
wget https://github.com/xianyi/OpenBLAS/archive/v0.3.9.tar.gz && \
tar zxvf v0.3.9.tar.gz && cd OpenBLAS-0.3.9 && \
make TARGET=CORE2 DYNAMIC_ARCH=1 DYNAMIC_OLDER=1 USE_THREAD=0 USE_OPENMP=0 FC=gfortran CC=gcc COMMON_OPT="-O3 -g -fPIC" FCOMMON_OPT="-O3 -g -fPIC -frecursive" NMAX="NUM_THREADS=128" LIBPREFIX="libopenblas" LAPACKE="NO_LAPACKE=1" INTERFACE64=0 NO_STATIC=1 && \
make -j4 PREFIX=/usr NO_STATIC=1 install && \
cd .. && rm -rf OpenBLAS-0.3.9 && rm v0.3.9.tar.gz
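# Note on the flags above: TARGET=CORE2 sets the baseline kernel while DYNAMIC_ARCH=1 additionally
# builds kernels for newer CPUs selected at runtime; NO_STATIC=1 installs only the shared library.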
#!/bin/bash
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
# WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY
#
# The original version of this file is located in the https://github.com/istio/common-files repo.
# If you're looking at this file in a different repo and want to make a change, please go to the
# common-files repo, make the change there and check it in. Then come back to this repo and run
# "make update-common".
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -x
# The purpose of this file is to unify prow/lib.sh in both istio and istio.io
# repos to avoid code duplication.
####################################################################
################# COMMON SECTION ###############################
####################################################################
# DEFAULT_KIND_IMAGE is used to set the Kubernetes version for KinD unless overridden in params to setup_kind_cluster(s)
DEFAULT_KIND_IMAGE="kindest/node:v1.20.2"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
ROOT="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
UNAME="$(uname -s)"
case "${UNAME}" in
Linux*) MACHINE=Linux;;
Darwin*) MACHINE=Mac;;
CYGWIN*) MACHINE=Cygwin;;
MINGW*) MACHINE=MinGw;;
*) MACHINE="UNKNOWN:${UNAME}"
esac
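# MACHINE is consulted by the install_kind/install_kubectl/install_helm helpers below
# to pick the linux or darwin download artifact.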
# The load_cluster_topology function reads the cluster topology configuration file and
# sets up environment variables used by other functions, so it should be called
# before anything else.
#
# Note: The cluster topology configuration file specifies the basic configuration of each
# KinD cluster, such as its name, pod and service subnets, and network_id. If two clusters
# have the same network_id, they belong to the same network and their pods can
# talk to each other directly.
#
# [{ "cluster_name": "cluster1","pod_subnet": "10.10.0.0/16","svc_subnet": "10.255.10.0/24","network_id": "0" },
# { "cluster_name": "cluster2","pod_subnet": "10.20.0.0/16","svc_subnet": "10.255.20.0/24","network_id": "0" },
# { "cluster_name": "cluster3","pod_subnet": "10.30.0.0/16","svc_subnet": "10.255.30.0/24","network_id": "1" }]
function load_cluster_topology() {
CLUSTER_TOPOLOGY_CONFIG_FILE="${1}"
if [[ ! -f "${CLUSTER_TOPOLOGY_CONFIG_FILE}" ]]; then
echo 'cluster topology configuration file not found'
exit 1
fi
export CLUSTER_NAMES
export CLUSTER_POD_SUBNETS
export CLUSTER_SVC_SUBNETS
export CLUSTER_NETWORK_ID
KUBE_CLUSTERS=$(jq '.[] | select(.kind == "Kubernetes" or .kind == null)' "${CLUSTER_TOPOLOGY_CONFIG_FILE}")
while read -r value; do
CLUSTER_NAMES+=("$value")
done < <(echo "${KUBE_CLUSTERS}" | jq -r '.cluster_name // .clusterName')
while read -r value; do
CLUSTER_POD_SUBNETS+=("$value")
done < <(echo "${KUBE_CLUSTERS}" | jq -r '.pod_subnet // .podSubnet')
while read -r value; do
CLUSTER_SVC_SUBNETS+=("$value")
done < <(echo "${KUBE_CLUSTERS}" | jq -r '.svc_subnet // .svcSubnet')
while read -r value; do
CLUSTER_NETWORK_ID+=("$value")
done < <(echo "${KUBE_CLUSTERS}" | jq -r '.network_id // .network')
export NUM_CLUSTERS
NUM_CLUSTERS=$(echo "${KUBE_CLUSTERS}" | jq -s 'length')
echo "${CLUSTER_NAMES[@]}"
echo "${CLUSTER_POD_SUBNETS[@]}"
echo "${CLUSTER_SVC_SUBNETS[@]}"
echo "${CLUSTER_NETWORK_ID[@]}"
echo "${NUM_CLUSTERS}"
}
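# Example (hypothetical invocation): after sourcing this file,
#   load_cluster_topology "${ROOT}/build/config/topology/multicluster.json"
# exports CLUSTER_NAMES=(primary remote cross-network-primary) together with the
# matching CLUSTER_POD_SUBNETS, CLUSTER_SVC_SUBNETS and CLUSTER_NETWORK_ID arrays.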
#####################################################################
################### SINGLE-CLUSTER SECTION ######################
#####################################################################
# cleanup_kind_cluster takes a single parameter NAME
# and deletes the KinD cluster with that name
function cleanup_kind_cluster() {
echo "Test exited with exit code $?."
NAME="${1}"
kind export logs --name "${NAME}" "${ARTIFACTS}/kind" -v9 || true
if [[ -z "${SKIP_CLEANUP:-}" ]]; then
echo "Cleaning up kind cluster"
kind delete cluster --name "${NAME}" -v9 || true
docker network rm kind || true
fi
}
# check_default_cluster_yaml checks the presence of default cluster YAML
# It returns 1 if it is not present
function check_default_cluster_yaml() {
if [[ -z "${DEFAULT_CLUSTER_YAML}" ]]; then
echo 'DEFAULT_CLUSTER_YAML file must be specified. Exiting...'
return 1
fi
}
# setup_kind_cluster creates a new KinD cluster with the given name, image and configuration
# 1. NAME: Name of the KinD cluster (optional)
# 2. IMAGE: Node image used by KinD (optional)
# 3. CONFIG: KinD cluster configuration YAML file. If not specified, DEFAULT_CLUSTER_YAML is used
# 4. NOMETALBINSTALL: Don't install MetalLB if set to true.
# 5. CRON_LOGGER_INSTALL: Install the cron logger if set to true.
# This function returns 0 when everything goes well, or 1 otherwise
# If the KinD cluster was already created, it is cleaned up on error
function setup_kind_cluster() {
NAME="${1:-kind}"
IMAGE="${2:-"${DEFAULT_KIND_IMAGE}"}"
CONFIG="${3:-}"
NOMETALBINSTALL="${4:-false}"
CRON_LOGGER_INSTALL="${5:-true}"
check_default_cluster_yaml
# Delete any previous KinD cluster
echo "Deleting previous KinD cluster with name=${NAME}"
if ! (kind delete cluster --name="${NAME}" -v9) > /dev/null; then
echo "No existing kind cluster with name ${NAME}. Continue..."
else
docker network rm kind || true
fi
# explicitly disable shellcheck since we actually want $NAME to expand now
# shellcheck disable=SC2064
trap "cleanup_kind_cluster ${NAME}" EXIT
# If config not explicitly set, then use defaults
if [[ -z "${CONFIG}" ]]; then
# Kubernetes 1.15+
CONFIG=${DEFAULT_CLUSTER_YAML}
# Configure the cluster IP Family only for default configs
if [ "${IP_FAMILY}" = "ipv6" ]; then
grep 'ipFamily: ipv6' "${CONFIG}" || \
cat <<EOF >> "${CONFIG}"
networking:
ipFamily: ipv6
EOF
fi
fi
# Create KinD cluster
if ! (kind create cluster --name="${NAME}" --config "${CONFIG}" -v9 --retain --image "${IMAGE}" --wait=60s); then
echo "Could not setup KinD environment. Something wrong with KinD setup. Exporting logs."
exit 1
fi
# If the metrics server configuration directory is specified, then deploy it in
# the cluster just created
if [[ -n ${METRICS_SERVER_CONFIG_DIR} ]]; then
kubectl apply -f "${METRICS_SERVER_CONFIG_DIR}"
fi
# Install MetalLB unless explicitly disabled
if [[ "${NOMETALBINSTALL}" != "true" ]]; then
install_metallb ""
fi
# Install the cron logger if explicitly enabled
if [[ "${CRON_LOGGER_INSTALL}" == "true" ]]; then
install_cron_logger ""
fi
}
###############################################################################
#################### MULTICLUSTER SECTION ###############################
###############################################################################
# Cleans up the clusters created by setup_kind_clusters
# It expects CLUSTER_NAMES to be present which means that
# load_cluster_topology must be called before invoking it
function cleanup_kind_clusters() {
echo "Test exited with exit code $?."
for c in "${CLUSTER_NAMES[@]}"; do
cleanup_kind_cluster "${c}"
done
}
# setup_kind_clusters sets up a given number of kind clusters with given topology
# as specified in cluster topology configuration file.
# 1. IMAGE = docker image used as node by KinD
# 2. IP_FAMILY = either ipv4 or ipv6
#
# NOTE: Please call load_cluster_topology before calling this method as it expects
# cluster topology information to be loaded in advance
function setup_kind_clusters() {
IMAGE="${1:-"${DEFAULT_KIND_IMAGE}"}"
KUBECONFIG_DIR="${ARTIFACTS:-$(mktemp -d)}/kubeconfig"
IP_FAMILY="${2:-ipv4}"
check_default_cluster_yaml
# trap replaces any previous traps, so we need to explicitly clean up all clusters here
trap cleanup_kind_clusters EXIT
function deploy_kind() {
IDX="${1}"
CLUSTER_NAME="${CLUSTER_NAMES[$IDX]}"
CLUSTER_POD_SUBNET="${CLUSTER_POD_SUBNETS[$IDX]}"
CLUSTER_SVC_SUBNET="${CLUSTER_SVC_SUBNETS[$IDX]}"
CLUSTER_YAML="${ARTIFACTS}/config-${CLUSTER_NAME}.yaml"
if [ ! -f "${CLUSTER_YAML}" ]; then
cp "${DEFAULT_CLUSTER_YAML}" "${CLUSTER_YAML}"
cat <<EOF >> "${CLUSTER_YAML}"
networking:
podSubnet: ${CLUSTER_POD_SUBNET}
serviceSubnet: ${CLUSTER_SVC_SUBNET}
EOF
fi
CLUSTER_KUBECONFIG="${KUBECONFIG_DIR}/${CLUSTER_NAME}"
# Create the clusters.
KUBECONFIG="${CLUSTER_KUBECONFIG}" setup_kind_cluster "${CLUSTER_NAME}" "${IMAGE}" "${CLUSTER_YAML}" "true" "true"
# KinD currently supports getting a kubeconfig for internal or external usage. To simplify our tests,
# it's much simpler if we have a single kubeconfig that can be used internally and externally.
# To do this, we can replace the server with the IP address of the docker container
# https://github.com/kubernetes-sigs/kind/issues/1558 tracks this upstream
CONTAINER_IP=$(docker inspect "${CLUSTER_NAME}-control-plane" --format "{{ .NetworkSettings.Networks.kind.IPAddress }}")
kind get kubeconfig --name "${CLUSTER_NAME}" --internal | \
sed "s/${CLUSTER_NAME}-control-plane/${CONTAINER_IP}/g" > "${CLUSTER_KUBECONFIG}"
# Enable core dumps
docker exec "${CLUSTER_NAME}"-control-plane bash -c "sysctl -w kernel.core_pattern=/var/lib/istio/data/core.proxy && ulimit -c unlimited"
}
# Now deploy the specified number of KinD clusters and
# wait till they are provisioned successfully.
declare -a DEPLOY_KIND_JOBS
for i in "${!CLUSTER_NAMES[@]}"; do
deploy_kind "${i}" & DEPLOY_KIND_JOBS+=("${!}")
done
for pid in "${DEPLOY_KIND_JOBS[@]}"; do
wait "${pid}" || exit 1
done
# Install MetalLB for LoadBalancer support (must be done synchronously since METALLB_IPS is shared)
# and keep track of the list of kubeconfig files that will be exported later
export KUBECONFIGS
for CLUSTER_NAME in "${CLUSTER_NAMES[@]}"; do
KUBECONFIG_FILE="${KUBECONFIG_DIR}/${CLUSTER_NAME}"
if [[ ${NUM_CLUSTERS} -gt 1 ]]; then
install_metallb "${KUBECONFIG_FILE}"
# Install the cron logger if explicitly enabled
if [[ -n ${CRON_LOGGER_INSTALL} ]]; then
install_cron_logger "${KUBECONFIG_FILE}"
fi
fi
KUBECONFIGS+=("${KUBECONFIG_FILE}")
done
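# Pairwise-connect all clusters; pairs sharing a network id also get direct pod-to-pod and service routes.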
ITER_END=$((NUM_CLUSTERS-1))
for i in $(seq 0 "$ITER_END"); do
for j in $(seq 0 "$ITER_END"); do
if [[ "${j}" -gt "${i}" ]]; then
NETWORK_ID_I="${CLUSTER_NETWORK_ID[i]}"
NETWORK_ID_J="${CLUSTER_NETWORK_ID[j]}"
if [[ "$NETWORK_ID_I" == "$NETWORK_ID_J" ]]; then
POD_TO_POD_AND_SERVICE_CONNECTIVITY=1
else
POD_TO_POD_AND_SERVICE_CONNECTIVITY=0
fi
connect_kind_clusters \
"${CLUSTER_NAMES[i]}" "${KUBECONFIGS[i]}" \
"${CLUSTER_NAMES[j]}" "${KUBECONFIGS[j]}" \
"${POD_TO_POD_AND_SERVICE_CONNECTIVITY}"
fi
done
done
}
function connect_kind_clusters() {
C1="${1}"
C1_KUBECONFIG="${2}"
C2="${3}"
C2_KUBECONFIG="${4}"
POD_TO_POD_AND_SERVICE_CONNECTIVITY="${5}"
C1_NODE="${C1}-control-plane"
C2_NODE="${C2}-control-plane"
C1_DOCKER_IP=$(docker inspect -f "{{ .NetworkSettings.Networks.kind.IPAddress }}" "${C1_NODE}")
C2_DOCKER_IP=$(docker inspect -f "{{ .NetworkSettings.Networks.kind.IPAddress }}" "${C2_NODE}")
if [ "${POD_TO_POD_AND_SERVICE_CONNECTIVITY}" -eq 1 ]; then
# Set up routing rules for inter-cluster direct pod to pod & service communication
C1_POD_CIDR=$(KUBECONFIG="${C1_KUBECONFIG}" kubectl get node -ojsonpath='{.items[0].spec.podCIDR}')
C2_POD_CIDR=$(KUBECONFIG="${C2_KUBECONFIG}" kubectl get node -ojsonpath='{.items[0].spec.podCIDR}')
C1_SVC_CIDR=$(KUBECONFIG="${C1_KUBECONFIG}" kubectl cluster-info dump | sed -n 's/^.*--service-cluster-ip-range=\([^"]*\).*$/\1/p' | head -n 1)
C2_SVC_CIDR=$(KUBECONFIG="${C2_KUBECONFIG}" kubectl cluster-info dump | sed -n 's/^.*--service-cluster-ip-range=\([^"]*\).*$/\1/p' | head -n 1)
docker exec "${C1_NODE}" ip route add "${C2_POD_CIDR}" via "${C2_DOCKER_IP}"
docker exec "${C1_NODE}" ip route add "${C2_SVC_CIDR}" via "${C2_DOCKER_IP}"
docker exec "${C2_NODE}" ip route add "${C1_POD_CIDR}" via "${C1_DOCKER_IP}"
docker exec "${C2_NODE}" ip route add "${C1_SVC_CIDR}" via "${C1_DOCKER_IP}"
fi
# Set up routing rules for inter-cluster pod to MetalLB LoadBalancer communication
connect_metallb "$C1_NODE" "$C2_KUBECONFIG" "$C2_DOCKER_IP"
connect_metallb "$C2_NODE" "$C1_KUBECONFIG" "$C1_DOCKER_IP"
}
function install_kind() {
KIND_DIR=$1
KIND_VERSION=$2
echo 'Installing kind...'
mkdir -p "${KIND_DIR}"
if [[ "${MACHINE}" == "Linux" ]]; then
curl -sSLo "${KIND_DIR}/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64"
elif [[ "${MACHINE}" == "Mac" ]]; then
curl -sSLo "${KIND_DIR}/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-darwin-amd64"
else
echo "Error Download kind ..."
exit 1
fi
chmod +x "${KIND_DIR}/kind"
}
function install_kubectl() {
KUBECTL_DIR=$1
KUBECTL_VERSION=$2
echo 'Installing kubectl...'
mkdir -p "${KUBECTL_DIR}"
if [[ "${MACHINE}" == "Linux" ]]; then
curl -sSLo "${KUBECTL_DIR}/kubectl" "https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
elif [[ "${MACHINE}" == "Mac" ]]; then
curl -sSLo "${KUBECTL_DIR}/kubectl" "https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/darwin/amd64/kubectl"
else
echo "Error Download kubectl ..."
exit 1
fi
chmod +x "${KUBECTL_DIR}/kubectl"
}
function install_helm() {
HELM_DIR=$1
HELM_VERSION=$2
echo 'Installing helm...'
mkdir -p "${HELM_DIR}"
OS_NAME="unknown"
if [[ "${MACHINE}" == "Linux" ]]; then
OS_NAME="linux"
elif [[ "${MACHINE}" == "Mac" ]]; then
OS_NAME="darwin"
else
echo "Error Download helm ..."
exit 1
fi
curl -sSLo "${HELM_DIR}/helm.tar.gz" "https://get.helm.sh/helm-${HELM_VERSION}-${OS_NAME}-amd64.tar.gz"
tar zxvf "${HELM_DIR}/helm.tar.gz" -C "${HELM_DIR}"
mv "${HELM_DIR}/${OS_NAME}-amd64/helm" "${HELM_DIR}"
chmod +x "${HELM_DIR}/helm"
}
function install_metallb() {
KUBECONFIG="${1}"
kubectl apply --kubeconfig="$KUBECONFIG" -f "${ROOT}/build/config/metallb.yaml"
kubectl create --kubeconfig="$KUBECONFIG" secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
if [ -z "${METALLB_IPS[*]-}" ]; then
# Take IPs from the end of the docker kind network subnet to use for MetalLB IPs
DOCKER_KIND_SUBNET="$(docker inspect kind | jq '.[0].IPAM.Config[0].Subnet' -r)"
METALLB_IPS=()
while read -r ip; do
METALLB_IPS+=("$ip")
done < <(cidr_to_ips "$DOCKER_KIND_SUBNET" | tail -n 100)
fi
# Give this cluster 10 of those IPs
RANGE="${METALLB_IPS[0]}-${METALLB_IPS[9]}"
METALLB_IPS=("${METALLB_IPS[@]:10}")
echo 'apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- '"$RANGE" | kubectl apply --kubeconfig="$KUBECONFIG" -f -
}
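# Note: each install_metallb call assigns the first 10 addresses remaining in METALLB_IPS and
# shifts the array, so successive clusters receive disjoint LoadBalancer ranges.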
function install_cron_logger() {
KUBECONFIG="${1}"
kubectl apply --kubeconfig="$KUBECONFIG" -f "${ROOT}/build/config/logging/"
}
function connect_metallb() {
REMOTE_NODE=$1
METALLB_KUBECONFIG=$2
METALLB_DOCKER_IP=$3
IP_REGEX='(([0-9]{1,3}\.?){4})'
LB_CONFIG="$(kubectl --kubeconfig="${METALLB_KUBECONFIG}" -n metallb-system get cm config -o jsonpath="{.data.config}")"
if [[ "$LB_CONFIG" =~ $IP_REGEX-$IP_REGEX ]]; then
while read -r lb_cidr; do
docker exec "${REMOTE_NODE}" ip route add "${lb_cidr}" via "${METALLB_DOCKER_IP}"
done < <(ips_to_cidrs "${BASH_REMATCH[1]}" "${BASH_REMATCH[3]}")
fi
}
function cidr_to_ips() {
CIDR="$1"
python3 - <<EOF
from ipaddress import IPv4Network; [print(str(ip)) for ip in IPv4Network('$CIDR').hosts()]
EOF
}
function ips_to_cidrs() {
IP_RANGE_START="$1"
IP_RANGE_END="$2"
python3 - <<EOF
from ipaddress import summarize_address_range, IPv4Address
[ print(n.compressed) for n in summarize_address_range(IPv4Address(u'$IP_RANGE_START'), IPv4Address(u'$IP_RANGE_END')) ]
EOF
}
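# For illustration (a small /29 chosen for brevity): cidr_to_ips "10.10.0.0/29" prints
# 10.10.0.1 .. 10.10.0.6, and ips_to_cidrs "10.10.0.1" "10.10.0.6" prints the covering
# CIDRs 10.10.0.1/32, 10.10.0.2/31, 10.10.0.4/31 and 10.10.0.6/32, one per line.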
#!/bin/bash
# Copyright 2018 Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Output a message, with a timestamp matching istio log format
function log() {
echo -e "$(date -u '+%Y-%m-%dT%H:%M:%S.%NZ')\t$*"
}
# Trace runs the provided command and records additional timing information
# NOTE: to avoid spamming the logs, we disable xtrace and re-enable it before executing the function
# and after completion. If xtrace was never set, this will result in xtrace being enabled.
# Ideally we would restore the old xtrace setting, but I don't think it's possible to do that without also log-spamming
# If we need to call it from a context without xtrace we can just make a new function.
function trace() {
{ set +x; } 2>/dev/null
log "Running '${1}'"
start="$(date -u +%s)"
{ set -x; } 2>/dev/null
"${@:2}"
{ set +x; } 2>/dev/null
end="$(date -u +%s)"
elapsed=$((end - start))
log "Command '${1}' complete in ${elapsed}s"
# Write to YAML file as well for easy reading by tooling
echo "'${1}': $elapsed" >> "${ARTIFACTS}/trace.yaml"
{ set -x; } 2>/dev/null
}
function setup_and_export_git_sha() {
if [[ -n "${CI:-}" ]]; then
if [ -z "${PULL_PULL_SHA:-}" ]; then
if [ -z "${PULL_BASE_SHA:-}" ]; then
GIT_SHA="$(git rev-parse --verify HEAD)"
export GIT_SHA
else
export GIT_SHA="${PULL_BASE_SHA}"
fi
else
export GIT_SHA="${PULL_PULL_SHA}"
fi
else
# Use the current commit.
GIT_SHA="$(git rev-parse --verify HEAD)"
export GIT_SHA
fi
GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
export GIT_BRANCH
}
# Creates a local registry for kind nodes to pull images from. Expects that the "kind" network already exists.
function setup_kind_registry() {
# create a registry container if it is not already running
running="$(docker inspect -f '{{.State.Running}}' "${KIND_REGISTRY_NAME}" 2>/dev/null || true)"
if [[ "${running}" != 'true' ]]; then
docker run \
-d --restart=always -p "${KIND_REGISTRY_PORT}:5000" --name "${KIND_REGISTRY_NAME}" \
registry:2
# Allow kind nodes to reach the registry
docker network connect "kind" "${KIND_REGISTRY_NAME}"
fi
# https://docs.tilt.dev/choosing_clusters.html#discovering-the-registry
for cluster in $(kind get clusters); do
# TODO get context/config from existing variables
kind export kubeconfig --name="${cluster}"
for node in $(kind get nodes --name="${cluster}"); do
kubectl annotate node "${node}" "kind.x-k8s.io/registry=localhost:${KIND_REGISTRY_PORT}" --overwrite;
done
done
}
# setup_cluster_reg is used to set up a cluster registry for multicluster testing
function setup_cluster_reg () {
MAIN_CONFIG=""
for context in "${CLUSTERREG_DIR}"/*; do
if [[ -z "${MAIN_CONFIG}" ]]; then
MAIN_CONFIG="${context}"
fi
export KUBECONFIG="${context}"
kubectl delete ns istio-system-multi --ignore-not-found
kubectl delete clusterrolebinding istio-multi-test --ignore-not-found
kubectl create ns istio-system-multi
kubectl create sa istio-multi-test -n istio-system-multi
kubectl create clusterrolebinding istio-multi-test --clusterrole=cluster-admin --serviceaccount=istio-system-multi:istio-multi-test
CLUSTER_NAME=$(kubectl config view --minify=true -o "jsonpath={.clusters[].name}")
gen_kubeconf_from_sa istio-multi-test "${context}"
done
export KUBECONFIG="${MAIN_CONFIG}"
}
function gen_kubeconf_from_sa () {
local service_account=$1
local filename=$2
SERVER=$(kubectl config view --minify=true -o "jsonpath={.clusters[].cluster.server}")
SECRET_NAME=$(kubectl get sa "${service_account}" -n istio-system-multi -o jsonpath='{.secrets[].name}')
CA_DATA=$(kubectl get secret "${SECRET_NAME}" -n istio-system-multi -o "jsonpath={.data['ca\\.crt']}")
TOKEN=$(kubectl get secret "${SECRET_NAME}" -n istio-system-multi -o "jsonpath={.data['token']}" | base64 --decode)
cat <<EOF > "${filename}"
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ${CA_DATA}
server: ${SERVER}
name: ${CLUSTER_NAME}
contexts:
- context:
cluster: ${CLUSTER_NAME}
user: ${CLUSTER_NAME}
name: ${CLUSTER_NAME}
current-context: ${CLUSTER_NAME}
kind: Config
preferences: {}
users:
- name: ${CLUSTER_NAME}
user:
token: ${TOKEN}
EOF
}
# Prints a copy of the given topology JSON with the given key set to the given value on the entry matching the given cluster name
function set_topology_value() {
local JSON="$1"
local CLUSTER_NAME="$2"
local KEY="$3"
local VALUE="$4"
VALUE=$(echo "${VALUE}" | awk '{$1=$1};1')
echo "${JSON}" | jq '(.[] | select(.clusterName =="'"${CLUSTER_NAME}"'") | .'"${KEY}"') |="'"${VALUE}"'"'
}
\ No newline at end of file
......@@ -13,7 +13,7 @@ services:
# Build devcontainer
build:
context: .
dockerfile: build/docker/env/cpu/ubuntu${UBUNTU}/Dockerfile
dockerfile: build/docker/builder/cpu/ubuntu${UBUNTU}/Dockerfile
cache_from:
- ${REPO}:${ARCH}-ubuntu${UBUNTU}-${LATEST_DATE_VERSION}
# user: {{ CURRENT_ID }}
......
## Tests
### E2E Test
#### Configuration Checklist
##### Operating System
| Operating System | Version |
| ------ | --------- |
| CentOS | 7.5 or later |
| Ubuntu | 16.04 or later |
| Mac | 10.14 or later |
##### Hardware
| Hardware | Recommended Configuration |
| ---- | --------------------------------------------------------------------------------------------------- |
| CPU | x86_64 platform<br> Intel CPU Sandy Bridge or later<br> CPU instruction sets:<br> _ SSE42<br> _ AVX<br> _ AVX2<br> _ AVX512 |
| Memory | 16 GB or more |
##### Software
| Software | Version |
| -------------- | ---------- |
| Docker | 19.05 or later |
| Docker Compose | 1.25.5 or later |
| jq | 1.3 or later |
| kubectl | 1.14 or later |
| helm | 3.0 or later |
| kind | 0.10.0 or later |
#### Install Dependencies
##### Check Docker and Docker Compose status
1. Confirm that the Docker daemon is running:
```shell
$ docker info
```
- To install Docker, see the official [Docker CE/EE installation instructions](https://docs.docker.com/get-docker/).
- If Docker information cannot be printed, start the Docker daemon.
- To run Docker commands without `root` privileges, create a `docker` group and add your user by running `sudo usermod -aG docker $USER`, then log out and log back in for the change to take effect. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/).
2. Confirm the Docker Compose version:
```shell
$ docker-compose version
docker-compose version 1.25.5, build 8a1c60f6
docker-py version: 4.1.0
CPython version: 3.7.5
OpenSSL version: OpenSSL 1.1.1f 31 Mar 2020
```
- To install Docker Compose, see [Install Docker Compose](https://docs.docker.com/compose/install/).
##### Install jq
- See <https://stedolan.github.io/jq/download/> for installation instructions.
##### Install kubectl
- See <https://kubernetes.io/docs/tasks/tools/> for installation instructions.
##### Install helm
- See <https://helm.sh/docs/intro/install/> for installation instructions.
##### Install kind
- See <https://kind.sigs.k8s.io/docs/user/quick-start/#installation> for installation instructions.
#### Run the E2E Test
```shell
$ cd tests/scripts
$ ./e2e-k8s.sh
```
> Getting help
>
> You can run the following command to get help:
>
> ```shell
> $ ./e2e-k8s.sh --help
> ```
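
For example (a hypothetical combination of the flags documented above), to rerun only the e2e test against a cluster and image that were already provisioned:

```shell
$ ./e2e-k8s.sh --skip-setup --skip-build --skip-install
```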
SERVICE_IP=127.0.0.1
SERVICE_PORT=19530
REPO=milvusdb/pytest
TAG=20210331-546c8df
PRE_EXIST_NETWORK=bridge
\ No newline at end of file
......@@ -11,7 +11,7 @@
FROM python:3.6.8-jessie
COPY ./requirements.txt /requirements.txt
COPY ./python_test/requirements.txt /requirements.txt
RUN python3 -m pip install --no-cache-dir -r /requirements.txt
......
version: '3.5'
services:
pytest:
image: ${REPO}:${TAG}
build:
context: ..
dockerfile: ./docker/Dockerfile
cache_from:
- ${REPO}:latest
shm_size: 2G
environment:
SERVICE_IP: ${SERVICE_IP}
SERVICE_PORT: ${SERVICE_PORT}
volumes:
- ../python_test:/pytest:delegated
working_dir: "/pytest"
networks:
default:
external:
name: ${PRE_EXIST_NETWORK}
version: '3.5'
services:
regression:
image: milvusdb/pytest:latest
volumes:
- ../..:/milvus:delegated
working_dir: "/milvus/tests/python_test"
networks:
- milvus
networks:
milvus:
\ No newline at end of file
#!/bin/bash
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
ROOT="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
# Exit immediately for non zero status
set -e
# Check unset variables
set -u
# Print commands
set -x
# shellcheck source=build/lib.sh
source "${ROOT}/build/lib.sh"
setup_and_export_git_sha
# shellcheck source=build/kind_provisioner.sh
source "${ROOT}/build/kind_provisioner.sh"
TOPOLOGY=SINGLE_CLUSTER
NODE_IMAGE="kindest/node:v1.20.2"
KIND_CONFIG=""
INSTALL_EXTRA_ARG=""
TEST_EXTRA_ARG=""
CLUSTER_TOPOLOGY_CONFIG_FILE="${ROOT}/build/config/topology/multicluster.json"
while (( "$#" )); do
case "$1" in
# Node images can be found at https://github.com/kubernetes-sigs/kind/releases
# For example, kindest/node:v1.14.0
--node-image)
NODE_IMAGE=$2
shift 2
;;
# Config for enabling different Kubernetes features in KinD (see prow/config{endpointslice.yaml,trustworthy-jwt.yaml}).
--kind-config)
KIND_CONFIG=$2
shift 2
;;
--build-command)
BUILD_COMMAND=$2
shift 2
;;
--install-extra-arg)
INSTALL_EXTRA_ARG=$2
shift 2
;;
--test-extra-arg)
TEST_EXTRA_ARG=$2
shift 2
;;
--skip-setup)
SKIP_SETUP=true
shift
;;
--skip-install)
SKIP_INSTALL=true
shift
;;
--skip-cleanup)
SKIP_CLEANUP=true
shift
;;
--install-logger)
CRON_LOGGER_INSTALL=true
shift
;;
--skip-build)
SKIP_BUILD=true
shift
;;
--skip-test)
SKIP_TEST=true
shift
;;
--manual)
MANUAL=true
shift
;;
--topology)
case $2 in
SINGLE_CLUSTER | MULTICLUSTER_SINGLE_NETWORK | MULTICLUSTER )
TOPOLOGY=$2
echo "Running with topology ${TOPOLOGY}"
;;
*)
echo "Error: Unsupported topology ${TOPOLOGY}" >&2
exit 1
;;
esac
shift 2
;;
--topology-config)
CLUSTER_TOPOLOGY_CONFIG_FILE="${ROOT}/${2}"
shift 2
;;
-h|--help)
{ set +x; } 2>/dev/null
HELP="
Usage:
$0 [flags] [Arguments]
--node-image Kubernetes in Docker (KinD) Node image
The image is a Docker image for running nested containers, systemd, and Kubernetes components.
Node images can be found at https://github.com/kubernetes-sigs/kind/releases.
Default: \"kindest/node:v1.20.2\"
--kind-config Config for enabling different Kubernetes features in KinD
--build-command Specify the Milvus build command
--install-extra-arg Install Milvus Helm Chart extra configuration. (see https://github.com/zilliztech/milvus-helm-charts/blob/main/charts/milvus-ha/values.yaml)
To override values in a chart, use either the '--values' flag and pass in a file, or use the '--set' flag and pass configuration from the command line; to force a string value, use '--set-string'.
Refer: https://helm.sh/docs/helm/helm_install/#helm-install
--test-extra-arg Run e2e test extra configuration
For example, \"--tag=smoke\"
--topology KinD cluster topology of deployments
Provides three classes: \"SINGLE_CLUSTER\", \"MULTICLUSTER_SINGLE_NETWORK\", \"MULTICLUSTER\"
Default: \"SINGLE_CLUSTER\"
--topology-config KinD cluster topology configuration file
--skip-setup Skip setting up the KinD cluster
--skip-install Skip installing the Milvus Helm Chart
--skip-cleanup Skip cleaning up the KinD cluster
--skip-build Skip building the Milvus image
--skip-test Skip the e2e test
--manual Manual mode
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"
echo -e "${HELP}" ; exit 0
;;
-*)
echo "Error: Unsupported flag $1" >&2
exit 1
;;
*) # preserve positional arguments
PARAMS+=("$1")
shift
;;
esac
done
export BUILD_COMMAND="${BUILD_COMMAND:-make install}"
export MANUAL="${MANUAL:-}"
# Default IP family of the cluster is IPv4
export IP_FAMILY="${IP_FAMILY:-ipv4}"
# KinD will not have a LoadBalancer, so we need to disable it
export TEST_ENV=kind
# LoadBalancer in Kind is supported using metallb if not ipv6.
if [ "${IP_FAMILY}" != "ipv6" ]; then
export TEST_ENV=kind-metallb
fi
# See https://kind.sigs.k8s.io/docs/user/quick-start/#loading-an-image-into-your-cluster
export PULL_POLICY=IfNotPresent
# We run a local registry in a docker container that KinD nodes pull from
# These values must match what is in config/trustworthy-jwt.yaml
export KIND_REGISTRY_NAME="kind-registry"
export KIND_REGISTRY_PORT="5000"
export KIND_REGISTRY="localhost:${KIND_REGISTRY_PORT}"
export ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}"
export SINGLE_CLUSTER_NAME="${SINGLE_CLUSTER_NAME:-kind}"
export HUB="${HUB:-milvusdb}"
export TAG="${TAG:-latest}"
# If we're not intending to pull from an actual remote registry, use the local kind registry
if [[ -z "${SKIP_BUILD:-}" ]]; then
HUB="${KIND_REGISTRY}"
export HUB
fi
export CI="true"
if [[ ! -d "${ARTIFACTS}" ]];then
mkdir -p "${ARTIFACTS}"
fi
if [[ ! -x "$(command -v kind)" ]]; then
KIND_DIR="${KIND_DIR:-"${HOME}/tool_cache/kind"}"
KIND_VERSION="v0.10.0"
export PATH="${KIND_DIR}:${PATH}"
if [[ ! -x "$(command -v kind)" ]]; then
install_kind "${KIND_DIR}" "${KIND_VERSION}"
fi
fi
if [[ ! -x "$(command -v kubectl)" ]]; then
KUBECTL_DIR="${KUBECTL_DIR:-"${HOME}/tool_cache/kubectl"}"
KUBECTL_VERSION="v1.20.2"
export PATH="${KUBECTL_DIR}:${PATH}"
if [[ ! -x "$(command -v kubectl)" ]]; then
install_kubectl "${KUBECTL_DIR}" "${KUBECTL_VERSION}"
fi
fi
if [[ ! -x "$(command -v helm)" ]]; then
HELM_DIR="${HELM_DIR:-"${HOME}/tool_cache/helm"}"
HELM_VERSION="v3.5.4"
export PATH="${HELM_DIR}:${PATH}"
if [[ ! -x "$(command -v helm)" ]]; then
install_helm "${HELM_DIR}" "${HELM_VERSION}"
fi
fi
if [[ -z "${SKIP_SETUP:-}" ]]; then
export DEFAULT_CLUSTER_YAML="${ROOT}/build/config/topology/trustworthy-jwt.yaml"
export METRICS_SERVER_CONFIG_DIR="${ROOT}/build/config/metrics"
if [[ "${TOPOLOGY}" == "SINGLE_CLUSTER" ]]; then
trace "setup kind cluster" setup_kind_cluster "${SINGLE_CLUSTER_NAME}" "${NODE_IMAGE}" "${KIND_CONFIG}"
else
trace "load cluster topology" load_cluster_topology "${CLUSTER_TOPOLOGY_CONFIG_FILE}"
trace "setup kind clusters" setup_kind_clusters "${NODE_IMAGE}" "${IP_FAMILY}"
TOPOLOGY_JSON=$(cat "${CLUSTER_TOPOLOGY_CONFIG_FILE}")
for i in $(seq 0 $((${#CLUSTER_NAMES[@]} - 1))); do
CLUSTER="${CLUSTER_NAMES[i]}"
KCONFIG="${KUBECONFIGS[i]}"
TOPOLOGY_JSON=$(set_topology_value "${TOPOLOGY_JSON}" "${CLUSTER}" "meta.kubeconfig" "${KCONFIG}")
done
RUNTIME_TOPOLOGY_CONFIG_FILE="${ARTIFACTS}/topology-config.json"
echo "${TOPOLOGY_JSON}" > "${RUNTIME_TOPOLOGY_CONFIG_FILE}"
export INTEGRATION_TEST_TOPOLOGY_FILE
INTEGRATION_TEST_TOPOLOGY_FILE="${RUNTIME_TOPOLOGY_CONFIG_FILE}"
export INTEGRATION_TEST_KUBECONFIG
INTEGRATION_TEST_KUBECONFIG=NONE
fi
fi
if [[ -z "${SKIP_BUILD:-}" ]]; then
export MILVUS_IMAGE_REPO="${HUB}/milvus"
export MILVUS_IMAGE_TAG="${TAG}"
trace "setup kind registry" setup_kind_registry
pushd "${ROOT}"
source "${ROOT}/scripts/before-install.sh"
trace "build milvus" "${ROOT}/build/builder.sh" /bin/bash -c "${BUILD_COMMAND}"
trace "build milvus image" docker build -f "${ROOT}/build/docker/milvus/Dockerfile" -t "${MILVUS_IMAGE_REPO}:${MILVUS_IMAGE_TAG}" .
trace "push milvus image" docker push "${MILVUS_IMAGE_REPO}:${MILVUS_IMAGE_TAG}"
popd
fi
if [[ -z "${SKIP_INSTALL:-}" ]]; then
trace "install milvus helm chart" "${ROOT}/tests/scripts/install_milvus.sh" "${INSTALL_EXTRA_ARG}"
fi
if [[ -z "${SKIP_TEST:-}" ]]; then
trace "test" "${ROOT}/tests/scripts/e2e.sh" "${TEST_EXTRA_ARG}"
fi
# Check if the user is running the clusters in manual mode.
if [[ -n "${MANUAL:-}" ]]; then
echo "Running cluster(s) in manual mode. Press any key to shutdown and exit..."
read -rsn1
exit 0
fi
#!/bin/bash
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
set -e
set -x
MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
MILVUS_STANDALONE_ENABLED="${MILVUS_STANDALONE_ENABLED:-true}"
MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
PARALLEL_NUM="${PARALLEL_NUM:-4}"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
ROOT="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
if [[ "${MILVUS_STANDALONE_ENABLED}" == "true" ]]; then
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=standalone"
else
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=proxynode"
fi
SERVICE_TYPE=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.type}')
if [[ "${SERVICE_TYPE}" == "LoadBalancer" ]]; then
SERVICE_IP=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
elif [[ "${SERVICE_TYPE}" == "NodePort" ]]; then
SERVICE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].nodePort}')
else
SERVICE_IP="127.0.0.1"
POD_NAME=$(kubectl get pods --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].metadata.name}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
kubectl --namespace "${MILVUS_HELM_NAMESPACE}" port-forward "${POD_NAME}" "${SERVICE_PORT}" &
PORT_FORWARD_PID=$!
trap "kill -TERM ${PORT_FORWARD_PID}" EXIT
fi
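# At this point SERVICE_IP and SERVICE_PORT address a reachable Milvus
# endpoint; they can be sanity-checked manually with e.g.:
#   kubectl get svc -n "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}"
#   nc -zv "${SERVICE_IP}" "${SERVICE_PORT}"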
pushd "${ROOT}/tests/docker"
docker-compose pull --ignore-pull-failures pytest
if [[ "${TEST_ENV:-}" =~ ^kind* ]]; then
export PRE_EXIST_NETWORK="kind"
fi
export SERVICE_IP="${SERVICE_IP:-127.0.0.1}"
export SERVICE_PORT="${SERVICE_PORT:-19530}"
if [[ "${MANUAL:-}" == "true" ]]; then
docker-compose up -d
else
# use ${*} (not ${@}) so forwarded args stay inside the single -c string
docker-compose run --rm pytest /bin/bash -c "python3 -m pip install --no-cache-dir -r requirements.txt && \
pytest -n ${PARALLEL_NUM} --ip ${SERVICE_IP} --port ${SERVICE_PORT} ${*:-}"
fi
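# Extra script arguments are forwarded to pytest, so a subset of the suite
# can be run with e.g. (hypothetical test selector):
#   ./tests/scripts/e2e.sh -x -k "test_insert"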
popd
#!/bin/bash
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
set -e
set -x
MILVUS_HELM_REPO="https://github.com/zilliztech/milvus-helm-charts.git"
MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
MILVUS_STANDALONE_ENABLED="${MILVUS_STANDALONE_ENABLED:-true}"
MILVUS_IMAGE_REPO="${MILVUS_IMAGE_REPO:-milvusdb/milvus}"
MILVUS_IMAGE_TAG="${MILVUS_IMAGE_TAG:-latest}"
MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
MILVUS_INSTALL_TIMEOUT="${MILVUS_INSTALL_TIMEOUT:-300s}"
# Delete any previous Milvus cluster
echo "Deleting previous Milvus cluster with name=${MILVUS_HELM_RELEASE_NAME}"
if ! (helm uninstall -n "${MILVUS_HELM_NAMESPACE}" "${MILVUS_HELM_RELEASE_NAME}") > /dev/null; then
echo "No existing Milvus cluster with name ${MILVUS_HELM_RELEASE_NAME}. Continue..."
else
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME}"
kubectl delete pvc -n "${MILVUS_HELM_NAMESPACE}" $(kubectl get pvc -n "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{range.items[*]}{.metadata.name} {end}')
fi
if [[ "${TEST_ENV}" == "kind-metallb" ]]; then
MILVUS_SERVICE_TYPE="${MILVUS_SERVICE_TYPE:-LoadBalancer}"
else
MILVUS_SERVICE_TYPE="${MILVUS_SERVICE_TYPE:-ClusterIP}"
fi
TMP_DIR="$(mktemp -d)"
git clone --depth=1 -b "${MILVUS_HELM_BRANCH:-main}" "${MILVUS_HELM_REPO}" "${TMP_DIR}"
kubectl create namespace "${MILVUS_HELM_NAMESPACE}" || true
if [[ "${MILVUS_STANDALONE_ENABLED}" == "false" ]]; then
helm install --wait --timeout "${MILVUS_INSTALL_TIMEOUT}" \
--set image.all.repository="${MILVUS_IMAGE_REPO}" \
--set image.all.tag="${MILVUS_IMAGE_TAG}" \
--set image.all.pullPolicy="${MILVUS_PULL_POLICY:-Always}" \
--set standalone.enabled="${MILVUS_STANDALONE_ENABLED}" \
--set proxynode.service.type="${MILVUS_SERVICE_TYPE}" \
--namespace "${MILVUS_HELM_NAMESPACE}" \
"${MILVUS_HELM_RELEASE_NAME}" \
${@:-} "${TMP_DIR}/charts/milvus-ha"
else
helm install --wait --timeout "${MILVUS_INSTALL_TIMEOUT}" \
--set image.all.repository="${MILVUS_IMAGE_REPO}" \
--set image.all.tag="${MILVUS_IMAGE_TAG}" \
--set image.all.pullPolicy="${MILVUS_PULL_POLICY:-Always}" \
--set standalone.enabled="${MILVUS_STANDALONE_ENABLED}" \
--set standalone.service.type="${MILVUS_SERVICE_TYPE}" \
--namespace "${MILVUS_HELM_NAMESPACE}" \
"${MILVUS_HELM_RELEASE_NAME}" \
${@:-} "${TMP_DIR}/charts/milvus-ha"
fi
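# Extra arguments are passed straight through to helm install, so chart
# values can be overridden at the call site, e.g.:
#   ./tests/scripts/install_milvus.sh --set image.all.pullPolicy=IfNotPresent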
#!/bin/bash
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
set -e
set -x
MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
helm uninstall -n "${MILVUS_HELM_NAMESPACE}" "${MILVUS_HELM_RELEASE_NAME}"
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME}"
kubectl delete pvc -n "${MILVUS_HELM_NAMESPACE}" $(kubectl get pvc -n "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{range.items[*]}{.metadata.name} {end}')