Commit 9e95435e authored by Thomas Stromberg

Add --cni flag, fix --network-plugin=kubenet

Parent 4f6a9f65
......@@ -70,9 +70,7 @@ var nodeAddCmd = &cobra.Command{
}
}
// Add CNI config if it's not already there
// We need to run kubeadm.init here as well
if err := config.MultiNodeCNIConfig(cc); err != nil {
if err := config.SaveProfile(cc.Name, cc); err != nil {
exit.WithError("failed to save config", err)
}
......
......@@ -29,6 +29,7 @@ import (
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
......@@ -54,6 +55,7 @@ const (
criSocket = "cri-socket"
networkPlugin = "network-plugin"
enableDefaultCNI = "enable-default-cni"
cniFlag = "cni"
hypervVirtualSwitch = "hyperv-virtual-switch"
hypervUseExternalSwitch = "hyperv-use-external-switch"
hypervExternalAdapter = "hyperv-external-adapter"
......@@ -130,8 +132,9 @@ func initMinikubeFlags() {
startCmd.Flags().String(mountString, constants.DefaultMountDir+":/minikube-host", "The argument to pass the minikube mount command on start.")
startCmd.Flags().StringArrayVar(&config.AddonList, "addons", nil, "Enable addons. see `minikube addons list` for a list of valid addon names.")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
startCmd.Flags().String(networkPlugin, "", "Kubelet network plug-in to use (default: auto)")
startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=bridge")
startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. Valid options: auto, bridge, flannel, kindnet, false, or path to a CNI manifest (default: auto)")
startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of Kubernetes components to verify and wait for after starting a cluster. Defaults to %q; available options: %q. Other acceptable values are 'all', 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ",")))
startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core service to be healthy.")
startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
......@@ -237,21 +240,6 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
if err != nil {
return cc, config.Node{}, errors.Wrap(err, "new runtime manager")
}
// Pick good default values for --network-plugin and --enable-default-cni based on runtime.
selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI)
selectedNetworkPlugin := viper.GetString(networkPlugin)
if r.DefaultCNI() && !cmd.Flags().Changed(networkPlugin) {
selectedNetworkPlugin = "cni"
if !cmd.Flags().Changed(enableDefaultCNI) {
selectedEnableDefaultCNI = true
}
}
repository := viper.GetString(imageRepository)
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") {
......@@ -275,6 +263,13 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
out.T(out.SuccessType, "Using image repository {{.name}}", out.V{"name": repository})
}
// Backwards compatibility with --enable-default-cni
chosenCNI := viper.GetString(cniFlag)
if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
chosenCNI = "bridge"
}
cc = config.ClusterConfig{
Name: ClusterFlagValue(),
KeepContext: viper.GetBool(keepContext),
......@@ -318,16 +313,26 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
FeatureGates: viper.GetString(featureGates),
ContainerRuntime: viper.GetString(containerRuntime),
CRISocket: viper.GetString(criSocket),
NetworkPlugin: selectedNetworkPlugin,
NetworkPlugin: viper.GetString(networkPlugin),
ServiceCIDR: viper.GetString(serviceCIDR),
ImageRepository: repository,
ExtraOptions: config.ExtraOptions,
ShouldLoadCachedImages: viper.GetBool(cacheImages),
EnableDefaultCNI: selectedEnableDefaultCNI,
CNI: chosenCNI,
NodePort: viper.GetInt(apiServerPort),
},
}
cc.VerifyComponents = interpretWaitFlag(*cmd)
cnm, err := cni.New(cc)
if err != nil {
return cc, config.Node{}, errors.Wrap(err, "cni")
}
if _, ok := cnm.(cni.Disabled); !ok {
glog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
cc.KubernetesConfig.NetworkPlugin = "cni"
}
}
r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
......@@ -354,6 +359,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
validateFlags(cmd, existing.Driver)
cc := *existing
if cmd.Flags().Changed(containerRuntime) {
cc.KubernetesConfig.ContainerRuntime = viper.GetString(containerRuntime)
}
......@@ -514,10 +520,6 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
cc.KubernetesConfig.CRISocket = viper.GetString(criSocket)
}
if cmd.Flags().Changed(criSocket) {
cc.KubernetesConfig.NetworkPlugin = viper.GetString(criSocket)
}
if cmd.Flags().Changed(networkPlugin) {
cc.KubernetesConfig.NetworkPlugin = viper.GetString(networkPlugin)
}
......@@ -534,8 +536,15 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
cc.KubernetesConfig.ImageRepository = viper.GetString(imageRepository)
}
if cmd.Flags().Changed(enableDefaultCNI) {
cc.KubernetesConfig.EnableDefaultCNI = viper.GetBool(enableDefaultCNI)
if cmd.Flags().Changed(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
if viper.GetBool(enableDefaultCNI) {
glog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
cc.KubernetesConfig.CNI = "bridge"
}
}
if cmd.Flags().Changed(cniFlag) {
cc.KubernetesConfig.CNI = viper.GetString(cniFlag)
}
if cmd.Flags().Changed(waitComponents) {
......
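For reference, the deprecation shim above can be condensed into a small sketch (effectiveCNI is a hypothetical helper, not part of this commit): --enable-default-cni now just selects the bridge CNI, unless --cni was set explicitly.

package main

import "fmt"

// effectiveCNI condenses the shim above: the deprecated
// --enable-default-cni flag maps to --cni=bridge, but an
// explicit --cni value always wins.
func effectiveCNI(cniFlag string, cniFlagSet, enableDefaultCNI bool) string {
	if enableDefaultCNI && !cniFlagSet {
		return "bridge"
	}
	return cniFlag
}

func main() {
	fmt.Println(effectiveCNI("", false, true))       // bridge
	fmt.Println(effectiveCNI("flannel", true, true)) // flannel
}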
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the none Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integration tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="none"
JOB_NAME="none_cloudshell"
SUDO_PREFIX="sudo -E "
export KUBECONFIG="/root/.kube/config"
gcloud alpha cloud-shell ssh --boosted "uptime"
gcloud alpha cloud-shell scp --boosted ""
mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
source ./common.sh
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on a Linux machine for the KVM Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integration tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
JOB_NAME="KVM_Linux"
EXPECTED_DEFAULT_DRIVER="kvm2"
# We pick kvm as our gvisor testbed because it is fast & reliable
EXTRA_TEST_ARGS="-gvisor"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
source ./common.sh
......@@ -65,8 +65,9 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
if err != nil {
return errors.Wrap(err, "kubeadm images")
}
if containerRuntime != "docker" { // kic overlay image is only needed by containerd and cri-o https://github.com/kubernetes/minikube/issues/7428
imgs = append(imgs, kic.OverlayImage)
imgs = append(imgs, images.KindNet(""))
}
runner := command.NewKICRunner(profile, driver.OCIBinary)
......
......@@ -27,9 +27,6 @@ const (
Version = "v0.0.10"
// SHA of the kic base image
baseImageSHA = "f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438"
// OverlayImage is the cni plugin used for overlay image, created by kind.
// CNI plugin image used for kic drivers created by kind.
OverlayImage = "kindest/kindnetd:0.5.4"
)
var (
......
......@@ -18,8 +18,12 @@ limitations under the License.
package bsutil
import (
"os/exec"
"path"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/vmpath"
)
......@@ -27,8 +31,6 @@ import (
var KubeadmYamlPath = path.Join(vmpath.GuestEphemeralDir, "kubeadm.yaml")
const (
//DefaultCNIConfigPath is the configuration file for CNI networks
DefaultCNIConfigPath = "/etc/cni/net.d/1-k8s.conf"
// KubeletServiceFile is the file for the systemd kubelet.service
KubeletServiceFile = "/lib/systemd/system/kubelet.service"
// KubeletSystemdConfFile is config for the systemd kubelet.service
......@@ -38,3 +40,22 @@ const (
// KubeletInitPath is where Sys-V style init script is installed
KubeletInitPath = "/etc/init.d/kubelet"
)
// CopyFiles copies the given files to the guest, combining mkdir requests into a single call to reduce load
func CopyFiles(runner command.Runner, files []assets.CopyableFile) error {
dirs := []string{}
for _, f := range files {
dirs = append(dirs, f.GetTargetDir())
}
args := append([]string{"mkdir", "-p"}, dirs...)
if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil {
return errors.Wrap(err, "mkdir")
}
for _, f := range files {
if err := runner.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
}
return nil
}
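A hypothetical caller sketch for the newly exported helper (the target path and contents below are invented for illustration; the signatures match the code above):

package example

import (
	"github.com/pkg/errors"

	"k8s.io/minikube/pkg/minikube/assets"
	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
	"k8s.io/minikube/pkg/minikube/command"
)

// stageMOTD stages a single in-memory file on the guest via
// bsutil.CopyFiles; /etc/motd is a hypothetical target.
func stageMOTD(r command.Runner) error {
	f := assets.NewMemoryAssetTarget([]byte("welcome\n"), "/etc/motd", "0644")
	if err := bsutil.CopyFiles(r, []assets.CopyableFile{f}); err != nil {
		return errors.Wrap(err, "copy files")
	}
	return nil
}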
......@@ -26,6 +26,7 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
......@@ -65,6 +66,18 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
return nil, errors.Wrap(err, "generating extra component config for kubeadm")
}
cnm, err := cni.New(cc)
if err != nil {
return nil, errors.Wrap(err, "cni")
}
podCIDR := cnm.CIDR()
overrideCIDR := k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm)
if overrideCIDR != "" {
podCIDR = overrideCIDR
}
glog.Infof("Using pod CIDR: %s", podCIDR)
opts := struct {
CertDir string
ServiceCIDR string
......@@ -87,7 +100,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
}{
CertDir: vmpath.GuestKubernetesCertsDir,
ServiceCIDR: constants.DefaultServiceCIDR,
PodSubnet: k8s.ExtraOptions.Get("pod-network-cidr", Kubeadm),
PodSubnet: podCIDR,
AdvertiseAddress: n.IP,
APIServerPort: nodePort,
KubernetesVersion: k8s.KubernetesVersion,
......
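The precedence rule introduced here is small enough to state standalone (podCIDR is a hypothetical condensation): an explicit --extra-config=kubeadm.pod-network-cidr overrides the CNI manager's default CIDR.

package main

import "fmt"

// podCIDR mirrors the precedence above: the override wins when set,
// otherwise the CNI manager's CIDR is used.
func podCIDR(cniCIDR, override string) string {
	if override != "" {
		return override
	}
	return cniCIDR
}

func main() {
	fmt.Println(podCIDR("10.244.0.0/16", ""))               // 10.244.0.0/16
	fmt.Println(podCIDR("10.244.0.0/16", "192.168.0.0/16")) // 192.168.0.0/16
}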
......@@ -25,6 +25,7 @@ import (
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
......@@ -53,7 +54,12 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
}
if k8s.NetworkPlugin != "" {
extraOpts["network-plugin"] = k8s.NetworkPlugin
if k8s.NetworkPlugin == "kubenet" {
extraOpts["pod-cidr"] = cni.DefaultPodCIDR
}
}
if _, ok := extraOpts["node-ip"]; !ok {
extraOpts["node-ip"] = nc.IP
}
......
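This is the "fix --network-plugin=kubenet" half of the commit: kubenet does not ship its own IPAM defaults, so the kubelet also needs an explicit --pod-cidr. A hypothetical standalone condensation of the option assembly above:

package main

import "fmt"

// kubeletNetworkOpts condenses the fix above: with
// --network-plugin=kubenet, the kubelet is also given a pod CIDR
// (cni.DefaultPodCIDR, 10.244.0.0/16, in this commit).
func kubeletNetworkOpts(networkPlugin, defaultPodCIDR string) map[string]string {
	opts := map[string]string{}
	if networkPlugin != "" {
		opts["network-plugin"] = networkPlugin
		if networkPlugin == "kubenet" {
			opts["pod-cidr"] = defaultPodCIDR
		}
	}
	return opts
}

func main() {
	fmt.Println(kubeletNetworkOpts("kubenet", "10.244.0.0/16"))
	// map[network-plugin:kubenet pod-cidr:10.244.0.0/16]
}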
......@@ -32,7 +32,7 @@ import (
// WaitForNodeReady waits until the kube client reports the node status as "ready"
func WaitForNodeReady(cs *kubernetes.Clientset, timeout time.Duration) error {
glog.Info("waiting for node status to be ready ...")
glog.Infof("waiting %s for node status to be ready ...", timeout)
start := time.Now()
defer func() {
glog.Infof("duration metric: took %s to wait for WaitForNodeReady...", time.Since(start))
......
......@@ -36,6 +36,7 @@ import (
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/logs"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util/retry"
)
// WaitForSystemPods verifies that the essential pods for running Kubernetes are running
......@@ -83,11 +84,15 @@ func ExpectAppsRunning(cs *kubernetes.Clientset, expected []string) error {
if err != nil {
return err
}
glog.Infof("%d kube-system pods found", len(pods.Items))
for _, pod := range pods.Items {
glog.Infof(podStatusMsg(pod))
if pod.Status.Phase != core.PodRunning {
continue
}
for k, v := range pod.ObjectMeta.Labels {
if k == "component" || k == "k8s-app" {
found[v] = true
......@@ -112,15 +117,16 @@ func WaitForAppsRunning(cs *kubernetes.Clientset, expected []string, timeout tim
glog.Info("waiting for k8s-apps to be running ...")
start := time.Now()
checkRunning := func() (bool, error) {
if err := ExpectAppsRunning(cs, expected); err != nil {
return false, nil
checkRunning := func() error {
err := ExpectAppsRunning(cs, expected)
if err != nil {
glog.Warningf("expect apps running failed: %v", err)
}
return true, nil
return err
}
if err := wait.PollImmediate(kconst.APICallRetryInterval, timeout, checkRunning); err != nil {
return errors.Wrapf(err, "checking k8s-apps to be running")
if err := retry.Expo(checkRunning, kconst.APICallRetryInterval, timeout); err != nil {
return errors.Wrapf(err, "expected k8s-apps")
}
glog.Infof("duration metric: took %s to wait for k8s-apps to be running ...", time.Since(start))
return nil
......
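The switch from wait.PollImmediate to retry.Expo gives the check exponential backoff instead of a fixed polling interval. A minimal standalone usage sketch (the intervals below are arbitrary, not the values used above):

package main

import (
	"fmt"
	"time"

	"k8s.io/minikube/pkg/util/retry"
)

func main() {
	attempts := 0
	check := func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("not ready yet (attempt %d)", attempts)
		}
		return nil
	}
	// Retry with exponential backoff, starting at 100ms, for up to 5s total.
	if err := retry.Expo(check, 100*time.Millisecond, 5*time.Second); err != nil {
		fmt.Println("gave up:", err)
	}
}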
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -43,7 +43,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -36,7 +36,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -40,7 +40,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.12.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -43,7 +43,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -35,7 +35,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -36,7 +36,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -40,7 +40,7 @@ schedulerExtraArgs:
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -52,7 +52,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -68,6 +68,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -43,7 +43,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -59,5 +59,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -49,7 +49,7 @@ scheduler:
kubernetesVersion: v1.14.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -65,6 +65,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -52,7 +52,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -68,6 +68,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -43,7 +43,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -59,5 +59,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -49,7 +49,7 @@ scheduler:
kubernetesVersion: v1.15.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -65,6 +65,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -52,7 +52,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -68,6 +68,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -42,7 +42,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -58,5 +58,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -43,7 +43,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -59,5 +59,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -49,7 +49,7 @@ scheduler:
kubernetesVersion: v1.16.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -65,6 +65,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -50,7 +50,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -66,6 +66,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -41,7 +41,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -57,5 +57,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -47,7 +47,7 @@ scheduler:
kubernetesVersion: v1.17.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -63,6 +63,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -50,7 +50,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -66,6 +66,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -41,7 +41,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -57,5 +57,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -47,7 +47,7 @@ scheduler:
kubernetesVersion: v1.18.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -63,6 +63,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -50,7 +50,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -66,6 +66,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -40,7 +40,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: 1.1.1.1
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -56,5 +56,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -41,7 +41,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -57,5 +57,5 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
......@@ -47,7 +47,7 @@ scheduler:
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: ""
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
......@@ -63,6 +63,6 @@ failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: ""
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"
......@@ -121,6 +121,7 @@ func auxiliary(mirror string) []string {
storageProvisioner(mirror),
dashboardFrontend(mirror),
dashboardMetrics(mirror),
// NOTE: kindnet is also used when the Docker driver is used with a non-Docker runtime
}
}
......@@ -146,3 +147,11 @@ func dashboardMetrics(repo string) string {
// See 'dashboard-metrics-scraper' in deploy/addons/dashboard/dashboard-dp.yaml
return path.Join(repo, "metrics-scraper:v1.0.2")
}
// KindNet returns the image used for kindnet
func KindNet(repo string) string {
if repo == "" {
repo = "kindest"
}
return path.Join(repo, "kindnetd:0.5.4")
}
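A quick sketch of how the new helper resolves image names; the helper is reproduced from above so it runs standalone, and the mirror repository name is hypothetical:

package main

import (
	"fmt"
	"path"
)

// KindNet reproduced from above: defaults to the "kindest" repo
// when none is given.
func KindNet(repo string) string {
	if repo == "" {
		repo = "kindest"
	}
	return path.Join(repo, "kindnetd:0.5.4")
}

func main() {
	fmt.Println(KindNet(""))               // kindest/kindnetd:0.5.4
	fmt.Println(KindNet("mirror.example")) // mirror.example/kindnetd:0.5.4 (hypothetical mirror)
}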
......@@ -17,7 +17,6 @@ limitations under the License.
package kubeadm
import (
"bytes"
"context"
"os/exec"
"path"
......@@ -41,7 +40,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/assets"
......@@ -49,6 +47,7 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
......@@ -231,6 +230,25 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
return errors.Wrap(err, "run")
}
cnm, err := cni.New(cfg)
if err != nil {
return errors.Wrap(err, "cni config")
}
if _, ok := cnm.(cni.Disabled); !ok {
out.T(out.CNI, "Configuring {{.name}} (Container Networking Interface) ...", out.V{"name": cnm.String()})
if err := cnm.Apply(k.c, []cni.Runner{k.c}); err != nil {
return errors.Wrap(err, "cni apply")
}
if cfg.KubernetesConfig.ContainerRuntime == "crio" {
if err := sysinit.New(k.c).Restart("crio"); err != nil {
glog.Errorf("failed to restart CRI: %v", err)
}
}
}
var wg sync.WaitGroup
wg.Add(3)
......@@ -239,12 +257,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
glog.Errorf("unable to create cluster role binding, some addons might not work: %v", err)
}
// the overlay is required for containerd and cri-o runtime: see #7428
if config.MultiNode(cfg) || (driver.IsKIC(cfg.Driver) && cfg.KubernetesConfig.ContainerRuntime != "docker") {
if err := k.applyKICOverlay(cfg); err != nil {
glog.Errorf("failed to apply kic overlay: %v", err)
}
}
wg.Done()
}()
......@@ -769,13 +781,6 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, bsutil.KubeadmYamlPath+".new", "0640"))
}
// Copy the default CNI config (k8s.conf), so that kubelet can successfully
// start a Pod in the case a user hasn't manually installed any CNI plugin
// and minikube was started with "--extra-config=kubelet.network-plugin=cni".
if cfg.KubernetesConfig.EnableDefaultCNI && !config.MultiNode(cfg) {
files = append(files, assets.NewMemoryAssetTarget([]byte(defaultCNIConfig), bsutil.DefaultCNIConfigPath, "0644"))
}
// Installs compatibility shims for non-systemd environments
kubeletPath := path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubelet")
shims, err := sm.GenerateInitShim("kubelet", kubeletPath, bsutil.KubeletSystemdConfFile)
......@@ -784,7 +789,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
}
files = append(files, shims...)
if err := copyFiles(k.c, files); err != nil {
if err := bsutil.CopyFiles(k.c, files); err != nil {
return errors.Wrap(err, "copy")
}
......@@ -800,65 +805,11 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
return sm.Start("kubelet")
}
func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
// Combine mkdir request into a single call to reduce load
dirs := []string{}
for _, f := range files {
dirs = append(dirs, f.GetTargetDir())
}
args := append([]string{"mkdir", "-p"}, dirs...)
if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil {
return errors.Wrap(err, "mkdir")
}
for _, f := range files {
if err := runner.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
}
return nil
}
// kubectlPath returns the path to the kubectl binary
func kubectlPath(cfg config.ClusterConfig) string {
return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl")
}
// applyKICOverlay applies the CNI plugin needed to make kic work
func (k *Bootstrapper) applyKICOverlay(cfg config.ClusterConfig) error {
b := bytes.Buffer{}
if err := kicCNIConfig.Execute(&b, struct{ ImageName string }{ImageName: kic.OverlayImage}); err != nil {
return err
}
ko := path.Join(vmpath.GuestEphemeralDir, "kic_overlay.yaml")
f := assets.NewMemoryAssetTarget(b.Bytes(), ko, "0644")
if err := k.c.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), "apply",
fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")),
"-f", ko)
if rr, err := k.c.RunCmd(cmd); err != nil {
return errors.Wrapf(err, "cmd: %s output: %s", rr.Command(), rr.Output())
}
// Inform cri-o that the CNI has changed
if cfg.KubernetesConfig.ContainerRuntime == "crio" {
if err := sysinit.New(k.c).Restart("crio"); err != nil {
return errors.Wrap(err, "restart crio")
}
}
return nil
}
// applyNodeLabels applies minikube labels to all the nodes
func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
// time cluster was created. time format is based on ISO 8601 (RFC 3339)
......
***************
*** 307,312 ****
return nil
}
out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
glog.Warningf("delete failed: %v", err)
--- 307,313 ----
return nil
}
+ panic(fmt.Errorf("restart failed: %v", err))
out.ErrT(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
glog.Warningf("delete failed: %v", err)
......@@ -62,19 +62,20 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.Cluster
}
// ControlPlaneBootstrapper returns the bootstrapper for the cluster's control plane
func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, command.Runner, error) {
cp, err := config.PrimaryControlPlane(cc)
if err != nil {
return nil, errors.Wrap(err, "getting primary control plane")
return nil, nil, errors.Wrap(err, "getting primary control plane")
}
h, err := machine.LoadHost(mAPI, driver.MachineName(*cc, cp))
if err != nil {
return nil, errors.Wrap(err, "getting control plane host")
return nil, nil, errors.Wrap(err, "getting control plane host")
}
cpr, err := machine.CommandRunner(h)
if err != nil {
return nil, errors.Wrap(err, "getting control plane command runner")
return nil, nil, errors.Wrap(err, "getting control plane command runner")
}
return Bootstrapper(mAPI, bootstrapperName, *cc, cpr)
bs, err := Bootstrapper(mAPI, bootstrapperName, *cc, cpr)
return bs, cpr, err
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"bytes"
"text/template"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
)
// bridge is what minikube defaulted to when `--enable-default-cni=true`
// https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md
var bridgeConf = template.Must(template.New("bridge").Parse(`
{
"cniVersion": "0.3.1",
"name": "bridge",
"type": "bridge",
"bridge": "bridge",
"addIf": "true",
"isDefaultGateway": true,
"forceAddress": false,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"subnet": "{{.PodCIDR}}"
}
}
`))
// Bridge is a CNI manager that applies a built-in bridge network config
type Bridge struct {
cc config.ClusterConfig
}
// String returns a string representation of this CNI
func (c Bridge) String() string {
return "Bridge CNI"
}
func (c Bridge) netconf() (assets.CopyableFile, error) {
input := &tmplInput{PodCIDR: DefaultPodCIDR}
b := bytes.Buffer{}
if err := bridgeConf.Execute(&b, input); err != nil {
return nil, err
}
return assets.NewMemoryAssetTarget(b.Bytes(), "/etc/cni/net.d/1-k8s.conf", "0644"), nil
}
// Apply enables the CNI
func (c Bridge) Apply(_ Runner, nodes []Runner) error {
f, err := c.netconf()
if err != nil {
return errors.Wrap(err, "netconf")
}
return applyNetConf(nodes, f)
}
// CIDR returns the default CIDR used by this CNI
func (c Bridge) CIDR() string {
return DefaultPodCIDR
}
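To see what --cni=bridge actually writes to /etc/cni/net.d/1-k8s.conf, the template can be rendered standalone. A sketch reproducing the template above with the default pod CIDR:

package main

import (
	"os"
	"text/template"
)

// bridgeConf reproduced from above so the sketch is self-contained.
var bridgeConf = template.Must(template.New("bridge").Parse(`
{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "type": "bridge",
  "bridge": "bridge",
  "addIf": "true",
  "isDefaultGateway": true,
  "forceAddress": false,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "subnet": "{{.PodCIDR}}"
  }
}
`))

func main() {
	// Renders the netconf payload for DefaultPodCIDR (10.244.0.0/16).
	if err := bridgeConf.Execute(os.Stdout, struct{ PodCIDR string }{PodCIDR: "10.244.0.0/16"}); err != nil {
		panic(err)
	}
}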
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cni configures the Container Networking Interface
package cni
import (
"context"
"fmt"
"os/exec"
"path"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/vmpath"
)
const (
// DefaultPodCIDR is the default CIDR to use in minikube CNI's.
DefaultPodCIDR = "10.244.0.0/16"
)
// Runner is the subset of command.Runner this package consumes
type Runner interface {
RunCmd(cmd *exec.Cmd) (*command.RunResult, error)
Copy(assets.CopyableFile) error
}
// Manager is a common interface for CNI
type Manager interface {
// Apply enables the CNI
Apply(Runner, []Runner) error
// CIDR returns the default CIDR used by this CNI
CIDR() string
// String representation
String() string
}
// tmplInput is the input to CNI templates
type tmplInput struct {
ImageName string
PodCIDR string
DefaultRoute string
}
// New returns a new CNI manager
func New(cc config.ClusterConfig) (Manager, error) {
if cc.KubernetesConfig.NetworkPlugin != "" && cc.KubernetesConfig.NetworkPlugin != "cni" {
glog.Infof("network plugin configured as %q, returning disabled", cc.KubernetesConfig.NetworkPlugin)
return Disabled{}, nil
}
glog.Infof("Creating CNI manager for %q", cc.KubernetesConfig.CNI)
switch cc.KubernetesConfig.CNI {
case "", "auto":
return chooseDefault(cc), nil
case "false":
return Disabled{cc: cc}, nil
case "kindnet", "true":
return KindNet{cc: cc}, nil
case "bridge":
return Bridge{cc: cc}, nil
case "flannel":
return Flannel{cc: cc}, nil
default:
return NewCustom(cc, cc.KubernetesConfig.CNI)
}
}
func chooseDefault(cc config.ClusterConfig) Manager {
// For backwards compatibility with older profiles using --enable-default-cni
if cc.KubernetesConfig.EnableDefaultCNI {
glog.Infof("EnableDefaultCNI is true, recommending bridge")
return Bridge{}
}
if cc.KubernetesConfig.ContainerRuntime != "docker" {
if driver.IsKIC(cc.Driver) {
glog.Infof("%q driver + %s runtime found, recommending kindnet", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
return KindNet{cc: cc}
}
glog.Infof("%q driver + %s runtime found, recommending bridge", cc.Driver, cc.KubernetesConfig.ContainerRuntime)
return Bridge{cc: cc}
}
if len(cc.Nodes) > 1 {
glog.Infof("%d nodes found, recommending kindnet", len(cc.Nodes))
return KindNet{cc: cc}
}
glog.Infof("CNI unnecessary in this configuration, recommending no CNI")
return Disabled{}
}
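The auto-selection logic above reduces to a small decision table. A hypothetical standalone condensation, with the ClusterConfig fields the real function reads flattened into plain parameters:

package main

import "fmt"

// chooseDefault condenses the selection above: inputs are the deprecated
// flag, the container runtime, whether the driver is KIC (docker/podman),
// and the node count.
func chooseDefault(enableDefaultCNI bool, runtime string, kicDriver bool, nodes int) string {
	if enableDefaultCNI {
		return "bridge" // backwards compatibility with --enable-default-cni
	}
	if runtime != "docker" {
		if kicDriver {
			return "kindnet" // KIC driver + non-docker runtime
		}
		return "bridge"
	}
	if nodes > 1 {
		return "kindnet" // multi-node clusters need a CNI
	}
	return "disabled" // single-node docker needs no CNI
}

func main() {
	fmt.Println(chooseDefault(false, "containerd", true, 1)) // kindnet
	fmt.Println(chooseDefault(false, "docker", false, 3))    // kindnet
	fmt.Println(chooseDefault(false, "docker", false, 1))    // disabled
}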
// manifestPath returns the path to the CNI manifest
func manifestPath() string {
return path.Join(vmpath.GuestEphemeralDir, "cni.yaml")
}
// manifestAsset returns a copyable asset for the CNI manifest
func manifestAsset(b []byte) assets.CopyableFile {
return assets.NewMemoryAssetTarget(b, manifestPath(), "0644")
}
// kubectlPath returns the path to the kubectl binary
func kubectlPath(cc config.ClusterConfig) string {
return path.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion, "kubectl")
}
// applyManifest applies a CNI manifest
func applyManifest(cc config.ClusterConfig, r Runner, f assets.CopyableFile) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
kubectl := kubectlPath(cc)
glog.Infof("applying CNI manifest using %s ...", kubectl)
if err := r.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
cmd := exec.CommandContext(ctx, "sudo", kubectl, "apply", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), "-f", manifestPath())
if rr, err := r.RunCmd(cmd); err != nil {
return errors.Wrapf(err, "cmd: %s output: %s", rr.Command(), rr.Output())
}
return nil
}
// applyNetConf applies a netconf file across nodes
func applyNetConf(rs []Runner, f assets.CopyableFile) error {
for _, r := range rs {
if err := r.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
}
return nil
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
"os"
"path"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
)
// Custom is a CNI manager that applies a user-specified manifest
type Custom struct {
cc config.ClusterConfig
manifest string
}
// String returns a string representation of this CNI
func (c Custom) String() string {
return fmt.Sprintf("Custom (%s)", c.manifest)
}
// NewCustom returns a well-formed Custom CNI manager
func NewCustom(cc config.ClusterConfig, manifest string) (Custom, error) {
_, err := os.Stat(manifest)
if err != nil {
return Custom{}, errors.Wrap(err, "stat")
}
return Custom{
cc: cc,
manifest: manifest,
}, nil
}
// Apply enables the CNI
func (c Custom) Apply(master Runner, nodes []Runner) error {
m, err := assets.NewFileAsset(c.manifest, path.Dir(manifestPath()), path.Base(manifestPath()), "0644")
if err != nil {
return errors.Wrap(err, "manifest")
}
return applyManifest(c.cc, master, m)
}
// CIDR returns the default CIDR used by this CNI
func (c Custom) CIDR() string {
return DefaultPodCIDR
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"github.com/golang/glog"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
)
// Disabled is a CNI manager that does nothing
type Disabled struct {
cc config.ClusterConfig
}
// String returns a string representation
func (c Disabled) String() string {
return "Disabled"
}
// Apply is a no-op, but warns if a CNI would have been recommended for this configuration
func (c Disabled) Apply(master Runner, nodes []Runner) error {
if driver.IsKIC(c.cc.Driver) && c.cc.KubernetesConfig.ContainerRuntime != "docker" {
glog.Warningf("CNI is recommended for %q driver and %q runtime - expect networking issues", c.cc.Driver, c.cc.KubernetesConfig.ContainerRuntime)
}
if len(c.cc.Nodes) > 1 {
glog.Warningf("CNI is recommended for multi-node clusters - expect networking issues")
}
return nil
}
// CIDR returns the default CIDR used by this CNI
func (c Disabled) CIDR() string {
return ""
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"k8s.io/minikube/pkg/minikube/config"
)
// From https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
var flannelTmpl = `---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.12.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.12.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
`
// Flannel is the Flannel CNI manager
type Flannel struct {
cc config.ClusterConfig
}
// String returns a string representation of this CNI
func (c Flannel) String() string {
return "Flannel"
}
// Apply enables the CNI
func (c Flannel) Apply(master Runner, nodes []Runner) error {
return applyManifest(c.cc, master, manifestAsset([]byte(flannelTmpl)))
}
// CIDR returns the default CIDR used by this CNI
func (c Flannel) CIDR() string {
return DefaultPodCIDR
}
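Flannel, like Custom, delegates to an unexported applyManifest helper that this diff does not show. Below is a minimal sketch of what such a helper might do, assuming Runner exposes command.Runner-style Copy and RunCmd methods and that kubectl is available on the control plane; the target path handling and the exact kubectl invocation (binary path, kubeconfig) are assumptions, not the commit's code. It would additionally need "os/exec", "path", "github.com/pkg/errors", and "k8s.io/minikube/pkg/minikube/assets" imported.
// applyManifestSketch approximates the unexported applyManifest used above:
// copy the manifest onto the node, then apply it with kubectl. The real
// helper's binary path and kubeconfig handling may differ.
func applyManifestSketch(cc config.ClusterConfig, r Runner, m assets.CopyableFile) error {
	if err := r.Copy(m); err != nil {
		return errors.Wrap(err, "copy")
	}
	target := path.Join(m.GetTargetDir(), m.GetTargetName())
	cmd := exec.Command("sudo", "kubectl", "apply", "-f", target)
	if _, err := r.RunCmd(cmd); err != nil {
		return errors.Wrap(err, "apply")
	}
	return nil
}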
/*
Copyright 2018 The Kubernetes Authors All rights reserved.
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
......@@ -14,39 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeadm
package cni
import "html/template"
import (
"bytes"
"text/template"
// defaultCNIConfig is the CNI config which is provisioned when --enable-default-cni
// has been passed to `minikube start`.
//
// The config is being written to /etc/cni/net.d/k8s.conf.
const defaultCNIConfig = `
{
"cniVersion": "0.3.0",
"name": "rkt.kubernetes.io",
"type": "bridge",
"bridge": "mybridge",
"mtu": 1460,
"addIf": "true",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.1.0.0/16",
"gateway": "10.1.0.1",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
}
`
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
)
// kicCNIConfig is the CNI config needed for KIC; it uses the CNI plugin created by kind: https://github.com/kubernetes-sigs/kind/blob/03a4b519067dc308308cce735065c47a6fda1583/pkg/build/node/cni.go
var kicCNIConfig = template.Must(template.New("kubeletServiceTemplate").Parse(`---
var kindNetManifest = template.Must(template.New("kindnet").Parse(`---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
......@@ -125,7 +105,7 @@ spec:
fieldRef:
fieldPath: status.podIP
- name: POD_SUBNET
value: 10.244.0.0/16
value: {{.PodCIDR}}
volumeMounts:
- name: cni-cfg
mountPath: /etc/cni/net.d
......@@ -160,3 +140,42 @@ spec:
---
`))
// KindNet is the KindNet CNI manager
type KindNet struct {
cc config.ClusterConfig
}
// String returns a string representation of this CNI
func (c KindNet) String() string {
return "CNI"
}
// manifest returns the Kubernetes manifest for this CNI
func (c KindNet) manifest() (assets.CopyableFile, error) {
input := &tmplInput{
DefaultRoute: "0.0.0.0/0", // assumes IPv4
PodCIDR: DefaultPodCIDR,
ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository),
}
b := bytes.Buffer{}
if err := kindNetManifest.Execute(&b, input); err != nil {
return nil, err
}
return manifestAsset(b.Bytes()), nil
}
// Apply enables the CNI
func (c KindNet) Apply(master Runner, nodes []Runner) error {
m, err := c.manifest()
if err != nil {
return errors.Wrap(err, "manifest")
}
return applyManifest(c.cc, master, m)
}
// CIDR returns the default CIDR used by this CNI
func (c KindNet) CIDR() string {
return DefaultPodCIDR
}
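The tmplInput type consumed by manifest() is defined elsewhere in the package and does not appear in this hunk; from the struct literal above, its field set can be inferred as follows (an inference, not a line from this diff):
// tmplInput carries the values substituted into CNI manifest templates
// such as kindNetManifest; the field set is inferred from manifest() above.
type tmplInput struct {
	ImageName    string // container image, e.g. images.KindNet(...)
	PodCIDR      string // pod network CIDR, e.g. DefaultPodCIDR
	DefaultRoute string // default route, "0.0.0.0/0" for IPv4
}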
......@@ -202,18 +202,6 @@ func (c *simpleConfigLoader) WriteConfigToFile(profileName string, cc *ClusterCo
return ioutil.WriteFile(path, contents, 0644)
}
// MultiNodeCNIConfig add default CNI config needed for multinode clusters and saves off the config
func MultiNodeCNIConfig(cc *ClusterConfig) error {
if cc.KubernetesConfig.ExtraOptions.Get("pod-network-cidr", "kubeadm") == "" {
cc.KubernetesConfig.NetworkPlugin = "cni"
if err := cc.KubernetesConfig.ExtraOptions.Set(fmt.Sprintf("kubeadm.pod-network-cidr=%s", DefaultPodCIDR)); err != nil {
return err
}
return SaveProfile(cc.Name, cc)
}
return nil
}
// MultiNode returns true if the cluster has multiple nodes or if the request is asking for multinode
func MultiNode(cc ClusterConfig) bool {
if len(cc.Nodes) > 1 {
......
......@@ -137,12 +137,6 @@ func SaveNode(cfg *ClusterConfig, node *Node) error {
cfg.Nodes = append(cfg.Nodes, *node)
}
if MultiNode(*cfg) {
if err := MultiNodeCNIConfig(cfg); err != nil {
return err
}
}
return SaveProfile(viper.GetString(ProfileName), cfg)
}
......
......@@ -22,14 +22,6 @@ import (
"github.com/blang/semver"
)
const (
// DefaultNetwork is the Docker default bridge network named "bridge"
// (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
DefaultNetwork = "bridge"
// DefaultPodCIDR is The CIDR to be used for pods inside the node.
DefaultPodCIDR = "10.244.0.0/16"
)
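These two constants leave the config package. DefaultPodCIDR is referenced throughout the new cni package (Custom, Flannel, and KindNet all return it), so it presumably moves there with the same value; the new declaration is not visible in this diff, so the following is an assumption:
// Assumed new home in pkg/minikube/cni (declaration not shown in this diff):

// DefaultPodCIDR is the CIDR to be used for pods inside the node.
const DefaultPodCIDR = "10.244.0.0/16"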
// Profile represents a minikube profile
type Profile struct {
Name string
......@@ -96,7 +88,9 @@ type KubernetesConfig struct {
ExtraOptions ExtraOptionSlice
ShouldLoadCachedImages bool
EnableDefaultCNI bool
EnableDefaultCNI bool // Deprecated: use CNI instead
CNI string // CNI to use
// We need to keep these in the short term for backwards compatibility
NodeIP string
......
......@@ -157,11 +157,6 @@ func (r *Containerd) SocketPath() string {
return "/run/containerd/containerd.sock"
}
// DefaultCNI returns whether to use CNI networking by default
func (r *Containerd) DefaultCNI() bool {
return true
}
// Active returns if containerd is active on the host
func (r *Containerd) Active() bool {
return r.Init.Active("containerd")
......
......@@ -89,11 +89,6 @@ func (r *CRIO) SocketPath() string {
return "/var/run/crio/crio.sock"
}
// DefaultCNI returns whether to use CNI networking by default
func (r *CRIO) DefaultCNI() bool {
return true
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *CRIO) Available() error {
c := exec.Command("which", "crio")
......
......@@ -79,8 +79,6 @@ type Manager interface {
KubeletOptions() map[string]string
// SocketPath returns the path to the socket file for a given runtime
SocketPath() string
// DefaultCNI returns whether to use CNI networking by default
DefaultCNI() bool
// Load an image idempotently into the runtime on a host
LoadImage(string) error
......
......@@ -86,11 +86,6 @@ func (r *Docker) SocketPath() string {
return r.Socket
}
// DefaultCNI returns whether to use CNI networking by default
func (r *Docker) DefaultCNI() bool {
return false
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *Docker) Available() error {
_, err := exec.LookPath("docker")
......
......@@ -169,9 +169,6 @@ func FlagDefaults(name string) FlagHints {
fh := FlagHints{}
if name != None {
fh.CacheImages = true
if name == Docker {
fh.ExtraOptions = append(fh.ExtraOptions, fmt.Sprintf("kubeadm.pod-network-cidr=%s", config.DefaultPodCIDR))
}
return fh
}
......
......@@ -39,6 +39,7 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/cni"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
......@@ -153,17 +154,18 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
prepareNone()
}
glog.Infof("Will wait %s for node ...", waitTimeout)
if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
return nil, errors.Wrap(err, "Wait failed")
return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
}
} else {
if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
return nil, errors.Wrap(err, "Updating node")
return nil, errors.Wrap(err, "update node")
}
// Make sure to use the command runner for the control plane to generate the join token
cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
if err != nil {
return nil, errors.Wrap(err, "getting control plane bootstrapper")
}
......@@ -176,8 +178,18 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil {
return nil, errors.Wrap(err, "joining cluster")
}
cnm, err := cni.New(*starter.Cfg)
if err != nil {
return nil, errors.Wrap(err, "cni")
}
if err := cnm.Apply(cpr, []cni.Runner{cpr, starter.Runner}); err != nil {
return nil, errors.Wrap(err, "cni apply")
}
}
glog.Infof("waiting for startup goroutines ...")
wg.Wait()
// Write enabled addons to the config before completion
......
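The cni.New constructor invoked above belongs to the new package and is not part of this hunk. Given the --cni help text (auto, calico, custom, flannel, kindnet) and the managers shown earlier, it plausibly dispatches along these lines. This is a sketch only: the Manager interface (String/Apply/CIDR) is assumed, chooseDefault is a hypothetical stand-in for the auto-detection logic, and the calico case is omitted because its manager does not appear in this diff.
// New sketches the dispatcher implied by --cni: named plug-ins map to
// their managers, "false" disables CNI, and any other value is treated
// as a path to a custom manifest. Approximation, not the commit's code.
func New(cc config.ClusterConfig) (Manager, error) {
	switch cc.KubernetesConfig.CNI {
	case "", "auto":
		return chooseDefault(cc), nil // hypothetical; e.g. KindNet for multi-node
	case "false":
		return Disabled{cc: cc}, nil
	case "flannel":
		return Flannel{cc: cc}, nil
	case "kindnet":
		return KindNet{cc: cc}, nil
	default:
		c, err := NewCustom(cc, cc.KubernetesConfig.CNI)
		if err != nil {
			return nil, err
		}
		return c, nil
	}
}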
......@@ -122,6 +122,7 @@ var styles = map[StyleEnum]style{
Unmount: {Prefix: "🔥 "},
VerifyingNoLine: {Prefix: "🤔 ", OmitNewline: true},
Verifying: {Prefix: "🤔 "},
CNI: {Prefix: "🔗 "},
}
// Add a prefix to a string
......
......@@ -93,4 +93,5 @@ const (
WaitingPods
Warning
Workaround
CNI
)
......@@ -29,6 +29,7 @@ minikube start [flags]
--auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true)
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438")
--cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
--cni string CNI plug-in to use. Valid options: auto, calico, custom, flannel, kindnet (default: auto)
--container-runtime string The container runtime to be used (docker, crio, containerd). (default "docker")
--cpus int Number of CPUs allocated to Kubernetes. (default 2)
--cri-socket string The cri socket path to be used.
......@@ -43,7 +44,7 @@ minikube start [flags]
--driver string Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system.
--dry-run dry-run mode. Validates configuration, but does not mutate system state
--embed-certs if true, will embed the certs in kubeconfig.
--enable-default-cni Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with "--network-plugin=cni".
--enable-default-cni DEPRECATED: Replaced by --cni=custom
--extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components.
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler
......@@ -77,7 +78,7 @@ minikube start [flags]
--mount-string string The argument to pass the minikube mount command on start.
--nat-nic-type string NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio")
--native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true)
--network-plugin string The name of the network plugin.
--network-plugin string Kubelet network plug-in to use (default: auto)
--nfs-share strings Local folders to share with Guest via NFS mounts (hyperkit driver only)
--nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares")
--no-vtx-check Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)
......
......@@ -107,7 +107,6 @@ func TestFunctional(t *testing.T) {
{"ComponentHealth", validateComponentHealth},
{"ConfigCmd", validateConfigCmd},
{"DashboardCmd", validateDashboardCmd},
{"DNS", validateDNS},
{"DryRun", validateDryRun},
{"StatusCmd", validateStatusCmd},
{"LogsCmd", validateLogsCmd},
......@@ -419,36 +418,6 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
}
}
// validateDNS asserts that all Kubernetes DNS is healthy
func validateDNS(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
if err != nil {
t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err)
}
names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4))
if err != nil {
t.Fatalf("failed waiting for busybox pod : %v", err)
}
nslookup := func() error {
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "nslookup", "kubernetes.default"))
return err
}
// If the coredns process was stable, this retry wouldn't be necessary.
if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil {
t.Errorf("failed to do nslookup on kubernetes.default: %v", err)
}
want := []byte("10.96.0.1")
if !bytes.Contains(rr.Stdout.Bytes(), want) {
t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
}
}
// validateDryRun asserts that the dry-run mode quickly exits with the right code
func validateDryRun(ctx context.Context, t *testing.T, profile string) {
// dry-run mode should always be able to finish quickly (<5s)
......
......@@ -167,6 +167,7 @@ func (ss *StartSession) Stop(t *testing.T) {
func Cleanup(t *testing.T, profile string, cancel context.CancelFunc) {
// No helper because it makes the call log confusing.
if *cleanup {
t.Logf("Cleaning up %q profile ...", profile)
_, err := Run(t, exec.Command(Target(), "delete", "-p", profile))
if err != nil {
t.Logf("failed cleanup: %v", err)
......
// +build integration
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"bytes"
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/util/retry"
)
func TestNetworkPlugins(t *testing.T) {
MaybeParallel(t)
t.Run("group", func(t *testing.T) {
tests := []struct {
name string
args []string
kubeletPlugin string
podLabel string
hairpin bool
}{
{"auto", []string{}, "", "", false},
{"kubenet", []string{"--network-plugin=kubenet"}, "kubenet", "", true},
{"bridge", []string{"--cni=bridge"}, "cni", "", true},
{"enable-default-cni", []string{"--enable-default-cni=true"}, "cni", "", true},
{"flannel", []string{"--cni=flannel"}, "cni", "app=flannel", true},
{"kindnet", []string{"--cni=kindnet"}, "cni", "app=kindnet", true},
{"false", []string{"--cni=false"}, "", "", false},
{"custom-weave", []string{fmt.Sprintf("--cni=%s", filepath.Join(*testdataDir, "weavenet.yaml"))}, "cni", "", true},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
start := time.Now()
MaybeParallel(t)
profile := UniqueProfileName(tc.name)
ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
defer Cleanup(t, profile, cancel)
startArgs := append([]string{"start", "-p", profile, "--memory=1800", "--alsologtostderr", "--wait=true", "--wait-timeout=20m"}, tc.args...)
startArgs = append(startArgs, StartArgs()...)
t.Run("Start", func(t *testing.T) {
_, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("failed start: %v", err)
}
})
if !t.Failed() && tc.podLabel != "" {
t.Run("ControllerPod", func(t *testing.T) {
if _, err := PodWait(ctx, t, profile, "kube-system", tc.podLabel, Minutes(8)); err != nil {
t.Fatalf("failed waiting for %s labeled pod: %v", tc.podLabel, err)
}
})
}
if !t.Failed() {
t.Run("KubeletFlags", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "pgrep -a kubelet"))
if err != nil {
t.Fatalf("ssh failed: %v", err)
}
out := rr.Stdout.String()
if tc.kubeletPlugin == "" {
if strings.Contains(out, "--network-plugin") {
t.Errorf("expected no network plug-in, got %s", out)
}
} else {
if !strings.Contains(out, fmt.Sprintf("--network-plugin=%s", tc.kubeletPlugin)) {
t.Errorf("expected --network-plugin=%s, got %s", tc.kubeletPlugin, out)
}
}
})
}
if !t.Failed() {
t.Run("NetCatPod", func(t *testing.T) {
_, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "netcat-deployment.yaml")))
if err != nil {
t.Errorf("failed to apply netcat manifest: %v", err)
}
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
}
if err := kapi.WaitForDeploymentToStabilize(client, "default", "netcat", Minutes(12)); err != nil {
t.Errorf("failed waiting for netcat deployment to stabilize: %v", err)
}
if _, err := PodWait(ctx, t, profile, "default", "app=netcat", Minutes(12)); err != nil {
t.Fatalf("failed waiting for netcat pod: %v", err)
}
})
}
if !t.Failed() {
t.Run("DNS", func(t *testing.T) {
var rr *RunResult
var err error
nslookup := func() error {
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "nslookup", "kubernetes.default"))
return err
}
// If the coredns process was stable, this retry wouldn't be necessary.
if err := retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil {
t.Errorf("failed to do nslookup on kubernetes.default: %v", err)
}
want := []byte("10.96.0.1")
if !bytes.Contains(rr.Stdout.Bytes(), want) {
t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
}
})
}
if !t.Failed() {
t.Run("Localhost", func(t *testing.T) {
tryLocal := func() error {
_, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "/bin/sh", "-c", "nc -w 5 -i 5 -z localhost 8080"))
return err
}
if err := retry.Expo(tryLocal, 1*time.Second, Seconds(30)); err != nil {
t.Errorf("failed to connect via localhost: %v", err)
}
})
}
if !t.Failed() {
t.Run("HairPin", func(t *testing.T) {
tryHairPin := func() error {
_, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", "deployment/netcat", "--", "/bin/sh", "-c", "nc -w 5 -i 5 -z netcat 8080"))
return err
}
if tc.hairpin {
if err := retry.Expo(tryHairPin, 1*time.Second, Seconds(30)); err != nil {
t.Errorf("failed to connect via pod host: %v", err)
}
} else {
if tryHairPin() == nil {
t.Fatalf("hairpin connection unexpectedly succeeded - misconfigured test?")
}
}
})
}
t.Logf("%q test finished in %s, failed=%v", tc.name, time.Since(start), t.Failed())
})
}
})
}