From 44c8dd52a8b7a7573d1649d42450c7b61b0b3478 Mon Sep 17 00:00:00 2001
From: tstromberg
Date: Fri, 12 Jul 2019 11:50:57 -0700
Subject: [PATCH] Add --wait flag to allow users to skip pod health checks

---
 cmd/minikube/cmd/start.go             | 14 ++++++++---
 test/integration/addons_test.go       | 36 +++++++++++++--------------
 test/integration/cluster_env_test.go  |  3 +--
 test/integration/cluster_logs_test.go |  6 ++---
 test/integration/cluster_ssh_test.go  |  4 +--
 test/integration/docker_test.go       |  4 +--
 test/integration/flags.go             |  5 ++--
 test/integration/iso_test.go          | 18 +++++++-------
 test/integration/mount_test.go        | 18 +++++++-------
 test/integration/persistence_test.go  | 18 +++++++-------
 test/integration/proxy_test.go        |  8 +++---
 test/integration/util/util.go         |  2 +-
 12 files changed, 72 insertions(+), 64 deletions(-)

diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 7920d575b..b54583e30 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -102,6 +102,7 @@ const (
 	downloadOnly     = "download-only"
 	dnsProxy         = "dns-proxy"
 	hostDNSResolver  = "host-dns-resolver"
+	waitUntilHealthy = "wait"
 )
 
 var (
@@ -163,6 +164,7 @@ func init() {
 	startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).")
 	startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox)")
 	startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox)")
+	startCmd.Flags().Bool(waitUntilHealthy, true, "Wait until Kubernetes core services are healthy before exiting")
 	startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox)")
 	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
 		exit.WithError("unable to bind flags", err)
@@ -273,8 +275,10 @@ func runStart(cmd *cobra.Command, args []string) {
 		prepareNone()
 	}
 
-	if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
-		exit.WithError("Wait failed", err)
+	if viper.GetBool(waitUntilHealthy) {
+		if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
+			exit.WithError("Wait failed", err)
+		}
 	}
 
 	showKubectlConnectInfo(kubeconfig)
@@ -310,7 +314,11 @@ func showKubectlConnectInfo(kubeconfig *pkgutil.KubeConfigSetup) {
 	if kubeconfig.KeepContext {
 		console.OutT(console.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", console.Arg{"name": kubeconfig.ClusterName})
 	} else {
-		console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		if !viper.GetBool(waitUntilHealthy) {
+			console.OutT(console.Ready, "kubectl has been configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		} else {
+			console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		}
 	}
 	_, err := exec.LookPath("kubectl")
 	if err != nil {
diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go
index 85b56a1c7..dfcbc9605 100644
--- a/test/integration/addons_test.go
+++ b/test/integration/addons_test.go
@@ -76,8 +76,8 @@ func readLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error
 
 func testDashboard(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	cmd, out := minikubeRunner.RunDaemon("dashboard --url")
+	mk := NewMinikubeRunner(t, "--wait=false")
+	cmd, out := mk.RunDaemon("dashboard --url")
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {
@@ -121,10 +121,10 @@ func testDashboard(t *testing.T) {
 
 func testIngressController(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t)
 	kubectlRunner := util.NewKubectlRunner(t)
 
-	minikubeRunner.RunCommand("addons enable ingress", true)
+	mk.RunCommand("addons enable ingress", true)
 	if err := util.WaitForIngressControllerRunning(t); err != nil {
 		t.Fatalf("waiting for ingress-controller to be up: %v", err)
 	}
@@ -154,7 +154,7 @@ func testIngressController(t *testing.T) {
 	checkIngress := func() error {
 		expectedStr := "Welcome to nginx!"
 		runCmd := fmt.Sprintf("curl http://127.0.0.1:80 -H 'Host: nginx.example.com'")
-		sshCmdOutput, _ := minikubeRunner.SSH(runCmd)
+		sshCmdOutput, _ := mk.SSH(runCmd)
 		if !strings.Contains(sshCmdOutput, expectedStr) {
 			return fmt.Errorf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
 		}
@@ -172,15 +172,15 @@ func testIngressController(t *testing.T) {
 		}
 	}()
-	minikubeRunner.RunCommand("addons disable ingress", true)
+	mk.RunCommand("addons disable ingress", true)
 }
 
 func testServicesList(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t)
 
 	checkServices := func() error {
-		output := minikubeRunner.RunCommand("service list", false)
+		output := mk.RunCommand("service list", false)
 		if !strings.Contains(output, "kubernetes") {
 			return fmt.Errorf("Error, kubernetes service missing from output %s", output)
 		}
@@ -192,8 +192,8 @@ func testServicesList(t *testing.T) {
 }
 
 func testGvisor(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t)
+	mk.RunCommand("addons enable gvisor", true)
 
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
@@ -208,7 +208,7 @@ func testGvisor(t *testing.T) {
 	}
 
 	t.Log("disabling gvisor addon")
-	minikubeRunner.RunCommand("addons disable gvisor", true)
+	mk.RunCommand("addons disable gvisor", true)
 	t.Log("waiting for gvisor controller pod to be deleted")
 	if err := util.WaitForGvisorControllerDeleted(); err != nil {
 		t.Fatalf("waiting for gvisor controller to be deleted: %v", err)
@@ -224,20 +224,20 @@ func testGvisor(t *testing.T) {
 }
 
 func testGvisorRestart(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t)
+	mk.EnsureRunning()
+	mk.RunCommand("addons enable gvisor", true)
 
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
 		t.Fatalf("waiting for gvisor controller to be up: %v", err)
 	}
 
 	// TODO: @priyawadhwa to add test for stop as well
-	minikubeRunner.RunCommand("delete", false)
-	minikubeRunner.CheckStatus(state.None.String())
-	minikubeRunner.Start()
-	minikubeRunner.CheckStatus(state.Running.String())
+	mk.RunCommand("delete", false)
+	mk.CheckStatus(state.None.String())
+	mk.Start()
+	mk.CheckStatus(state.Running.String())
 
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
diff --git a/test/integration/cluster_env_test.go b/test/integration/cluster_env_test.go
index 132ece7d4..cf1fb101a 100644
--- a/test/integration/cluster_env_test.go
+++ b/test/integration/cluster_env_test.go
@@ -30,8 +30,7 @@ import (
 // Assert that docker-env subcommand outputs usable information for "docker ps"
 func testClusterEnv(t *testing.T) {
 	t.Parallel()
-
-	r := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t, "--wait=false")
 
 	// Set a specific shell syntax so that we don't have to handle every possible user shell
 	envOut := r.RunCommand("docker-env --shell=bash", true)
diff --git a/test/integration/cluster_logs_test.go b/test/integration/cluster_logs_test.go
index e5675fa2f..4879d5f4d 100644
--- a/test/integration/cluster_logs_test.go
+++ b/test/integration/cluster_logs_test.go
@@ -25,9 +25,9 @@ import (
 
 func testClusterLogs(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	logsCmdOutput := minikubeRunner.GetLogs()
+	mk := NewMinikubeRunner(t)
+	mk.EnsureRunning()
+	logsCmdOutput := mk.GetLogs()
 
 	// check for # of lines or check for strings
 	logWords := []string{"minikube", ".go"}
diff --git a/test/integration/cluster_ssh_test.go b/test/integration/cluster_ssh_test.go
index 5c57f6727..246ed93e5 100644
--- a/test/integration/cluster_ssh_test.go
+++ b/test/integration/cluster_ssh_test.go
@@ -25,9 +25,9 @@ import (
 
 func testClusterSSH(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	expectedStr := "hello"
-	sshCmdOutput := minikubeRunner.RunCommand("ssh echo "+expectedStr, true)
+	sshCmdOutput := mk.RunCommand("ssh echo "+expectedStr, true)
 	if !strings.Contains(sshCmdOutput, expectedStr) {
 		t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
Output was: %s", expectedStr, sshCmdOutput) } diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index b16e1ad9a..0cd537ba2 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -27,7 +27,7 @@ import ( ) func TestDocker(t *testing.T) { - mk := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") if strings.Contains(mk.StartArgs, "--vm-driver=none") { t.Skip("skipping test as none driver does not bundle docker") } @@ -41,7 +41,7 @@ func TestDocker(t *testing.T) { } startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.Args, - "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr --v=5") + "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true") stdout, stderr, err := mk.RunWithContext(ctx, startCmd) if err != nil { t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr) diff --git a/test/integration/flags.go b/test/integration/flags.go index 5a5746ac7..77bd5577b 100644 --- a/test/integration/flags.go +++ b/test/integration/flags.go @@ -19,6 +19,7 @@ package integration import ( "flag" "os" + "strings" "testing" "k8s.io/minikube/test/integration/util" @@ -37,9 +38,9 @@ var mountArgs = flag.String("minikube-mount-args", "", "Arguments to pass to min var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives") // NewMinikubeRunner creates a new MinikubeRunner -func NewMinikubeRunner(t *testing.T) util.MinikubeRunner { +func NewMinikubeRunner(t *testing.T, extraArgs ...string) util.MinikubeRunner { return util.MinikubeRunner{ - Args: *args, + Args: *args + strings.Join(extraArgs, " "), BinaryPath: *binaryPath, StartArgs: *startArgs, MountArgs: *mountArgs, diff --git a/test/integration/iso_test.go b/test/integration/iso_test.go index 4a7806abf..0f7dcb6da 100644 --- a/test/integration/iso_test.go +++ b/test/integration/iso_test.go @@ -26,10 +26,10 @@ import ( func TestISO(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") - minikubeRunner.RunCommand("delete", false) - minikubeRunner.Start() + mk.RunCommand("delete", false) + mk.Start() t.Run("permissions", testMountPermissions) t.Run("packages", testPackages) @@ -37,14 +37,14 @@ func TestISO(t *testing.T) { } func testMountPermissions(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") // test mount permissions mountPoints := []string{"/Users", "/hosthome"} perms := "drwxr-xr-x" foundMount := false for _, dir := range mountPoints { - output, err := minikubeRunner.SSH(fmt.Sprintf("ls -l %s", dir)) + output, err := mk.SSH(fmt.Sprintf("ls -l %s", dir)) if err != nil { continue } @@ -59,7 +59,7 @@ func testMountPermissions(t *testing.T) { } func testPackages(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") packages := []string{ "git", @@ -73,7 +73,7 @@ func testPackages(t *testing.T) { } for _, pkg := range packages { - if output, err := minikubeRunner.SSH(fmt.Sprintf("which %s", pkg)); err != nil { + if output, err := mk.SSH(fmt.Sprintf("which %s", pkg)); err != nil { t.Errorf("Error finding package: %s. Error: %v. 
Output: %s", pkg, err, output) } } @@ -81,7 +81,7 @@ func testPackages(t *testing.T) { } func testPersistence(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") for _, dir := range []string{ "/data", @@ -92,7 +92,7 @@ func testPersistence(t *testing.T) { "/var/lib/toolbox", "/var/lib/boot2docker", } { - output, err := minikubeRunner.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir)) + output, err := mk.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir)) if err != nil { t.Errorf("Error checking device for %s. Error: %v", dir, err) } diff --git a/test/integration/mount_test.go b/test/integration/mount_test.go index a55d04e41..f181a2029 100644 --- a/test/integration/mount_test.go +++ b/test/integration/mount_test.go @@ -43,7 +43,7 @@ func testMounting(t *testing.T) { } t.Parallel() - minikubeRunner := NewMinikubeRunner(t) + mk := NewMinikubeRunner(t, "--wait=false") tempDir, err := ioutil.TempDir("", "mounttest") if err != nil { @@ -51,8 +51,8 @@ func testMounting(t *testing.T) { } defer os.RemoveAll(tempDir) - mountCmd := getMountCmd(minikubeRunner, tempDir) - cmd, _, _ := minikubeRunner.RunDaemon2(mountCmd) + mountCmd := getMountCmd(mk, tempDir) + cmd, _, _ := mk.RunDaemon2(mountCmd) defer func() { err := cmd.Process.Kill() if err != nil { @@ -99,7 +99,7 @@ func testMounting(t *testing.T) { t.Logf("Pods appear to be running") mountTest := func() error { - if err := verifyFiles(minikubeRunner, kubectlRunner, tempDir, podName, expected); err != nil { + if err := verifyFiles(mk, kubectlRunner, tempDir, podName, expected); err != nil { t.Fatalf(err.Error()) } @@ -111,10 +111,10 @@ func testMounting(t *testing.T) { } -func getMountCmd(minikubeRunner util.MinikubeRunner, mountDir string) string { +func getMountCmd(mk util.MinikubeRunner, mountDir string) string { var mountCmd string - if len(minikubeRunner.MountArgs) > 0 { - mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", minikubeRunner.MountArgs, mountDir) + if len(mk.MountArgs) > 0 { + mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", mk.MountArgs, mountDir) } else { mountCmd = fmt.Sprintf("mount %s:/mount-9p", mountDir) } @@ -144,7 +144,7 @@ func waitForPods(s map[string]string) error { return nil } -func verifyFiles(minikubeRunner util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error { +func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error { path := filepath.Join(tempDir, "frompod") out, err := ioutil.ReadFile(path) if err != nil { @@ -167,7 +167,7 @@ func verifyFiles(minikubeRunner util.MinikubeRunner, kubectlRunner *util.Kubectl files := []string{"fromhost", "frompod"} for _, file := range files { statCmd := fmt.Sprintf("stat /mount-9p/%s", file) - statOutput, err := minikubeRunner.SSH(statCmd) + statOutput, err := mk.SSH(statCmd) if err != nil { return fmt.Errorf("Unable to stat %s via SSH. 
error %v, %s", file, err, statOutput) } diff --git a/test/integration/persistence_test.go b/test/integration/persistence_test.go index 158c70668..7b6c65168 100644 --- a/test/integration/persistence_test.go +++ b/test/integration/persistence_test.go @@ -30,11 +30,11 @@ import ( ) func TestPersistence(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) - if strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") { + mk := NewMinikubeRunner(t, "--wait=false") + if strings.Contains(mk.StartArgs, "--vm-driver=none") { t.Skip("skipping test as none driver does not support persistence") } - minikubeRunner.EnsureRunning() + mk.EnsureRunning() kubectlRunner := util.NewKubectlRunner(t) curdir, err := filepath.Abs("") @@ -59,19 +59,19 @@ func TestPersistence(t *testing.T) { verify(t) // Now restart minikube and make sure the pod is still there. - // minikubeRunner.RunCommand("stop", true) - // minikubeRunner.CheckStatus("Stopped") + // mk.RunCommand("stop", true) + // mk.CheckStatus("Stopped") checkStop := func() error { - minikubeRunner.RunCommand("stop", true) - return minikubeRunner.CheckStatusNoFail(state.Stopped.String()) + mk.RunCommand("stop", true) + return mk.CheckStatusNoFail(state.Stopped.String()) } if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil { t.Fatalf("timed out while checking stopped status: %v", err) } - minikubeRunner.Start() - minikubeRunner.CheckStatus(state.Running.String()) + mk.Start() + mk.CheckStatus(state.Running.String()) // Make sure the same things come up after we've restarted. verify(t) diff --git a/test/integration/proxy_test.go b/test/integration/proxy_test.go index 7172528cb..3ef86e750 100644 --- a/test/integration/proxy_test.go +++ b/test/integration/proxy_test.go @@ -71,7 +71,7 @@ func TestProxy(t *testing.T) { t.Fatalf("Failed to set up the test proxy: %s", err) } - // making sure there is no running miniukube to avoid https://github.com/kubernetes/minikube/issues/4132 + // making sure there is no running minikube to avoid https://github.com/kubernetes/minikube/issues/4132 r := NewMinikubeRunner(t) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() @@ -109,7 +109,7 @@ func TestProxy(t *testing.T) { // testProxyWarning checks user is warned correctly about the proxy related env vars func testProxyWarning(t *testing.T) { - r := NewMinikubeRunner(t) + r := NewMinikubeRunner(t, "--wait=false") ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() startCmd := fmt.Sprintf("start %s %s %s", r.StartArgs, r.Args, "--alsologtostderr --v=5") @@ -131,8 +131,8 @@ func testProxyWarning(t *testing.T) { // testProxyDashboard checks if dashboard URL is accessible if proxy is set func testProxyDashboard(t *testing.T) { - minikubeRunner := NewMinikubeRunner(t) - cmd, out := minikubeRunner.RunDaemon("dashboard --url") + mk := NewMinikubeRunner(t) + cmd, out := mk.RunDaemon("dashboard --url") defer func() { err := cmd.Process.Kill() if err != nil { diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 08e1cebb2..69f4c7960 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -208,7 +208,7 @@ func (m *MinikubeRunner) SSH(command string) (string, error) { return string(stdout), nil } -// Start starts the container runtime +// Start starts the cluster func (m *MinikubeRunner) Start(opts ...string) { cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " ")) m.RunCommand(cmd, true) -- 
GitLab
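
Usage sketch (editorial note, not part of the patch): the flag name, its default of
true, and the NewMinikubeRunner signature are taken from the diff above; the test
function below is illustrative only.

    // A test that opts out of the pod health checks by passing --wait=false
    // through the new variadic extraArgs parameter. NewMinikubeRunner appends
    // the extra arguments to the args handed to every minikube invocation.
    func TestExample(t *testing.T) {
        mk := NewMinikubeRunner(t, "--wait=false")
        mk.EnsureRunning()
    }

End users get the equivalent behavior with "minikube start --wait=false"; leaving
the flag at its default of true preserves the old behavior of blocking in
bs.WaitCluster until the Kubernetes core services report healthy.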