Commit 44c8dd52 authored by tstromberg

Add --wait flag to allow users to skip pod health checks

Parent 71ab89df
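In short: minikube start now waits until Kubernetes core services are healthy before exiting, via a new boolean --wait flag that defaults to true. Users who do not need that guarantee can opt out, for example:

    minikube start --wait=false

The integration test changes below pass --wait=false wherever pod health is irrelevant to the test, so the suite does not block on cluster readiness.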
@@ -102,6 +102,7 @@ const (
 	downloadOnly = "download-only"
 	dnsProxy = "dns-proxy"
 	hostDNSResolver = "host-dns-resolver"
+	waitUntilHealthy = "wait"
 )

 var (
@@ -163,6 +164,7 @@ func init() {
 	startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).")
 	startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox)")
 	startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox)")
+	startCmd.Flags().Bool(waitUntilHealthy, true, "Wait until Kubernetes core services are healthy before exiting")
 	startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox)")
 	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
 		exit.WithError("unable to bind flags", err)
@@ -273,8 +275,10 @@ func runStart(cmd *cobra.Command, args []string) {
 		prepareNone()
 	}

-	if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
-		exit.WithError("Wait failed", err)
+	if viper.GetBool(waitUntilHealthy) {
+		if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
+			exit.WithError("Wait failed", err)
+		}
 	}

 	showKubectlConnectInfo(kubeconfig)
@@ -310,7 +314,11 @@ func showKubectlConnectInfo(kubeconfig *pkgutil.KubeConfigSetup) {
 	if kubeconfig.KeepContext {
 		console.OutT(console.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", console.Arg{"name": kubeconfig.ClusterName})
 	} else {
-		console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		if !viper.GetBool(waitUntilHealthy) {
+			console.OutT(console.Ready, "kubectl has been configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		} else {
+			console.OutT(console.Ready, "Done! kubectl is now configured to use {{.name}}", console.Arg{"name": cfg.GetMachineName()})
+		}
 	}
 	_, err := exec.LookPath("kubectl")
 	if err != nil {
...
@@ -76,8 +76,8 @@ func readLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error) {

 func testDashboard(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	cmd, out := minikubeRunner.RunDaemon("dashboard --url")
+	mk := NewMinikubeRunner(t, "--wait=false")
+	cmd, out := mk.RunDaemon("dashboard --url")
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {
@@ -121,10 +121,10 @@ func testDashboard(t *testing.T) {

 func testIngressController(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t)
 	kubectlRunner := util.NewKubectlRunner(t)
-	minikubeRunner.RunCommand("addons enable ingress", true)
+	mk.RunCommand("addons enable ingress", true)
 	if err := util.WaitForIngressControllerRunning(t); err != nil {
 		t.Fatalf("waiting for ingress-controller to be up: %v", err)
 	}
@@ -154,7 +154,7 @@ func testIngressController(t *testing.T) {
 	checkIngress := func() error {
 		expectedStr := "Welcome to nginx!"
 		runCmd := fmt.Sprintf("curl http://127.0.0.1:80 -H 'Host: nginx.example.com'")
-		sshCmdOutput, _ := minikubeRunner.SSH(runCmd)
+		sshCmdOutput, _ := mk.SSH(runCmd)
 		if !strings.Contains(sshCmdOutput, expectedStr) {
 			return fmt.Errorf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
 		}
@@ -172,15 +172,15 @@ func testIngressController(t *testing.T) {
 			}
 		}
 	}()
-	minikubeRunner.RunCommand("addons disable ingress", true)
+	mk.RunCommand("addons disable ingress", true)
 }

 func testServicesList(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t)
 	checkServices := func() error {
-		output := minikubeRunner.RunCommand("service list", false)
+		output := mk.RunCommand("service list", false)
 		if !strings.Contains(output, "kubernetes") {
 			return fmt.Errorf("Error, kubernetes service missing from output %s", output)
 		}
@@ -192,8 +192,8 @@ func testServicesList(t *testing.T) {
 	}
 }

 func testGvisor(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t)
+	mk.RunCommand("addons enable gvisor", true)
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
@@ -208,7 +208,7 @@ func testGvisor(t *testing.T) {
 	}
 	t.Log("disabling gvisor addon")
-	minikubeRunner.RunCommand("addons disable gvisor", true)
+	mk.RunCommand("addons disable gvisor", true)
 	t.Log("waiting for gvisor controller pod to be deleted")
 	if err := util.WaitForGvisorControllerDeleted(); err != nil {
 		t.Fatalf("waiting for gvisor controller to be deleted: %v", err)
@@ -224,9 +224,9 @@ func testGvisor(t *testing.T) {
 }

 func testGvisorRestart(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	minikubeRunner.RunCommand("addons enable gvisor", true)
+	mk := NewMinikubeRunner(t)
+	mk.EnsureRunning()
+	mk.RunCommand("addons enable gvisor", true)
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
@@ -234,10 +234,10 @@ func testGvisorRestart(t *testing.T) {
 	}
 	// TODO: @priyawadhwa to add test for stop as well
-	minikubeRunner.RunCommand("delete", false)
-	minikubeRunner.CheckStatus(state.None.String())
-	minikubeRunner.Start()
-	minikubeRunner.CheckStatus(state.Running.String())
+	mk.RunCommand("delete", false)
+	mk.CheckStatus(state.None.String())
+	mk.Start()
+	mk.CheckStatus(state.Running.String())
 	t.Log("waiting for gvisor controller to come up")
 	if err := util.WaitForGvisorControllerRunning(t); err != nil {
...
@@ -30,8 +30,7 @@ import (

 // Assert that docker-env subcommand outputs usable information for "docker ps"
 func testClusterEnv(t *testing.T) {
 	t.Parallel()
-
-	r := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t, "--wait=false")
 	// Set a specific shell syntax so that we don't have to handle every possible user shell
 	envOut := r.RunCommand("docker-env --shell=bash", true)
...
@@ -25,9 +25,9 @@ import (

 func testClusterLogs(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
-	logsCmdOutput := minikubeRunner.GetLogs()
+	mk := NewMinikubeRunner(t)
+	mk.EnsureRunning()
+	logsCmdOutput := mk.GetLogs()
 	// check for # of lines or check for strings
 	logWords := []string{"minikube", ".go"}
...
@@ -25,9 +25,9 @@ import (

 func testClusterSSH(t *testing.T) {
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	expectedStr := "hello"
-	sshCmdOutput := minikubeRunner.RunCommand("ssh echo "+expectedStr, true)
+	sshCmdOutput := mk.RunCommand("ssh echo "+expectedStr, true)
 	if !strings.Contains(sshCmdOutput, expectedStr) {
 		t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
 	}
...
@@ -27,7 +27,7 @@ import (
 )

 func TestDocker(t *testing.T) {
-	mk := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	if strings.Contains(mk.StartArgs, "--vm-driver=none") {
 		t.Skip("skipping test as none driver does not bundle docker")
 	}
@@ -41,7 +41,7 @@ func TestDocker(t *testing.T) {
 	}
 	startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.Args,
-		"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr --v=5")
+		"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
 	stdout, stderr, err := mk.RunWithContext(ctx, startCmd)
 	if err != nil {
 		t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
...
@@ -19,6 +19,7 @@ package integration

 import (
 	"flag"
 	"os"
+	"strings"
 	"testing"

 	"k8s.io/minikube/test/integration/util"
@@ -37,9 +38,9 @@
 var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")

 // NewMinikubeRunner creates a new MinikubeRunner
-func NewMinikubeRunner(t *testing.T) util.MinikubeRunner {
+func NewMinikubeRunner(t *testing.T, extraArgs ...string) util.MinikubeRunner {
 	return util.MinikubeRunner{
-		Args:       *args,
+		Args:       *args + " " + strings.Join(extraArgs, " "),
 		BinaryPath: *binaryPath,
 		StartArgs:  *startArgs,
 		MountArgs:  *mountArgs,
...
@@ -26,10 +26,10 @@ import (

 func TestISO(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

-	minikubeRunner.RunCommand("delete", false)
-	minikubeRunner.Start()
+	mk.RunCommand("delete", false)
+	mk.Start()

 	t.Run("permissions", testMountPermissions)
 	t.Run("packages", testPackages)
@@ -37,14 +37,14 @@ func TestISO(t *testing.T) {
 }

 func testMountPermissions(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")
 	// test mount permissions
 	mountPoints := []string{"/Users", "/hosthome"}
 	perms := "drwxr-xr-x"
 	foundMount := false

 	for _, dir := range mountPoints {
-		output, err := minikubeRunner.SSH(fmt.Sprintf("ls -l %s", dir))
+		output, err := mk.SSH(fmt.Sprintf("ls -l %s", dir))
 		if err != nil {
 			continue
 		}
@@ -59,7 +59,7 @@ func testMountPermissions(t *testing.T) {
 }

 func testPackages(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	packages := []string{
 		"git",
@@ -73,7 +73,7 @@ func testPackages(t *testing.T) {
 	}

 	for _, pkg := range packages {
-		if output, err := minikubeRunner.SSH(fmt.Sprintf("which %s", pkg)); err != nil {
+		if output, err := mk.SSH(fmt.Sprintf("which %s", pkg)); err != nil {
 			t.Errorf("Error finding package: %s. Error: %v. Output: %s", pkg, err, output)
 		}
 	}
@@ -81,7 +81,7 @@ func testPackages(t *testing.T) {
 }

 func testPersistence(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	for _, dir := range []string{
 		"/data",
@@ -92,7 +92,7 @@ func testPersistence(t *testing.T) {
 		"/var/lib/toolbox",
 		"/var/lib/boot2docker",
 	} {
-		output, err := minikubeRunner.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir))
+		output, err := mk.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir))
 		if err != nil {
 			t.Errorf("Error checking device for %s. Error: %v", dir, err)
 		}
...
@@ -43,7 +43,7 @@ func testMounting(t *testing.T) {
 	}
 	t.Parallel()
-	minikubeRunner := NewMinikubeRunner(t)
+	mk := NewMinikubeRunner(t, "--wait=false")

 	tempDir, err := ioutil.TempDir("", "mounttest")
 	if err != nil {
@@ -51,8 +51,8 @@ func testMounting(t *testing.T) {
 	}
 	defer os.RemoveAll(tempDir)

-	mountCmd := getMountCmd(minikubeRunner, tempDir)
-	cmd, _, _ := minikubeRunner.RunDaemon2(mountCmd)
+	mountCmd := getMountCmd(mk, tempDir)
+	cmd, _, _ := mk.RunDaemon2(mountCmd)
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {
@@ -99,7 +99,7 @@ func testMounting(t *testing.T) {
 	t.Logf("Pods appear to be running")

 	mountTest := func() error {
-		if err := verifyFiles(minikubeRunner, kubectlRunner, tempDir, podName, expected); err != nil {
+		if err := verifyFiles(mk, kubectlRunner, tempDir, podName, expected); err != nil {
 			t.Fatalf(err.Error())
 		}
@@ -111,10 +111,10 @@ func testMounting(t *testing.T) {
 }

-func getMountCmd(minikubeRunner util.MinikubeRunner, mountDir string) string {
+func getMountCmd(mk util.MinikubeRunner, mountDir string) string {
 	var mountCmd string
-	if len(minikubeRunner.MountArgs) > 0 {
-		mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", minikubeRunner.MountArgs, mountDir)
+	if len(mk.MountArgs) > 0 {
+		mountCmd = fmt.Sprintf("mount %s %s:/mount-9p", mk.MountArgs, mountDir)
 	} else {
 		mountCmd = fmt.Sprintf("mount %s:/mount-9p", mountDir)
 	}
@@ -144,7 +144,7 @@ func waitForPods(s map[string]string) error {
 	return nil
 }

-func verifyFiles(minikubeRunner util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error {
+func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error {
 	path := filepath.Join(tempDir, "frompod")
 	out, err := ioutil.ReadFile(path)
 	if err != nil {
@@ -167,7 +167,7 @@
 	files := []string{"fromhost", "frompod"}
 	for _, file := range files {
 		statCmd := fmt.Sprintf("stat /mount-9p/%s", file)
-		statOutput, err := minikubeRunner.SSH(statCmd)
+		statOutput, err := mk.SSH(statCmd)
 		if err != nil {
 			return fmt.Errorf("Unable to stat %s via SSH. error %v, %s", file, err, statOutput)
 		}
...
@@ -30,11 +30,11 @@ import (
 )

 func TestPersistence(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	if strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") {
+	mk := NewMinikubeRunner(t, "--wait=false")
+	if strings.Contains(mk.StartArgs, "--vm-driver=none") {
 		t.Skip("skipping test as none driver does not support persistence")
 	}
-	minikubeRunner.EnsureRunning()
+	mk.EnsureRunning()

 	kubectlRunner := util.NewKubectlRunner(t)
 	curdir, err := filepath.Abs("")
@@ -59,19 +59,19 @@ func TestPersistence(t *testing.T) {
 	verify(t)

 	// Now restart minikube and make sure the pod is still there.
-	// minikubeRunner.RunCommand("stop", true)
-	// minikubeRunner.CheckStatus("Stopped")
+	// mk.RunCommand("stop", true)
+	// mk.CheckStatus("Stopped")

 	checkStop := func() error {
-		minikubeRunner.RunCommand("stop", true)
-		return minikubeRunner.CheckStatusNoFail(state.Stopped.String())
+		mk.RunCommand("stop", true)
+		return mk.CheckStatusNoFail(state.Stopped.String())
 	}

 	if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
 		t.Fatalf("timed out while checking stopped status: %v", err)
 	}

-	minikubeRunner.Start()
-	minikubeRunner.CheckStatus(state.Running.String())
+	mk.Start()
+	mk.CheckStatus(state.Running.String())

 	// Make sure the same things come up after we've restarted.
 	verify(t)
...
@@ -71,7 +71,7 @@ func TestProxy(t *testing.T) {
 		t.Fatalf("Failed to set up the test proxy: %s", err)
 	}

-	// making sure there is no running miniukube to avoid https://github.com/kubernetes/minikube/issues/4132
+	// making sure there is no running minikube to avoid https://github.com/kubernetes/minikube/issues/4132
 	r := NewMinikubeRunner(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()
@@ -109,7 +109,7 @@ func TestProxy(t *testing.T) {

 // testProxyWarning checks user is warned correctly about the proxy related env vars
 func testProxyWarning(t *testing.T) {
-	r := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t, "--wait=false")
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()
 	startCmd := fmt.Sprintf("start %s %s %s", r.StartArgs, r.Args, "--alsologtostderr --v=5")
@@ -131,8 +131,8 @@ func testProxyWarning(t *testing.T) {

 // testProxyDashboard checks if dashboard URL is accessible if proxy is set
 func testProxyDashboard(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	cmd, out := minikubeRunner.RunDaemon("dashboard --url")
+	mk := NewMinikubeRunner(t)
+	cmd, out := mk.RunDaemon("dashboard --url")
 	defer func() {
 		err := cmd.Process.Kill()
 		if err != nil {
...
@@ -208,7 +208,7 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
 	return string(stdout), nil
 }

-// Start starts the container runtime
+// Start starts the cluster
 func (m *MinikubeRunner) Start(opts ...string) {
 	cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " "))
 	m.RunCommand(cmd, true)
...