// +build integration

/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"k8s.io/minikube/pkg/minikube/localpath"

	"github.com/elazarl/goproxy"
	"github.com/hashicorp/go-retryablehttp"
	"github.com/otiai10/copy"
	"github.com/phayes/freeport"
	"github.com/pkg/errors"
	"golang.org/x/build/kubernetes/api"
	"k8s.io/minikube/pkg/util/retry"
)

// validateFunc is the signature for subtests that share a single setup
type validateFunc func(context.Context, *testing.T, string)

// TestFunctional runs functionality tests which can safely share a profile in parallel
func TestFunctional(t *testing.T) {
	profile := UniqueProfileName("functional")
	ctx, cancel := context.WithTimeout(context.Background(), 40*time.Minute)
	defer func() {
		p := localSyncTestPath()
		if err := os.Remove(p); err != nil {
			t.Logf("unable to remove %s: %v", p, err)
		}
		CleanupWithLogs(t, profile, cancel)
	}()

	// Serial tests
	t.Run("serial", func(t *testing.T) {
		tests := []struct {
			name      string
			validator validateFunc
		}{
			{"CopySyncFile", setupFileSync},                 // Set file for the file sync test case
			{"StartWithProxy", validateStartWithProxy},      // Set everything else up for success
			{"KubeContext", validateKubeContext},            // Racy: must come immediately after "minikube start"
			{"KubectlGetPods", validateKubectlGetPods},      // Make sure apiserver is up
			{"CacheCmd", validateCacheCmd},                  // Caches images needed for subsequent tests because of proxy
			{"MinikubeKubectlCmd", validateMinikubeKubectl}, // Make sure `minikube kubectl` works
		}
		for _, tc := range tests {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				tc.validator(ctx, t, profile)
			})
		}
	})
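	// For reference, any single subtest above can also be run on its own; a
	// sketch of the invocation (the -tags flag matches the build tag at the
	// top of this file; the package path is an assumption about the checkout
	// layout, not taken from this file):
	//
	//   go test -tags integration ./test/integration -run 'TestFunctional/serial/CacheCmd'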
	// Now that we are out of the woods, let's go.
	MaybeParallel(t)

	// Parallelized tests
	t.Run("parallel", func(t *testing.T) {
		tests := []struct {
			name      string
			validator validateFunc
		}{
			{"ComponentHealth", validateComponentHealth},
			{"ConfigCmd", validateConfigCmd},
			{"DashboardCmd", validateDashboardCmd},
			{"DNS", validateDNS},
			{"DryRun", validateDryRun},
			{"StatusCmd", validateStatusCmd},
			{"LogsCmd", validateLogsCmd},
			{"MountCmd", validateMountCmd},
			{"ProfileCmd", validateProfileCmd},
			{"ServiceCmd", validateServiceCmd},
			{"AddonsCmd", validateAddonsCmd},
			{"PersistentVolumeClaim", validatePersistentVolumeClaim},
			{"TunnelCmd", validateTunnelCmd},
			{"SSHCmd", validateSSHCmd},
			{"MySQL", validateMySQL},
			{"FileSync", validateFileSync},
			{"UpdateContextCmd", validateUpdateContextCmd},
			{"DockerEnv", validateDockerEnv},
			{"NodeLabels", validateNodeLabels},
		}
		for _, tc := range tests {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				MaybeParallel(t)
				tc.validator(ctx, t, profile)
			})
		}
	})
}

// validateNodeLabels checks that the minikube cluster is created with the expected Kubernetes node labels
func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output", "jsonpath={.items[0].metadata.labels}"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	var labels []string
	// example output:
	// [beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux minikube.k8s.io/commit:aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name:p1 minikube.k8s.io/updated_at:2020_02_20T12_05_35_0700 minikube.k8s.io/version:v1.7.3 kubernetes.io/arch:amd64 kubernetes.io/hostname:p1 kubernetes.io/os:linux node-role.kubernetes.io/master:]
	err = json.Unmarshal(rr.Stdout.Bytes(), &labels)
	if err != nil {
		t.Errorf("%s: unmarshaling node labels from json failed: %v", rr.Args, err)
	}
	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
	for _, el := range expectedLabels {
		found := false
		for _, l := range labels {
			if strings.Contains(l, el) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("expected label %q to be present in node labels %+v", el, labels)
		}
	}
}

// validateDockerEnv checks minikube functionality after evaluating docker-env
func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
	mctx, cancel := context.WithTimeout(ctx, 13*time.Second)
	defer cancel()
	// we should be able to get minikube status with a bash which has evaled docker-env
	c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile)
	rr, err := Run(t, c)
	if err != nil {
		t.Fatalf("Failed to run minikube status after eval-ing docker-env: %v", err)
	}
	if !strings.Contains(rr.Output(), "Running") {
		t.Fatalf("Expected status output to include 'Running' after eval docker-env but got:\n%s", rr.Output())
	}

	mctx, cancel = context.WithTimeout(ctx, 13*time.Second)
	defer cancel()
	// eval $(minikube -p profile docker-env) and check that we are pointed at the docker daemon inside minikube
	c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images")
	rr, err = Run(t, c)
	if err != nil {
		t.Fatalf("Failed to test eval docker-env: %v", err)
	}
	expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner"
	if !strings.Contains(rr.Output(), expectedImgInside) {
		t.Fatalf("Expected 'docker images' to include %q from the docker daemon inside minikube; the docker images output is:\n%q\n", expectedImgInside, rr.Output())
	}
}
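// For context, `minikube docker-env` emits shell exports which the tests
// above eval; a representative sketch of the output (values are illustrative,
// not taken from a real run):
//
//   export DOCKER_TLS_VERIFY="1"
//   export DOCKER_HOST="tcp://192.168.39.100:2376"
//   export DOCKER_CERT_PATH="/home/user/.minikube/certs"
//   # To point your shell to minikube's docker-daemon, run:
//   # eval $(minikube -p functional docker-env)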
func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
	srv, err := startHTTPProxy(t)
	if err != nil {
		t.Fatalf("Failed to set up the test proxy: %s", err)
	}

	// Use more memory so that we may reliably fit MySQL and nginx
	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...)
	c := exec.CommandContext(ctx, Target(), startArgs...)
	env := os.Environ()
	env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
	env = append(env, "NO_PROXY=")
	c.Env = env
	rr, err := Run(t, c)
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}

	want := "Found network options:"
	if !strings.Contains(rr.Stdout.String(), want) {
		t.Errorf("start stdout=%s, want: *%s*", rr.Stdout.String(), want)
	}

	want = "You appear to be using a proxy"
	if !strings.Contains(rr.Stderr.String(), want) {
		t.Errorf("start stderr=%s, want: *%s*", rr.Stderr.String(), want)
	}
}

// validateKubeContext asserts that kubectl is properly configured (race-condition prone!)
func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	if !strings.Contains(rr.Stdout.String(), profile) {
		t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile)
	}
}

// validateKubectlGetPods asserts that `kubectl get pod -A` returns non-zero content
func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	if rr.Stderr.String() != "" {
		t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr)
	}
	if !strings.Contains(rr.Stdout.String(), "kube-system") {
		t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout)
	}
}

// validateMinikubeKubectl validates that the `minikube kubectl` command returns content
func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) {
	kubectlArgs := []string{"kubectl", "--", "get", "pods"}
	rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}
}

// validateComponentHealth asserts that all Kubernetes components are healthy
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}
	cs := api.ComponentStatusList{}
	d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
	if err := d.Decode(&cs); err != nil {
		t.Fatalf("decode: %v", err)
	}

	for _, i := range cs.Items {
		status := api.ConditionFalse
		for _, c := range i.Conditions {
			if c.Type != api.ComponentHealthy {
				continue
			}
			status = c.Status
		}
		if status != api.ConditionTrue {
			t.Errorf("unexpected status: %v - item: %+v", status, i)
		}
	}
}
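// For reference, validateComponentHealth above decodes `kubectl get cs
// -o=json` output; a trimmed sketch of one healthy item (shape per the
// ComponentStatus API, values illustrative):
//
//   {
//     "metadata": {"name": "scheduler"},
//     "conditions": [{"type": "Healthy", "status": "True", "message": "ok"}]
//   }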
// validateStatusCmd asserts "status" command functionality for default, custom-format, and JSON output
func validateStatusCmd(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}

	// Custom format
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kubelet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	match, _ := regexp.MatchString(`host:([A-Za-z]+),kubelet:([A-Za-z]+),apiserver:([A-Za-z]+),kubeconfig:([A-Za-z]+)`, rr.Stdout.String())
	if !match {
		t.Errorf("%s: output for custom format did not match: %q", rr.Args, rr.Stdout.String())
	}

	// Json output
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	var jsonObject map[string]interface{}
	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	for _, key := range []string{"Host", "Kubelet", "APIServer", "Kubeconfig"} {
		if _, ok := jsonObject[key]; !ok {
			t.Errorf("%s: missing key %q in json object", rr.Args, key)
		}
	}
}

// validateDashboardCmd asserts that the dashboard command works
func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
	args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"}
	ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
	if err != nil {
		t.Errorf("%s failed: %v", args, err)
	}
	defer func() {
		ss.Stop(t)
	}()

	start := time.Now()
	s, err := ReadLineWithTimeout(ss.Stdout, 300*time.Second)
	if err != nil {
		if runtime.GOOS == "windows" {
			t.Skipf("failed to read url within %s: %v\noutput: %q\n", time.Since(start), err, s)
		}
		t.Fatalf("failed to read url within %s: %v\noutput: %q\n", time.Since(start), err, s)
	}

	u, err := url.Parse(strings.TrimSpace(s))
	if err != nil {
		t.Fatalf("failed to parse %q: %v", s, err)
	}

	resp, err := retryablehttp.Get(u.String())
	if err != nil {
		t.Fatalf("failed get: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Errorf("Unable to read http response body: %v", err)
		}
		t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body)
	}
}

// validateDNS asserts that in-cluster DNS is healthy
func validateDNS(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}

	names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 5*time.Minute)
	if err != nil {
		t.Fatalf("wait: %v", err)
	}

	nslookup := func() error {
		rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "nslookup", "kubernetes.default"))
		return err
	}

	// If the coredns process was stable, this retry wouldn't be necessary.
	if err = retry.Expo(nslookup, 1*time.Second, 1*time.Minute); err != nil {
		t.Errorf("nslookup failing: %v", err)
	}

	want := []byte("10.96.0.1")
	if !bytes.Contains(rr.Stdout.Bytes(), want) {
		t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
	}
}
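// For reference, the nslookup assertion in validateDNS above scans for the
// apiserver ClusterIP (10.96.0.1); an illustrative busybox nslookup reply:
//
//   Server:    10.96.0.10
//   Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
//
//   Name:      kubernetes.default
//   Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local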
// validateDryRun asserts that the dry-run mode quickly exits with the right code
func validateDryRun(ctx context.Context, t *testing.T, profile string) {
	// dry-run mode should always be able to finish quickly (<5s)
	mctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// Too little memory!
	startArgs := append([]string{"start", "-p", profile, "--dry-run", "--memory", "250MB", "--alsologtostderr", "-v=1"}, StartArgs()...)
	c := exec.CommandContext(mctx, Target(), startArgs...)
	rr, err := Run(t, c)

	wantCode := 78 // exit.Config
	if rr.ExitCode != wantCode {
		t.Errorf("dry-run(250MB) exit code = %d, wanted = %d: %v", rr.ExitCode, wantCode, err)
	}

	dctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	startArgs = append([]string{"start", "-p", profile, "--dry-run", "--alsologtostderr", "-v=1"}, StartArgs()...)
	c = exec.CommandContext(dctx, Target(), startArgs...)
	rr, err = Run(t, c)
	if rr.ExitCode != 0 || err != nil {
		t.Errorf("dry-run exit code = %d, wanted = %d: %v", rr.ExitCode, 0, err)
	}
}
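// A note on the exit code asserted above: 78 mirrors EX_CONFIG from BSD
// sysexits(3), which minikube's exit package reuses for configuration errors
// such as the impossibly small 250MB memory request.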
// validateCacheCmd tests functionality of cache command (cache add, delete, list)
func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
	if NoneDriver() {
		t.Skipf("skipping: cache unsupported by none")
	}

	t.Run("cache", func(t *testing.T) {
		t.Run("add", func(t *testing.T) {
			for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} {
				_, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
				if err != nil {
					t.Errorf("Failed to cache image %q", img)
				}
			}
		})

		t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) {
			_, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
			if err != nil {
				t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err)
			}
		})

		t.Run("list", func(t *testing.T) {
			rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list"))
			if err != nil {
				t.Errorf("cache list failed: %v", err)
			}
			if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") {
				t.Errorf("cache list did not include k8s.gcr.io/pause")
			}
			if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") {
				t.Errorf("cache list should not include busybox:1.28.4-glibc")
			}
		})

		t.Run("verify_cache_inside_node", func(t *testing.T) {
			rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "images"))
			if err != nil {
				t.Errorf("failed to list images via %q: %v", rr.Command(), err)
			}
			if !strings.Contains(rr.Output(), "1.28.4-glibc") {
				t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output())
			}
		})

		t.Run("cache_reload", func(t *testing.T) {
			// delete an image inside the minikube node manually and expect 'cache reload' to bring it back
			img := "busybox:latest"
			rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "docker", "rmi", img)) // for some reason crictl rmi doesn't work
			if err != nil {
				t.Errorf("failed to delete image inside the node via %q: %v", rr.Command(), err)
			}
			// make sure the image is deleted.
			rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
			if err == nil {
				t.Errorf("expected %q to fail because the image was deleted, but it succeeded", rr.Command())
			}
			// minikube cache reload.
			rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload"))
			if err != nil {
				t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err)
			}
			// make sure 'cache reload' brought back the manually deleted image.
			rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
			if err != nil {
				t.Errorf("expected %q to succeed but got error: %v", rr.Command(), err)
			}
		})
	})
}

// validateConfigCmd asserts basic "config" command functionality
func validateConfigCmd(ctx context.Context, t *testing.T, profile string) {
	tests := []struct {
		args    []string
		wantOut string
		wantErr string
	}{
		{[]string{"unset", "cpus"}, "", ""},
		{[]string{"get", "cpus"}, "", "Error: specified key could not be found in config"},
		{[]string{"set", "cpus", "2"}, "! These changes will take effect upon a minikube delete and then a minikube start", ""},
		{[]string{"get", "cpus"}, "2", ""},
		{[]string{"unset", "cpus"}, "", ""},
		{[]string{"get", "cpus"}, "", "Error: specified key could not be found in config"},
	}

	for _, tc := range tests {
		args := append([]string{"-p", profile, "config"}, tc.args...)
		rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
		if err != nil && tc.wantErr == "" {
			t.Errorf("unexpected failure: %s failed: %v", rr.Args, err)
		}

		got := strings.TrimSpace(rr.Stdout.String())
		if got != tc.wantOut {
			t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut)
		}
		got = strings.TrimSpace(rr.Stderr.String())
		if got != tc.wantErr {
			t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr)
		}
	}
}

// validateLogsCmd asserts basic "logs" command functionality
func validateLogsCmd(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} {
		if !strings.Contains(rr.Stdout.String(), word) {
			t.Errorf("minikube logs missing expected word: %q", word)
		}
	}
}
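// For reference, `minikube profile list` renders an ASCII table, which is why
// validateProfileCmd below starts scanning at line 3 (skipping the top border
// and header rows) and stops before the bottom border. An illustrative sketch
// of the layout (columns and values are assumptions, not from a real run):
//
//   |------------|-----------|---------|----------------|------|---------|
//   |  Profile   | VM Driver | Runtime |       IP       | Port | Version |
//   |------------|-----------|---------|----------------|------|---------|
//   | functional | kvm2      | docker  | 192.168.39.100 | 8443 | v1.17.3 |
//   |------------|-----------|---------|----------------|------|---------|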
// validateProfileCmd asserts "profile" command functionality
func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
	t.Run("profile_not_create", func(t *testing.T) {
		// Profile command should not create a nonexistent profile
		nonexistentProfile := "lis"
		rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile))
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}
		rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}
		var profileJSON map[string][]map[string]interface{}
		err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON)
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}
		for profileK := range profileJSON {
			for _, p := range profileJSON[profileK] {
				var name = p["Name"]
				if name == nonexistentProfile {
					t.Errorf("minikube profile %s should not exist", nonexistentProfile)
				}
			}
		}
	})

	t.Run("profile_list", func(t *testing.T) {
		// List profiles
		rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list"))
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}

		// Table output: skip the border and header rows, and the closing border row
		listLines := strings.Split(strings.TrimSpace(rr.Stdout.String()), "\n")
		profileExists := false
		for i := 3; i < (len(listLines) - 1); i++ {
			profileLine := listLines[i]
			if strings.Contains(profileLine, profile) {
				profileExists = true
				break
			}
		}
		if !profileExists {
			t.Errorf("%s failed: missing profile %q. Got:\n%s", rr.Args, profile, rr.Stdout.String())
		}
	})

	t.Run("profile_json_output", func(t *testing.T) {
		// Json output
		rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}
		var jsonObject map[string][]map[string]interface{}
		err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
		if err != nil {
			t.Errorf("%s failed: %v", rr.Args, err)
		}
		validProfiles := jsonObject["valid"]
		profileExists := false
		for _, profileObject := range validProfiles {
			if profileObject["Name"] == profile {
				profileExists = true
				break
			}
		}
		if !profileExists {
			t.Errorf("%s failed: missing profile %q. Got:\n%s", rr.Args, profile, rr.Stdout.String())
		}
	})
}

// validateServiceCmd asserts basic "service" command functionality
func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node"))
	if err != nil {
		t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
	}
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080"))
	if err != nil {
		t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
	}

	if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", 10*time.Minute); err != nil {
		t.Fatalf("wait: %v", err)
	}

	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	if !strings.Contains(rr.Stdout.String(), "hello-node") {
		t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String())
	}

	// Test --https --url mode
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node"))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}
	if rr.Stderr.String() != "" {
		t.Errorf("unexpected stderr output: %s", rr.Stderr)
	}
	endpoint := strings.TrimSpace(rr.Stdout.String())
	u, err := url.Parse(endpoint)
	if err != nil {
		t.Fatalf("failed to parse %q: %v", endpoint, err)
	}
	if u.Scheme != "https" {
		t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https")
	}

	// Test --format=IP
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() {
		t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname())
	}

	// Test a regular URL
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}

	endpoint = strings.TrimSpace(rr.Stdout.String())
	u, err = url.Parse(endpoint)
	if err != nil {
		t.Fatalf("failed to parse %q: %v", endpoint, err)
	}
	if u.Scheme != "http" {
		t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http")
	}

	t.Logf("url: %s", endpoint)
	resp, err := retryablehttp.Get(endpoint)
	if err != nil {
		t.Fatalf("get failed: %v\nresp: %v", err, resp)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK)
	}
}
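// For reference, the URL parsing in validateServiceCmd above consumes output
// along these lines (endpoints are illustrative, not from a real run):
//
//   minikube service --https --url hello-node          -> https://192.168.39.100:31737
//   minikube service hello-node --url                  -> http://192.168.39.100:31737
//   minikube service hello-node --url --format={{.IP}} -> 192.168.39.100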
// validateAddonsCmd asserts basic "addons" command functionality
func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) {
	// Table output
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	for _, a := range []string{"dashboard", "ingress", "ingress-dns"} {
		if !strings.Contains(rr.Output(), a) {
			t.Errorf("addons list expected to include %q; output: %q", a, rr.Output())
		}
	}

	// Json output
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	var jsonObject map[string]interface{}
	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
}

// validateSSHCmd asserts basic "ssh" command functionality
func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
	if NoneDriver() {
		t.Skipf("skipping: ssh unsupported by none")
	}
	want := "hello\r\n"
	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "echo hello"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	if rr.Stdout.String() != want {
		t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want)
	}
}

// validateMySQL validates a minimalist MySQL deployment
func validateMySQL(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml")))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}

	names, err := PodWait(ctx, t, profile, "default", "app=mysql", 10*time.Minute)
	if err != nil {
		t.Fatalf("podwait: %v", err)
	}

	// Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule.
	mysql := func() error {
		rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;"))
		return err
	}
	if err = retry.Expo(mysql, 5*time.Second, 180*time.Second); err != nil {
		t.Errorf("mysql failing: %v", err)
	}
}

// vmSyncTestPath is where the test file will be synced into the VM
func vmSyncTestPath() string {
	return fmt.Sprintf("/etc/test/nested/copy/%d/hosts", os.Getpid())
}

// localSyncTestPath is where the test file is staged locally before it is synced into the VM
func localSyncTestPath() string {
	return filepath.Join(localpath.MiniPath(), "/files", vmSyncTestPath())
}

// setupFileSync copies an extra file into the minikube home folder for the file sync test
func setupFileSync(ctx context.Context, t *testing.T, profile string) {
	p := localSyncTestPath()
	t.Logf("local sync path: %s", p)
	err := copy.Copy("./testdata/sync.test", p)
	if err != nil {
		t.Fatalf("copy: %v", err)
	}
}

// validateFileSync checks that the synced test file exists within the VM
func validateFileSync(ctx context.Context, t *testing.T, profile string) {
	if NoneDriver() {
		t.Skipf("skipping: ssh unsupported by none")
	}
	vp := vmSyncTestPath()
	t.Logf("Checking for existence of %s within VM", vp)

	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp)))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	got := rr.Stdout.String()
	t.Logf("file sync test content: %s", got)

	expected, err := ioutil.ReadFile("./testdata/sync.test")
	if err != nil {
		t.Errorf("test file not found: %v", err)
	}

	if diff := cmp.Diff(string(expected), got); diff != "" {
		t.Errorf("/etc/sync.test content mismatch (-want +got):\n%s", diff)
	}
}
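// Taken together, the sync helpers above map a local staging path to an
// identical path inside the VM; e.g. for pid 1234 (illustrative):
//
//   local: $MINIKUBE_HOME/files/etc/test/nested/copy/1234/hosts
//   VM:    /etc/test/nested/copy/1234/hosts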
"--alsologtostderr", "-v=2")) if err != nil { t.Errorf("%s failed: %v", rr.Args, err) } want := []byte("IP was already correctly configured") if !bytes.Contains(rr.Stdout.Bytes(), want) { t.Errorf("update-context: got=%q, want=*%q*", rr.Stdout.Bytes(), want) } } // startHTTPProxy runs a local http proxy and sets the env vars for it. func startHTTPProxy(t *testing.T) (*http.Server, error) { port, err := freeport.GetFreePort() if err != nil { return nil, errors.Wrap(err, "Failed to get an open port") } addr := fmt.Sprintf("localhost:%d", port) proxy := goproxy.NewProxyHttpServer() srv := &http.Server{Addr: addr, Handler: proxy} go func(s *http.Server, t *testing.T) { if err := s.ListenAndServe(); err != http.ErrServerClosed { t.Errorf("Failed to start http server for proxy mock") } }(srv, t) return srv, nil }