未验证 提交 f872767a 编写于 作者: M Medya Ghazizadeh 提交者: GitHub

Merge pull request #4948 from medyagh/paralell_integration_tests4

refactor integration tests to run in parallel
......@@ -42,3 +42,5 @@ deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/etc/VERSION
/.idea
/.vscode
test/integration/testdata/minikube-linux-amd64-latest-stable
......@@ -8,6 +8,7 @@ require (
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/blang/semver v3.5.0+incompatible
github.com/c4milo/gotoolkit v0.0.0-20170318115440-bcc06269efa9 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/cloudfoundry-attic/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect
github.com/docker/docker v1.13.1 // indirect
......@@ -15,7 +16,6 @@ require (
github.com/docker/machine v0.16.1-0.20190718054102-a555e4f7a8f5
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f
github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
......@@ -24,9 +24,9 @@ require (
github.com/google/go-github/v25 v25.0.2
github.com/gorilla/mux v1.7.1 // indirect
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
github.com/hashicorp/go-getter v1.3.0
github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604 // indirect
github.com/hashicorp/go-retryablehttp v0.5.4
github.com/hashicorp/go-version v1.1.0 // indirect
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
github.com/hooklift/iso9660 v0.0.0-20170318115843-1cf07e5970d8
github.com/intel-go/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 // indirect
......@@ -36,7 +36,6 @@ require (
github.com/machine-drivers/docker-machine-driver-vmware v0.1.1
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-isatty v0.0.5
github.com/mattn/go-runewidth v0.0.0-20161012013512-737072b4e32b // indirect
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
github.com/moby/hyperkit v0.0.0-20171020124204-a12cd7250bcd
github.com/olekukonko/tablewriter v0.0.0-20160923125401-bdcc175572fd
......@@ -62,7 +61,6 @@ require (
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f
golang.org/x/text v0.3.2
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.6 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
......
此差异已折叠。
......@@ -23,7 +23,7 @@
# EXTRA_START_ARGS: additional flags to pass into minikube start
# EXTRA_ARGS: additional flags to pass into minikube
# JOB_NAME: the name of the logfile and check name to update on github
#
# PARALLEL_COUNT: number of tests to run in parallel
readonly TEST_ROOT="${HOME}/minikube-integration"
......@@ -75,6 +75,7 @@ gsutil -qm cp \
gsutil -qm cp "gs://minikube-builds/${MINIKUBE_LOCATION}/testdata"/* testdata/
# Set the executable bit on the e2e binary and out binary
export MINIKUBE_BIN="out/minikube-${OS_ARCH}"
export E2E_BIN="out/e2e-${OS_ARCH}"
......@@ -82,7 +83,7 @@ chmod +x "${MINIKUBE_BIN}" "${E2E_BIN}" out/docker-machine-driver-*
procs=$(pgrep "minikube-${OS_ARCH}|e2e-${OS_ARCH}" || true)
if [[ "${procs}" != "" ]]; then
echo "ERROR: found stale test processes to kill:"
echo "Warning: found stale test processes to kill:"
ps -f -p ${procs} || true
kill ${procs} || true
kill -9 ${procs} || true
......@@ -130,8 +131,13 @@ if type -P virsh; then
| awk '{ print $2 }' \
| xargs -I {} sh -c "virsh -c qemu:///system destroy {}; virsh -c qemu:///system undefine {}" \
|| true
# list again after clean up
virsh -c qemu:///system list --all
virsh -c qemu:///system list --all \
| grep Test \
| awk '{ print $2 }' \
| xargs -I {} sh -c "virsh -c qemu:///system destroy {}; virsh -c qemu:///system undefine {}" \
|| true
echo ">> Virsh VM list after clean up (should be empty) :"
virsh -c qemu:///system list --all || true
fi
if type -P vboxmanage; then
......@@ -141,6 +147,11 @@ if type -P vboxmanage; then
| cut -d'"' -f2 \
| xargs -I {} sh -c "vboxmanage startvm {} --type emergencystop; vboxmanage unregistervm {} --delete" \
|| true
vboxmanage list vms \
| grep Test \
| cut -d'"' -f2 \
| xargs -I {} sh -c "vboxmanage startvm {} --type emergencystop; vboxmanage unregistervm {} --delete" \
|| true
# remove inaccessible stale VMs https://github.com/kubernetes/minikube/issues/4872
vboxmanage list vms \
......@@ -148,6 +159,7 @@ if type -P vboxmanage; then
| cut -d'"' -f3 \
| xargs -I {} sh -c "vboxmanage startvm {} --type emergencystop; vboxmanage unregistervm {} --delete" \
|| true
# list them again after clean up
vboxmanage list vms || true
fi
......@@ -238,7 +250,7 @@ echo ">> Starting ${E2E_BIN} at $(date)"
${SUDO_PREFIX}${E2E_BIN} \
-minikube-start-args="--vm-driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
-minikube-args="--v=10 --logtostderr ${EXTRA_ARGS}" \
-test.v -test.timeout=100m -binary="${MINIKUBE_BIN}" && result=$? || result=$?
-test.v -test.timeout=100m -test.parallel=${PARALLEL_COUNT} -binary="${MINIKUBE_BIN}" && result=$? || result=$?
echo ">> ${E2E_BIN} exited with ${result} at $(date)"
echo ""
......
......@@ -28,6 +28,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
JOB_NAME="Linux-KVM"
PARALLEL_COUNT=4
# Download files and set permissions
source ./common.sh
......@@ -30,6 +30,7 @@ OS_ARCH="linux-amd64"
VM_DRIVER="none"
JOB_NAME="Linux-None"
EXTRA_ARGS="--bootstrapper=kubeadm"
PARALLEL_COUNT=1
SUDO_PREFIX="sudo -E "
export KUBECONFIG="/root/.kube/config"
......
......@@ -28,6 +28,7 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="virtualbox"
JOB_NAME="Linux-VirtualBox"
PARALLEL_COUNT=4
# Download files and set permissions
source ./common.sh
......@@ -31,7 +31,7 @@ VM_DRIVER="hyperkit"
JOB_NAME="OSX-Hyperkit"
EXTRA_ARGS="--bootstrapper=kubeadm"
EXTRA_START_ARGS=""
PARALLEL_COUNT=3
# Download files and set permissions
source common.sh
......@@ -29,6 +29,7 @@ OS_ARCH="darwin-amd64"
VM_DRIVER="virtualbox"
JOB_NAME="OSX-Virtualbox"
EXTRA_ARGS="--bootstrapper=kubeadm"
PARALLEL_COUNT=3
# Download files and set permissions
source common.sh
......@@ -71,9 +71,14 @@ func (s *PodStore) Stop() {
}
// GetClient gets the client from config
func GetClient() (kubernetes.Interface, error) {
func GetClient(kubectlContext ...string) (kubernetes.Interface, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
if kubectlContext != nil {
configOverrides = &clientcmd.ConfigOverrides{
CurrentContext: kubectlContext[0],
}
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := kubeConfig.ClientConfig()
if err != nil {
......
// +build integration
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The filename a_download_only_test.go deliberately starts with "a" so that this test runs before all parallel tests, downloading the images and caching them for reuse.
package integration
import (
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/hashicorp/go-getter"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/test/integration/util"
)
// TestDownloadOnly tests the --download-only option.
//
// Note: this test runs before all other integration tests (its filename is
// alphabetically first) so that it caches the images and binaries the
// parallel tests need, avoiding redundant downloads.
func TestDownloadOnly(t *testing.T) {
	p := profileName(t)
	mk := NewMinikubeRunner(t, p)
	if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
		defer mk.TearDown(t)
	}

	t.Run("Oldest", func(t *testing.T) {
		stdout, stderr, err := mk.Start("--download-only", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
		if err != nil {
			t.Errorf("%s minikube --download-only failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
		}
	})

	t.Run("Newest", func(t *testing.T) {
		stdout, stderr, err := mk.Start("--download-only", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion))
		if err != nil {
			t.Errorf("%s minikube --download-only failed : %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
		}
		// TODO: add test to check if files are downloaded
	})

	t.Run("DownloadLatestRelease", func(t *testing.T) {
		dest := filepath.Join(*testdataDir, fmt.Sprintf("minikube-%s-%s-latest-stable", runtime.GOOS, runtime.GOARCH))
		err := downloadMinikubeBinary(t, dest, "latest")
		if err != nil {
			// fixed typo: "erorr" -> "error"
			t.Errorf("error downloading the latest minikube release: %v", err)
		}
	})
}
// downloadMinikubeBinary fetches the minikube binary of the given version
// from GitHub into dest; it acts as test setup for TestVersionUpgrade.
func downloadMinikubeBinary(t *testing.T, dest string, version string) error {
	t.Helper()
	// Resolve the release URL for this OS, then download with retries
	// to tolerate flaky networks.
	url := pkgutil.GetBinaryDownloadURL(version, runtime.GOOS)
	fetch := func() error {
		return getter.GetFile(dest, url)
	}
	if err := util.RetryX(fetch, 13*time.Second, 5*time.Minute); err != nil {
		return errors.Wrap(err, "Failed to get latest release binary")
	}
	// Windows has no executable bit; everywhere else the binary must be runnable.
	if runtime.GOOS == "windows" {
		return nil
	}
	return os.Chmod(dest, 0700)
}
// +build integration
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"path/filepath"
"testing"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
commonutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/test/integration/util"
)
// TestContainerd exercises the containerd container runtime.
// It is skipped for the none driver, which cannot switch runtimes.
func TestContainerd(t *testing.T) {
	if isTestNoneDriver(t) {
		t.Skip("Can't run containerd backend with none driver")
	}
	// Opt into parallel execution only when the suite allows it
	// (see shouldRunInParallel).
	if shouldRunInParallel(t) {
		t.Parallel()
	}
	t.Run("GvisorRestart", testGvisorRestart)
}
// testGvisorRestart verifies that the gvisor addon and its untrusted
// workloads work both on a fresh containerd start and after a full
// delete/restart cycle of the cluster.
func testGvisorRestart(t *testing.T) {
	p := profileName(t)
	if shouldRunInParallel(t) {
		t.Parallel()
	}
	mk := NewMinikubeRunner(t, p, "--wait=false")
	defer mk.TearDown(t)
	stdout, stderr, err := mk.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
	if err != nil {
		// message tidied: previously read "failed to start minikube (...) failed"
		t.Fatalf("failed to start minikube (for profile %s): %v\nstdout: %s\nstderr: %s", p, err, stdout, stderr)
	}
	mk.RunCommand("addons enable gvisor", true)

	t.Log("waiting for gvisor controller to come up")
	if err := waitForGvisorControllerRunning(p); err != nil {
		t.Errorf("waiting for gvisor controller to be up: %v", err)
	}

	createUntrustedWorkload(t, p)
	t.Log("making sure untrusted workload is Running")
	if err := waitForUntrustedNginxRunning(p); err != nil {
		t.Errorf("waiting for nginx to be up: %v", err)
	}
	deleteUntrustedWorkload(t, p)

	mk.RunCommand("delete", true)

	// Restart and verify gvisor still works end to end.
	stdout, stderr, err = mk.Start()
	if err != nil {
		// bug fix: the "%s" here is documented as the profile but was
		// passed t.Name(); use p for consistency with the message above.
		t.Fatalf("failed to start minikube (for profile %s): %v \nstdout: %s \nstderr: %s", p, err, stdout, stderr)
	}
	mk.CheckStatus(state.Running.String())

	t.Log("waiting for gvisor controller to come up")
	if err := waitForGvisorControllerRunning(p); err != nil {
		t.Errorf("waiting for gvisor controller to be up: %v", err)
	}

	createUntrustedWorkload(t, p)
	t.Log("making sure untrusted workload is Running")
	if err := waitForUntrustedNginxRunning(p); err != nil {
		t.Errorf("waiting for nginx to be up: %v", err)
	}
	deleteUntrustedWorkload(t, p)
}
// createUntrustedWorkload deploys the nginx pod that carries the untrusted
// workload annotation, via kubectl against the given profile's context.
func createUntrustedWorkload(t *testing.T, profile string) {
	kr := util.NewKubectlRunner(t, profile)
	manifest := filepath.Join(*testdataDir, "nginx-untrusted.yaml")
	t.Log("creating pod with untrusted workload annotation")
	args := []string{"replace", "-f", manifest, "--force"}
	if _, err := kr.RunCommand(args); err != nil {
		t.Fatalf("creating untrusted nginx resource: %v", err)
	}
}
// deleteUntrustedWorkload removes the untrusted nginx pod.
// Deletion failures are only logged — cleanup is best-effort.
func deleteUntrustedWorkload(t *testing.T, profile string) {
	kr := util.NewKubectlRunner(t, profile)
	manifest := filepath.Join(*testdataDir, "nginx-untrusted.yaml")
	args := []string{"delete", "-f", manifest}
	if _, err := kr.RunCommand(args); err != nil {
		t.Logf("error deleting untrusted nginx resource: %v", err)
	}
}
// waitForGvisorControllerRunning blocks until the gvisor controller pod
// in kube-system is running for the given minikube profile.
func waitForGvisorControllerRunning(p string) error {
	client, err := commonutil.GetClient(p)
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	labelSet := labels.Set(map[string]string{"kubernetes.io/minikube-addons": "gvisor"})
	err = commonutil.WaitForPodsWithLabelRunning(client, "kube-system", labels.SelectorFromSet(labelSet))
	if err != nil {
		return errors.Wrap(err, "waiting for gvisor controller pod to stabilize")
	}
	return nil
}
// waitForUntrustedNginxRunning blocks until the untrusted nginx pod
// (label run=nginx, default namespace) is running for the given profile.
func waitForUntrustedNginxRunning(miniProfile string) error {
	client, err := commonutil.GetClient(miniProfile)
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	labelSet := labels.Set(map[string]string{"run": "nginx"})
	err = commonutil.WaitForPodsWithLabelRunning(client, "default", labels.SelectorFromSet(labelSet))
	if err != nil {
		return errors.Wrap(err, "waiting for nginx pods")
	}
	return nil
}
......@@ -20,34 +20,38 @@ package integration
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
)
func TestDocker(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
if usingNoneDriver(mk) {
if isTestNoneDriver(t) {
t.Skip("skipping test as none driver does not bundle docker")
}
p := profileName(t)
if shouldRunInParallel(t) {
t.Parallel()
}
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
// Start a timer for all remaining commands, to display failure output before a panic.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
defer cancel()
if _, _, err := mk.RunWithContext(ctx, "delete"); err != nil {
t.Logf("pre-delete failed (probably ok): %v", err)
}
startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.GlobalArgs,
"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
stdout, stderr, err := mk.RunWithContext(ctx, startCmd)
stdout, stderr, err := mk.Start("--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", " --docker-opt=icc=true")
if err != nil {
t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
t.Fatalf("TestDocker minikube start failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mk.EnsureRunning()
mk.CheckStatus(state.Running.String())
stdout, stderr, err = mk.RunWithContext(ctx, "ssh -- systemctl show docker --property=Environment --no-pager")
if err != nil {
......
......@@ -21,6 +21,7 @@ import (
"os"
"strings"
"testing"
"time"
"k8s.io/minikube/test/integration/util"
)
......@@ -31,19 +32,56 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
var startTimeout = flag.Int("timeout", 25, "number of minutes to wait for minikube start")
var binaryPath = flag.String("binary", "../../out/minikube", "path to minikube binary")
var globalArgs = flag.String("minikube-args", "", "Arguments to pass to minikube")
var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start")
var mountArgs = flag.String("minikube-mount-args", "", "Arguments to pass to minikube mount")
var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")
var parallel = flag.Bool("parallel", true, "run the tests in parallel, set false for run sequentially")
// NewMinikubeRunner creates a new MinikubeRunner
func NewMinikubeRunner(t *testing.T, extraArgs ...string) util.MinikubeRunner {
func NewMinikubeRunner(t *testing.T, profile string, extraStartArgs ...string) util.MinikubeRunner {
return util.MinikubeRunner{
BinaryPath: *binaryPath,
StartArgs: *startArgs + " " + strings.Join(extraArgs, " "),
GlobalArgs: *globalArgs,
MountArgs: *mountArgs,
T: t,
Profile: profile,
BinaryPath: *binaryPath,
StartArgs: *startArgs + " " + strings.Join(extraStartArgs, " "),
GlobalArgs: *globalArgs,
MountArgs: *mountArgs,
TimeOutStart: time.Duration(*startTimeout) * time.Minute,
T: t,
}
}
// isTestNoneDriver reports whether the suite was started with
// --vm-driver=none (read from the -minikube-start-args flag).
func isTestNoneDriver(t *testing.T) bool {
	t.Helper()
	usingNone := strings.Contains(*startArgs, "--vm-driver=none")
	return usingNone
}
// profileName chooses a profile name based on the test name, used for both
// minikube and the kube context across that test. The none driver and
// TestFunctional always use the default "minikube" profile.
func profileName(t *testing.T) string {
	t.Helper()
	if isTestNoneDriver(t) {
		return "minikube"
	}
	// Top-level test name only: e.g. TestFunctional/SSH -> TestFunctional.
	root := strings.SplitN(t.Name(), "/", 2)[0]
	if root == "TestFunctional" {
		return "minikube"
	}
	return root
}
// shouldRunInParallel determines whether the current test may call
// t.Parallel(): parallelism must be enabled via the -parallel flag, the
// none driver never runs in parallel, and TestFunctional is excluded.
func shouldRunInParallel(t *testing.T) bool {
	t.Helper()
	switch {
	case !*parallel:
		return false
	case isTestNoneDriver(t):
		return false
	}
	// Top-level test name only: e.g. TestFunctional/SSH -> TestFunctional.
	root := strings.SplitN(t.Name(), "/", 2)[0]
	return root != "TestFunctional" // gosimple lint: https://staticcheck.io/docs/checks#S1008
}
......@@ -25,22 +25,24 @@ import (
"net"
"net/http"
"net/url"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"k8s.io/apimachinery/pkg/labels"
commonutil "k8s.io/minikube/pkg/util"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/test/integration/util"
)
func testAddons(t *testing.T) {
t.Parallel()
client, err := pkgutil.GetClient()
p := profileName(t)
client, err := pkgutil.GetClient(p)
if err != nil {
t.Fatalf("Could not get kubernetes client: %v", err)
}
......@@ -76,7 +78,8 @@ func readLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error)
func testDashboard(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
cmd, out := mk.RunDaemon("dashboard --url")
defer func() {
err := cmd.Process.Kill()
......@@ -85,7 +88,7 @@ func testDashboard(t *testing.T) {
}
}()
s, err := readLineWithTimeout(out, 180*time.Second)
s, err := readLineWithTimeout(out, 240*time.Second)
if err != nil {
t.Fatalf("failed to read url: %v", err)
}
......@@ -121,30 +124,27 @@ func testDashboard(t *testing.T) {
func testIngressController(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
kr := util.NewKubectlRunner(t)
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
kr := util.NewKubectlRunner(t, p)
mk.RunCommand("addons enable ingress", true)
if err := util.WaitForIngressControllerRunning(t); err != nil {
t.Fatalf("waiting for ingress-controller to be up: %v", err)
if err := waitForIngressControllerRunning(p); err != nil {
t.Fatalf("Failed waiting for ingress-controller to be up: %v", err)
}
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
}
ingressPath := path.Join(curdir, "testdata", "nginx-ing.yaml")
ingressPath := filepath.Join(*testdataDir, "nginx-ing.yaml")
if _, err := kr.RunCommand([]string{"create", "-f", ingressPath}); err != nil {
t.Fatalf("creating nginx ingress resource: %v", err)
t.Fatalf("Failed creating nginx ingress resource: %v", err)
}
podPath := path.Join(curdir, "testdata", "nginx-pod-svc.yaml")
podPath := filepath.Join(*testdataDir, "nginx-pod-svc.yaml")
if _, err := kr.RunCommand([]string{"create", "-f", podPath}); err != nil {
t.Fatalf("creating nginx ingress resource: %v", err)
t.Fatalf("Failed creating nginx ingress resource: %v", err)
}
if err := util.WaitForNginxRunning(t); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
if err := waitForNginxRunning(t, p); err != nil {
t.Fatalf("Failed waiting for nginx to be up: %v", err)
}
checkIngress := func() error {
......@@ -157,7 +157,7 @@ func testIngressController(t *testing.T) {
return nil
}
if err := util.Retry(t, checkIngress, 3*time.Second, 5); err != nil {
if err := util.Retry(t, checkIngress, 2*time.Second, 5); err != nil {
t.Fatalf(err.Error())
}
......@@ -173,12 +173,13 @@ func testIngressController(t *testing.T) {
func testServicesList(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
p := profileName(t)
mk := NewMinikubeRunner(t, p)
checkServices := func() error {
output := mk.RunCommand("service list", false)
output, stderr := mk.RunCommand("service list", false)
if !strings.Contains(output, "kubernetes") {
return fmt.Errorf("Error, kubernetes service missing from output %s", output)
return fmt.Errorf("error, kubernetes service missing from output: %s, \n stderr: %s", output, stderr)
}
return nil
}
......@@ -188,9 +189,10 @@ func testServicesList(t *testing.T) {
}
func testRegistry(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
p := profileName(t)
mk := NewMinikubeRunner(t, p)
mk.RunCommand("addons enable registry", true)
client, err := pkgutil.GetClient()
client, err := pkgutil.GetClient(p)
if err != nil {
t.Fatalf("getting kubernetes client: %v", err)
}
......@@ -208,12 +210,12 @@ func testRegistry(t *testing.T) {
if err := pkgutil.WaitForPodsWithLabelRunning(client, "kube-system", ps); err != nil {
t.Fatalf("waiting for registry-proxy pods: %v", err)
}
ip := strings.TrimSpace(mk.RunCommand("ip", true))
ip, stderr := mk.RunCommand("ip", true)
ip = strings.TrimSpace(ip)
endpoint := fmt.Sprintf("http://%s:%d", ip, 5000)
u, err := url.Parse(endpoint)
if err != nil {
t.Fatalf("failed to parse %q: %v", endpoint, err)
t.Fatalf("failed to parse %q: %v stderr : %s", endpoint, err, stderr)
}
t.Log("checking registry access from outside cluster")
......@@ -235,7 +237,8 @@ func testRegistry(t *testing.T) {
}
t.Log("checking registry access from inside cluster")
kr := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
// TODO: Fix this
out, _ := kr.RunCommand([]string{
"run",
"registry-test",
......@@ -259,88 +262,41 @@ func testRegistry(t *testing.T) {
}()
mk.RunCommand("addons disable registry", true)
}
func testGvisor(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
mk.RunCommand("addons enable gvisor", true)
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
}
createUntrustedWorkload(t)
// waitForNginxRunning waits for nginx service to be up
func waitForNginxRunning(t *testing.T, miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
t.Log("making sure untrusted workload is Running")
if err := util.WaitForUntrustedNginxRunning(); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
t.Log("disabling gvisor addon")
mk.RunCommand("addons disable gvisor", true)
t.Log("waiting for gvisor controller pod to be deleted")
if err := util.WaitForGvisorControllerDeleted(); err != nil {
t.Fatalf("waiting for gvisor controller to be deleted: %v", err)
selector := labels.SelectorFromSet(labels.Set(map[string]string{"run": "nginx"}))
if err := commonutil.WaitForPodsWithLabelRunning(client, "default", selector); err != nil {
return errors.Wrap(err, "waiting for nginx pods")
}
createUntrustedWorkload(t)
t.Log("waiting for FailedCreatePodSandBox event")
if err := util.WaitForFailedCreatePodSandBoxEvent(); err != nil {
t.Fatalf("waiting for FailedCreatePodSandBox event: %v", err)
if err := commonutil.WaitForService(client, "default", "nginx", true, time.Millisecond*500, time.Minute*10); err != nil {
t.Errorf("Error waiting for nginx service to be up")
}
deleteUntrustedWorkload(t)
return nil
}
func testGvisorRestart(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
mk.EnsureRunning()
mk.RunCommand("addons enable gvisor", true)
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
}
// TODO: @priyawadhwa to add test for stop as well
mk.RunCommand("delete", false)
mk.CheckStatus(state.None.String())
mk.Start()
mk.CheckStatus(state.Running.String())
t.Log("waiting for gvisor controller to come up")
if err := util.WaitForGvisorControllerRunning(t); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
// waitForIngressControllerRunning waits until ingress controller pod to be running
func waitForIngressControllerRunning(miniProfile string) error {
client, err := commonutil.GetClient(miniProfile)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
createUntrustedWorkload(t)
t.Log("making sure untrusted workload is Running")
if err := util.WaitForUntrustedNginxRunning(); err != nil {
t.Fatalf("waiting for nginx to be up: %v", err)
if err := commonutil.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", time.Minute*10); err != nil {
return errors.Wrap(err, "waiting for ingress-controller deployment to stabilize")
}
deleteUntrustedWorkload(t)
}
func createUntrustedWorkload(t *testing.T) {
kr := util.NewKubectlRunner(t)
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
selector := labels.SelectorFromSet(labels.Set(map[string]string{"app.kubernetes.io/name": "nginx-ingress-controller"}))
if err := commonutil.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
return errors.Wrap(err, "waiting for ingress-controller pods")
}
untrustedPath := path.Join(curdir, "testdata", "nginx-untrusted.yaml")
t.Log("creating pod with untrusted workload annotation")
if _, err := kr.RunCommand([]string{"replace", "-f", untrustedPath, "--force"}); err != nil {
t.Fatalf("creating untrusted nginx resource: %v", err)
}
}
func deleteUntrustedWorkload(t *testing.T) {
kr := util.NewKubectlRunner(t)
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
}
untrustedPath := path.Join(curdir, "testdata", "nginx-untrusted.yaml")
if _, err := kr.RunCommand([]string{"delete", "-f", untrustedPath}); err != nil {
t.Logf("error deleting untrusted nginx resource: %v", err)
}
return nil
}
......@@ -32,13 +32,14 @@ import (
func testClusterDNS(t *testing.T) {
t.Parallel()
client, err := pkgutil.GetClient()
p := profileName(t)
client, err := pkgutil.GetClient(p)
if err != nil {
t.Fatalf("Error getting kubernetes client %v", err)
}
kr := util.NewKubectlRunner(t)
busybox := busyBoxPod(t, client, kr)
kr := util.NewKubectlRunner(t, p)
busybox := busyBoxPod(t, client, kr, p)
defer func() {
if _, err := kr.RunCommand([]string{"delete", "po", busybox}); err != nil {
t.Errorf("delete failed: %v", err)
......@@ -61,12 +62,12 @@ func testClusterDNS(t *testing.T) {
}
}
func busyBoxPod(t *testing.T, c kubernetes.Interface, kr *util.KubectlRunner) string {
func busyBoxPod(t *testing.T, c kubernetes.Interface, kr *util.KubectlRunner, profile string) string {
if _, err := kr.RunCommand([]string{"create", "-f", filepath.Join(*testdataDir, "busybox.yaml")}); err != nil {
t.Fatalf("creating busybox pod: %s", err)
}
// TODO(tstromberg): Refactor WaitForBusyboxRunning to return name of pod.
if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
if err := util.WaitForBusyboxRunning(t, "default", profile); err != nil {
t.Fatalf("Waiting for busybox pod to be up: %v", err)
}
......
......@@ -30,13 +30,14 @@ import (
// Assert that docker-env subcommand outputs usable information for "docker ps"
func testClusterEnv(t *testing.T) {
t.Parallel()
r := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
// Set a specific shell syntax so that we don't have to handle every possible user shell
envOut := r.RunCommand("docker-env --shell=bash", true)
vars := r.ParseEnvCmdOutput(envOut)
envOut, stderr := mk.RunCommand("docker-env --shell=bash", true)
vars := mk.ParseEnvCmdOutput(envOut)
if len(vars) == 0 {
t.Fatalf("Failed to parse env vars:\n%s", envOut)
t.Fatalf("Failed to parse env vars:\n%s, \n stderr: %s ", envOut, stderr)
}
for k, v := range vars {
t.Logf("Found: %s=%s", k, v)
......
......@@ -25,8 +25,8 @@ import (
func testClusterLogs(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t)
mk.EnsureRunning()
p := profileName(t)
mk := NewMinikubeRunner(t, p)
logsCmdOutput := mk.GetLogs()
// check for # of lines or check for strings
......
......@@ -25,10 +25,11 @@ import (
func testClusterSSH(t *testing.T) {
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
expectedStr := "hello"
sshCmdOutput := mk.RunCommand("ssh echo "+expectedStr, true)
sshCmdOutput, stderr := mk.RunCommand("ssh echo "+expectedStr, true)
if !strings.Contains(sshCmdOutput, expectedStr) {
t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s", expectedStr, sshCmdOutput)
t.Fatalf("ExpectedStr sshCmdOutput to be: %s. Output was: %s Stderr: %s", expectedStr, sshCmdOutput, stderr)
}
}
......@@ -28,12 +28,13 @@ import (
)
func testClusterStatus(t *testing.T) {
kubectlRunner := util.NewKubectlRunner(t)
p := profileName(t)
kr := util.NewKubectlRunner(t, p)
cs := api.ComponentStatusList{}
healthy := func() error {
t.Log("Checking if cluster is healthy.")
if err := kubectlRunner.RunCommandParseOutput([]string{"get", "cs"}, &cs); err != nil {
if err := kr.RunCommandParseOutput([]string{"get", "cs"}, &cs); err != nil {
return err
}
for _, i := range cs.Items {
......@@ -45,7 +46,7 @@ func testClusterStatus(t *testing.T) {
status = c.Status
}
if status != api.ConditionTrue {
err := fmt.Errorf("Component %s is not Healthy! Status: %s", i.GetName(), status)
err := fmt.Errorf("component %s is not Healthy! Status: %s", i.GetName(), status)
t.Logf("Retrying, %v", err)
return err
}
......
......@@ -22,7 +22,6 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
......@@ -38,12 +37,13 @@ func testMounting(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skip("mount tests disabled in darwin due to timeout (issue#3200)")
}
if strings.Contains(*globalArgs, "--vm-driver=none") {
if isTestNoneDriver(t) {
t.Skip("skipping test for none driver as it does not need mount")
}
t.Parallel()
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
tempDir, err := ioutil.TempDir("", "mounttest")
if err != nil {
......@@ -60,14 +60,9 @@ func testMounting(t *testing.T) {
}
}()
kubectlRunner := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
podName := "busybox-mount"
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
}
podPath := path.Join(curdir, "testdata", "busybox-mount-test.yaml")
podPath := filepath.Join(*testdataDir, "busybox-mount-test.yaml")
// Write file in mounted dir from host
expected := "test\n"
if err := writeFilesFromHost(tempDir, []string{"fromhost", "fromhostremove"}, expected); err != nil {
......@@ -77,14 +72,14 @@ func testMounting(t *testing.T) {
// Create the pods we need outside the main test loop.
setupTest := func() error {
t.Logf("Deploying pod from: %s", podPath)
if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath}); err != nil {
if _, err := kr.RunCommand([]string{"create", "-f", podPath}); err != nil {
return err
}
return nil
}
defer func() {
t.Logf("Deleting pod from: %s", podPath)
if out, err := kubectlRunner.RunCommand([]string{"delete", "-f", podPath}); err != nil {
if out, err := kr.RunCommand([]string{"delete", "-f", podPath}); err != nil {
t.Logf("delete -f %s failed: %v\noutput: %s\n", podPath, err, out)
}
}()
......@@ -93,13 +88,13 @@ func testMounting(t *testing.T) {
t.Fatal("mountTest failed with error:", err)
}
if err := waitForPods(map[string]string{"integration-test": "busybox-mount"}); err != nil {
if err := waitForPods(map[string]string{"integration-test": "busybox-mount"}, p); err != nil {
t.Fatalf("Error waiting for busybox mount pod to be up: %v", err)
}
t.Logf("Pods appear to be running")
mountTest := func() error {
if err := verifyFiles(mk, kubectlRunner, tempDir, podName, expected); err != nil {
if err := verifyFiles(mk, kr, tempDir, podName, expected); err != nil {
t.Fatalf(err.Error())
}
......@@ -126,14 +121,14 @@ func writeFilesFromHost(mountedDir string, files []string, content string) error
path := filepath.Join(mountedDir, file)
err := ioutil.WriteFile(path, []byte(content), 0644)
if err != nil {
return fmt.Errorf("Unexpected error while writing file %s: %v", path, err)
return fmt.Errorf("unexpected error while writing file %s: %v", path, err)
}
}
return nil
}
func waitForPods(s map[string]string) error {
client, err := pkgutil.GetClient()
func waitForPods(s map[string]string, profile string) error {
client, err := pkgutil.GetClient(profile)
if err != nil {
return fmt.Errorf("getting kubernetes client: %v", err)
}
......@@ -144,7 +139,7 @@ func waitForPods(s map[string]string) error {
return nil
}
func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, tempDir string, podName string, expected string) error {
func verifyFiles(mk util.MinikubeRunner, kr *util.KubectlRunner, tempDir string, podName string, expected string) error {
path := filepath.Join(tempDir, "frompod")
out, err := ioutil.ReadFile(path)
if err != nil {
......@@ -156,7 +151,7 @@ func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, temp
}
// test that file written from host was read in by the pod via cat /mount-9p/fromhost;
if out, err = kubectlRunner.RunCommand([]string{"logs", podName}); err != nil {
if out, err = kr.RunCommand([]string{"logs", podName}); err != nil {
return err
}
if string(out) != expected {
......@@ -169,30 +164,30 @@ func verifyFiles(mk util.MinikubeRunner, kubectlRunner *util.KubectlRunner, temp
statCmd := fmt.Sprintf("stat /mount-9p/%s", file)
statOutput, err := mk.SSH(statCmd)
if err != nil {
return fmt.Errorf("Unable to stat %s via SSH. error %v, %s", file, err, statOutput)
return fmt.Errorf("inable to stat %s via SSH. error %v, %s", file, err, statOutput)
}
if runtime.GOOS == "windows" {
if strings.Contains(statOutput, "Access: 1970-01-01") {
return fmt.Errorf("Invalid access time\n%s", statOutput)
return fmt.Errorf("invalid access time\n%s", statOutput)
}
}
if strings.Contains(statOutput, "Modify: 1970-01-01") {
return fmt.Errorf("Invalid modify time\n%s", statOutput)
return fmt.Errorf("invalid modify time\n%s", statOutput)
}
}
// test that fromhostremove was deleted by the pod from the mount via rm /mount-9p/fromhostremove
path = filepath.Join(tempDir, "fromhostremove")
if _, err := os.Stat(path); err == nil {
return fmt.Errorf("Expected file %s to be removed", path)
return fmt.Errorf("expected file %s to be removed", path)
}
// test that frompodremove can be deleted on the host
path = filepath.Join(tempDir, "frompodremove")
if err := os.Remove(path); err != nil {
return fmt.Errorf("Unexpected error removing file %s: %v", path, err)
return fmt.Errorf("unexpected error removing file %s: %v", path, err)
}
return nil
......
......@@ -25,11 +25,11 @@ import (
// testProfileList tests the `minikube profile list` command
func testProfileList(t *testing.T) {
p := profileName(t)
t.Parallel()
profile := "minikube"
mk := NewMinikubeRunner(t, "--wait=false")
out := mk.RunCommand("profile list", true)
if !strings.Contains(out, profile) {
t.Errorf("Error , failed to read profile name (%s) in `profile list` command output : \n %q ", profile, out)
mk := NewMinikubeRunner(t, p, "--wait=false")
out, stderr := mk.RunCommand("profile list", true)
if !strings.Contains(out, p) {
t.Errorf("Error , failed to read profile name (%s) in `profile list` command output : \n %q : \n stderr: %s ", p, out, stderr)
}
}
......@@ -39,11 +39,13 @@ var (
)
func testProvisioning(t *testing.T) {
p := profileName(t)
t.Parallel()
kubectlRunner := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
defer func() {
if out, err := kubectlRunner.RunCommand([]string{"delete", "pvc", pvcName}); err != nil {
if out, err := kr.RunCommand([]string{"delete", "pvc", pvcName}); err != nil {
t.Logf("delete pvc %s failed: %v\noutput: %s\n", pvcName, err, out)
}
}()
......@@ -53,7 +55,7 @@ func testProvisioning(t *testing.T) {
checkStorageClass := func() error {
scl := storage.StorageClassList{}
if err := kubectlRunner.RunCommandParseOutput([]string{"get", "storageclass"}, &scl); err != nil {
if err := kr.RunCommandParseOutput([]string{"get", "storageclass"}, &scl); err != nil {
return fmt.Errorf("get storageclass: %v", err)
}
......@@ -63,14 +65,14 @@ func testProvisioning(t *testing.T) {
return fmt.Errorf("no default StorageClass yet")
}
if err := util.Retry(t, checkStorageClass, 5*time.Second, 20); err != nil {
if err := util.Retry(t, checkStorageClass, 10*time.Second, 10); err != nil {
t.Fatalf("no default storage class after retry: %v", err)
}
// Check that the storage provisioner pod is running
checkPodRunning := func() error {
client, err := commonutil.GetClient()
client, err := commonutil.GetClient(p)
if err != nil {
return errors.Wrap(err, "getting kubernetes client")
}
......@@ -82,20 +84,20 @@ func testProvisioning(t *testing.T) {
return nil
}
if err := checkPodRunning(); err != nil {
if err := util.Retry(t, checkPodRunning, 2*time.Second, 5); err != nil {
t.Fatalf("Check storage-provisioner pod running failed with error: %v", err)
}
// Now create the PVC
pvcPath := filepath.Join(*testdataDir, "pvc.yaml")
if _, err := kubectlRunner.RunCommand([]string{"create", "-f", pvcPath}); err != nil {
if _, err := kr.RunCommand([]string{"create", "-f", pvcPath}); err != nil {
t.Fatalf("Error creating pvc: %v", err)
}
// And check that it gets bound to a PV.
checkStorage := func() error {
pvc := core.PersistentVolumeClaim{}
if err := kubectlRunner.RunCommandParseOutput(pvcCmd, &pvc); err != nil {
if err := kr.RunCommandParseOutput(pvcCmd, &pvc); err != nil {
return err
}
// The test passes if the volume claim gets bound.
......
......@@ -21,7 +21,6 @@ import (
"io/ioutil"
"net/http"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
......@@ -46,11 +45,13 @@ func testTunnel(t *testing.T) {
}
t.Log("starting tunnel test...")
runner := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
go func() {
output := runner.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
output, stderr := mk.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
if t.Failed() {
fmt.Println(output)
t.Errorf("tunnel stderr : %s", stderr)
t.Errorf("tunnel output : %s", output)
}
}()
......@@ -60,19 +61,15 @@ func testTunnel(t *testing.T) {
t.Fatal(errors.Wrap(err, "cleaning up tunnels"))
}
kubectlRunner := util.NewKubectlRunner(t)
kr := util.NewKubectlRunner(t, p)
t.Log("deploying nginx...")
curdir, err := filepath.Abs("")
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
}
podPath := path.Join(curdir, "testdata", "testsvc.yaml")
if _, err := kubectlRunner.RunCommand([]string{"apply", "-f", podPath}); err != nil {
podPath := filepath.Join(*testdataDir, "testsvc.yaml")
if _, err := kr.RunCommand([]string{"apply", "-f", podPath}); err != nil {
t.Fatalf("creating nginx ingress resource: %s", err)
}
client, err := commonutil.GetClient()
client, err := commonutil.GetClient(p)
if err != nil {
t.Fatal(errors.Wrap(err, "getting kubernetes client"))
......@@ -89,13 +86,13 @@ func testTunnel(t *testing.T) {
t.Log("getting nginx ingress...")
nginxIP, err := getIngress(kubectlRunner)
nginxIP, err := getIngress(kr)
if err != nil {
t.Errorf("error getting ingress IP for nginx: %s", err)
}
if len(nginxIP) == 0 {
stdout, err := describeIngress(kubectlRunner)
stdout, err := describeIngress(kr)
if err != nil {
t.Errorf("error debugging nginx service: %s", err)
......@@ -113,12 +110,12 @@ func testTunnel(t *testing.T) {
}
}
func getIngress(kubectlRunner *util.KubectlRunner) (string, error) {
func getIngress(kr *util.KubectlRunner) (string, error) {
nginxIP := ""
var ret error
err := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
cmd := []string{"get", "svc", "nginx-svc", "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}"}
stdout, err := kubectlRunner.RunCommand(cmd)
stdout, err := kr.RunCommand(cmd)
switch {
case err == nil:
nginxIP = string(stdout)
......@@ -137,8 +134,8 @@ func getIngress(kubectlRunner *util.KubectlRunner) (string, error) {
return nginxIP, ret
}
func describeIngress(kubectlRunner *util.KubectlRunner) ([]byte, error) {
return kubectlRunner.RunCommand([]string{"get", "svc", "nginx-svc", "-o", "jsonpath={.status}"})
func describeIngress(kr *util.KubectlRunner) ([]byte, error) {
return kr.RunCommand([]string{"get", "svc", "nginx-svc", "-o", "jsonpath={.status}"})
}
// getResponseBody returns the contents of a URL
......
......@@ -19,54 +19,43 @@ limitations under the License.
package integration
import (
"strings"
"testing"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/test/integration/util"
)
func TestFunctional(t *testing.T) {
r := NewMinikubeRunner(t)
r.EnsureRunning()
// This one is not parallel, and ensures the cluster comes up
// before we run any other tests.
t.Run("Status", testClusterStatus)
t.Run("ProfileList", testProfileList)
t.Run("DNS", testClusterDNS)
t.Run("Logs", testClusterLogs)
t.Run("Addons", testAddons)
t.Run("Registry", testRegistry)
t.Run("Dashboard", testDashboard)
t.Run("ServicesList", testServicesList)
t.Run("Provisioning", testProvisioning)
t.Run("Tunnel", testTunnel)
if !usingNoneDriver(r) {
t.Run("EnvVars", testClusterEnv)
t.Run("SSH", testClusterSSH)
t.Run("IngressController", testIngressController)
t.Run("Mounting", testMounting)
p := profileName(t)
mk := NewMinikubeRunner(t, p)
stdout, stderr, err := mk.Start()
if err != nil {
t.Fatalf("failed to start minikube failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
}
func TestFunctionalContainerd(t *testing.T) {
r := NewMinikubeRunner(t)
if usingNoneDriver(r) {
t.Skip("Can't run containerd backend with none driver")
if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
defer mk.TearDown(t)
}
if r.GetStatus() != state.None.String() {
r.RunCommand("delete", true)
}
r.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
t.Run("Gvisor", testGvisor)
t.Run("GvisorRestart", testGvisorRestart)
r.RunCommand("delete", true)
}
// group is needed to make sure tear down runs after parallel runs
// https://github.com/golang/go/issues/17791#issuecomment-258476786
t.Run("group", func(t *testing.T) {
// This one is not parallel, and ensures the cluster comes up
// before we run any other tests.
t.Run("Status", testClusterStatus)
t.Run("ProfileList", testProfileList)
t.Run("DNS", testClusterDNS)
t.Run("Logs", testClusterLogs)
t.Run("Addons", testAddons)
t.Run("Registry", testRegistry)
t.Run("Dashboard", testDashboard)
t.Run("ServicesList", testServicesList)
t.Run("Provisioning", testProvisioning)
t.Run("Tunnel", testTunnel)
if !isTestNoneDriver(t) {
t.Run("EnvVars", testClusterEnv)
t.Run("SSH", testClusterSSH)
t.Run("IngressController", testIngressController)
t.Run("Mounting", testMounting)
}
})
// usingNoneDriver returns true if using the none driver
func usingNoneDriver(r util.MinikubeRunner) bool {
return strings.Contains(r.StartArgs, "--vm-driver=none")
}
......@@ -25,19 +25,30 @@ import (
)
func TestISO(t *testing.T) {
p := profileName(t)
if shouldRunInParallel(t) {
t.Parallel()
}
mk := NewMinikubeRunner(t, "--wait=false")
mk := NewMinikubeRunner(t, p, "--wait=false")
mk.RunCommand("delete", false)
mk.Start()
stdout, stderr, err := mk.Start()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
if !isTestNoneDriver(t) { // none driver doesn't need to be deleted
defer mk.TearDown(t)
}
t.Run("permissions", testMountPermissions)
t.Run("packages", testPackages)
t.Run("persistence", testPersistence)
}
func testMountPermissions(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
// test mount permissions
mountPoints := []string{"/Users", "/hosthome"}
perms := "drwxr-xr-x"
......@@ -59,7 +70,8 @@ func testMountPermissions(t *testing.T) {
}
func testPackages(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
packages := []string{
"git",
......@@ -81,7 +93,8 @@ func testPackages(t *testing.T) {
}
func testPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t)
mk := NewMinikubeRunner(t, p, "--wait=false")
for _, dir := range []string{
"/data",
......
......@@ -19,59 +19,51 @@ limitations under the License.
package integration
import (
"path"
"path/filepath"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/test/integration/util"
)
func TestPersistence(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
if usingNoneDriver(mk) {
if isTestNoneDriver(t) {
t.Skip("skipping test as none driver does not support persistence")
}
mk.EnsureRunning()
p := profileName(t)
if shouldRunInParallel(t) {
t.Parallel()
}
mk := NewMinikubeRunner(t, p, "--wait=false")
defer mk.TearDown(t)
kubectlRunner := util.NewKubectlRunner(t)
curdir, err := filepath.Abs("")
stdout, stderr, err := mk.Start()
if err != nil {
t.Errorf("Error getting the file path for current directory: %s", curdir)
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
podPath := path.Join(curdir, "testdata", "busybox.yaml")
// Create a pod and wait for it to be running.
if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath}); err != nil {
t.Fatalf("Error creating test pod: %v", err)
kr := util.NewKubectlRunner(t, p)
if _, err := kr.RunCommand([]string{"create", "-f", filepath.Join(*testdataDir, "busybox.yaml")}); err != nil {
t.Fatalf("creating busybox pod: %s", err)
}
verify := func(t *testing.T) {
if err := util.WaitForBusyboxRunning(t, "default"); err != nil {
verifyBusybox := func(t *testing.T) {
if err := util.WaitForBusyboxRunning(t, "default", p); err != nil {
t.Fatalf("waiting for busybox to be up: %v", err)
}
}
// Make sure everything is up before we stop.
verify(t)
verifyBusybox(t)
// Now restart minikube and make sure the pod is still there.
// mk.RunCommand("stop", true)
// mk.CheckStatus("Stopped")
checkStop := func() error {
mk.RunCommand("stop", true)
return mk.CheckStatusNoFail(state.Stopped.String())
}
mk.RunCommand("stop", true)
mk.CheckStatus(state.Stopped.String())
if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
t.Fatalf("timed out while checking stopped status: %v", err)
stdout, stderr, err = mk.Start()
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
mk.Start()
mk.CheckStatus(state.Running.String())
// Make sure the same things come up after we've restarted.
verify(t)
verifyBusybox(t)
}
......@@ -31,86 +31,96 @@ import (
)
func TestStartStop(t *testing.T) {
tests := []struct {
name string
args []string
}{
{"nocache_oldest", []string{
"--cache-images=false",
fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion),
// default is the network created by libvirt, if we change the name minikube won't boot
// because the given network doesn't exist
"--kvm-network=default",
"--kvm-qemu-uri=qemu:///system",
}},
{"feature_gates_newest_cni", []string{
"--feature-gates",
"ServerSideApply=true",
"--network-plugin=cni",
"--extra-config=kubelet.network-plugin=cni",
"--extra-config=kubeadm.pod-network-cidr=192.168.111.111/16",
fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion),
}},
{"containerd_and_non_default_apiserver_port", []string{
"--container-runtime=containerd",
"--docker-opt containerd=/var/run/containerd/containerd.sock",
"--apiserver-port=8444",
}},
{"crio_ignore_preflights", []string{
"--container-runtime=crio",
"--extra-config",
"kubeadm.ignore-preflight-errors=SystemVerification",
}},
p := profileName(t) // gets profile name used for minikube and kube context
if shouldRunInParallel(t) {
t.Parallel()
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
r := NewMinikubeRunner(t)
if !strings.Contains(test.name, "docker") && usingNoneDriver(r) {
t.Skipf("skipping %s - incompatible with none driver", test.name)
}
r.RunCommand("config set WantReportErrorPrompt false", true)
r.RunCommand("delete", false)
r.CheckStatus(state.None.String())
r.Start(test.args...)
r.CheckStatus(state.Running.String())
ip := r.RunCommand("ip", true)
ip = strings.TrimRight(ip, "\n")
if net.ParseIP(ip) == nil {
t.Fatalf("IP command returned an invalid address: %s", ip)
}
// check for the current-context before and after the stop
kubectlRunner := util.NewKubectlRunner(t)
currentContext, err := kubectlRunner.RunCommand([]string{"config", "current-context"})
if err != nil {
t.Fatalf("Failed to fetch current-context")
}
if strings.TrimRight(string(currentContext), "\n") != "minikube" {
t.Fatalf("got current-context - %q, want current-context %q", string(currentContext), "minikube")
}
checkStop := func() error {
r.RunCommand("stop", true)
return r.CheckStatusNoFail(state.Stopped.String())
}
if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
t.Fatalf("timed out while checking stopped status: %v", err)
}
// running this command results in error when the current-context is not set
if err := r.Run("config current-context"); err != nil {
t.Logf("current-context is not set to minikube")
}
r.Start(test.args...)
r.CheckStatus(state.Running.String())
r.RunCommand("delete", true)
r.CheckStatus(state.None.String())
})
}
t.Run("group", func(t *testing.T) {
if shouldRunInParallel(t) {
t.Parallel()
}
tests := []struct {
name string
args []string
}{
{"oldest", []string{ // nocache_oldest
"--cache-images=false",
fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion),
// default is the network created by libvirt, if we change the name minikube won't boot
// because the given network doesn't exist
"--kvm-network=default",
"--kvm-qemu-uri=qemu:///system",
}},
{"cni", []string{ // feature_gates_newest_cni
"--feature-gates",
"ServerSideApply=true",
"--network-plugin=cni",
"--extra-config=kubelet.network-plugin=cni",
"--extra-config=kubeadm.pod-network-cidr=192.168.111.111/16",
fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion),
}},
{"containerd", []string{ // containerd_and_non_default_apiserver_port
"--container-runtime=containerd",
"--docker-opt containerd=/var/run/containerd/containerd.sock",
"--apiserver-port=8444",
}},
{"crio", []string{ // crio_ignore_preflights
"--container-runtime=crio",
"--extra-config",
"kubeadm.ignore-preflight-errors=SystemVerification",
}},
}
for _, tc := range tests {
n := tc.name // because similar to https://golang.org/doc/faq#closures_and_goroutines
t.Run(tc.name, func(t *testing.T) {
if shouldRunInParallel(t) {
t.Parallel()
}
pn := p + n // TestStartStopoldest
mk := NewMinikubeRunner(t, pn, "--wait=false")
// TODO : redundant first clause ? never happens?
if !strings.Contains(pn, "docker") && isTestNoneDriver(t) {
t.Skipf("skipping %s - incompatible with none driver", t.Name())
}
mk.RunCommand("config set WantReportErrorPrompt false", true)
stdout, stderr, err := mk.Start(tc.args...)
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", pn, err, stdout, stderr)
}
mk.CheckStatus(state.Running.String())
ip, stderr := mk.RunCommand("ip", true)
ip = strings.TrimRight(ip, "\n")
if net.ParseIP(ip) == nil {
t.Fatalf("IP command returned an invalid address: %s \n %s", ip, stderr)
}
stop := func() error {
stdout, stderr, err = mk.RunCommandRetriable("stop")
return mk.CheckStatusNoFail(state.Stopped.String())
}
err = util.RetryX(stop, 10*time.Second, 2*time.Minute)
mk.CheckStatus(state.Stopped.String())
// TODO medyagh:
// https://github.com/kubernetes/minikube/issues/4854
stdout, stderr, err = mk.Start(tc.args...)
if err != nil {
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
mk.CheckStatus(state.Running.String())
mk.RunCommand("delete", true)
mk.CheckStatus(state.None.String())
})
}
})
}
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"testing"
"time"
"github.com/cenkalti/backoff"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
commonutil "k8s.io/minikube/pkg/util"
)
// WaitForBusyboxRunning blocks until the busybox test pod (selected by the
// label integration-test=busybox) reports Running in the given namespace of
// the cluster identified by miniProfile.
func WaitForBusyboxRunning(t *testing.T, namespace string, miniProfile string) error {
	c, err := commonutil.GetClient(miniProfile)
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	sel := labels.SelectorFromSet(labels.Set{"integration-test": "busybox"})
	return commonutil.WaitForPodsWithLabelRunning(c, namespace, sel)
}
// Retry tries the callback for a number of attempts, with a delay between attempts
func Retry(t *testing.T, callback func() error, d time.Duration, attempts int) (err error) {
for i := 0; i < attempts; i++ {
err = callback()
if err == nil {
return nil
}
time.Sleep(d)
}
return err
}
// Retry2 tries the callback for a number of attempts, with a delay without *testing.T
func Retry2(callback func() error, d time.Duration, attempts int) (err error) {
for i := 0; i < attempts; i++ {
err = callback()
if err == nil {
return nil
}
time.Sleep(d)
}
return err
}
// RetryX retries callback with exponential backoff: the first pause is
// initInterv, each subsequent pause grows by 1.5x with +/-50% jitter, and
// the whole operation gives up once maxTime has elapsed.
func RetryX(callback func() error, initInterv time.Duration, maxTime time.Duration) error {
	policy := backoff.NewExponentialBackOff()
	policy.InitialInterval = initInterv
	policy.MaxElapsedTime = maxTime
	policy.Multiplier = 1.5
	policy.RandomizationFactor = 0.5
	policy.Reset()
	return backoff.Retry(callback, policy)
}
// Logf writes a timestamped log line to stdout, but only when the test
// binary runs with -v. The timestamp and message are emitted in a single
// Printf call so that concurrent (parallel-test) writers cannot interleave
// a timestamp from one goroutine with the message of another.
func Logf(str string, args ...interface{}) {
	if !testing.Verbose() {
		return
	}
	fmt.Printf(" %s | %s\n", time.Now().Format("15:04:05"), fmt.Sprintf(str, args...))
}
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"os/exec"
"testing"
"time"
commonutil "k8s.io/minikube/pkg/util"
)
const kubectlBinary = "kubectl"
// KubectlRunner runs a command using kubectl against a fixed kube-context.
type KubectlRunner struct {
	Profile    string     // kube-context maps to a minikube profile
	T          *testing.T // test handle used for logging and for failing the calling test
	BinaryPath string     // path to the kubectl binary (from exec.LookPath)
}
// NewKubectlRunner creates a new KubectlRunner bound to the given profile
// (used as the kube-context). When no profile is supplied it defaults to
// "minikube". Fails the test immediately if kubectl is not on PATH.
func NewKubectlRunner(t *testing.T, profile ...string) *KubectlRunner {
	// len() instead of a nil check: an explicitly passed empty slice
	// (e.g. NewKubectlRunner(t, profiles...)) is non-nil and would
	// otherwise panic on profile[0] below.
	if len(profile) == 0 {
		profile = []string{"minikube"}
	}
	p, err := exec.LookPath(kubectlBinary)
	if err != nil {
		t.Fatalf("Couldn't find kubectl on path.")
	}
	return &KubectlRunner{Profile: profile[0], BinaryPath: p, T: t}
}
// RunCommandParseOutput runs a kubectl command with -o=json appended and
// decodes the JSON response into outputObj.
func (k *KubectlRunner) RunCommandParseOutput(args []string, outputObj interface{}, useKubeContext ...bool) error {
	out, err := k.RunCommand(append(args, "-o=json"), useKubeContext...)
	if err != nil {
		return err
	}
	dec := json.NewDecoder(bytes.NewReader(out))
	return dec.Decode(outputObj)
}
// RunCommand runs a kubectl command and returns its combined stdout+stderr
// (CombinedOutput, despite the return name). The command targets this
// runner's kube-context unless useKubeContext is explicitly false, and is
// retried on failure via commonutil.RetryAfter(3, ..., 2s).
func (k *KubectlRunner) RunCommand(args []string, useKubeContext ...bool) (stdout []byte, err error) {
	if useKubeContext == nil {
		useKubeContext = []bool{true}
	}
	if useKubeContext[0] {
		kubecContextArg := fmt.Sprintf("--context=%s", k.Profile)
		args = append([]string{kubecContextArg}, args...) // prepend --context ahead of the caller's args
	}
	inner := func() error {
		cmd := exec.Command(k.BinaryPath, args...)
		// NOTE: CombinedOutput, so "stdout" also carries stderr content.
		stdout, err = cmd.CombinedOutput()
		if err != nil {
			// Wrap as RetriableError so RetryAfter attempts the command again.
			retriable := &commonutil.RetriableError{Err: fmt.Errorf("error running command %s: %v. Stdout: \n %s", args, err, stdout)}
			k.T.Log(retriable)
			return retriable
		}
		return nil
	}
	err = commonutil.RetryAfter(3, inner, 2*time.Second)
	return stdout, err
}
// CreateRandomNamespace creates a namespace with a random 20-character name
// and returns that name; it fails the test if creation errors.
func (k *KubectlRunner) CreateRandomNamespace() string {
	name := genRandString(20)
	if _, err := k.RunCommand([]string{"create", "namespace", name}); err != nil {
		k.T.Fatalf("Error creating namespace: %v", err)
	}
	return name
}
func genRandString(strLen int) string {
const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
rand.Seed(time.Now().UTC().UnixNano())
result := make([]byte, strLen)
for i := 0; i < strLen; i++ {
result[i] = chars[rand.Intn(len(chars))]
}
return string(result)
}
// DeleteNamespace deletes the named namespace via kubectl.
func (k *KubectlRunner) DeleteNamespace(namespace string) error {
	if _, err := k.RunCommand([]string{"delete", "namespace", namespace}); err != nil {
		return err
	}
	return nil
}
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bufio"
"bytes"
"context"
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
commonutil "k8s.io/minikube/pkg/util"
)
// MinikubeRunner runs minikube CLI commands against a single profile on
// behalf of an integration test.
type MinikubeRunner struct {
	Profile      string        // minikube profile passed as -p to every command
	T            *testing.T    // test handle used for logging and failing
	BinaryPath   string        // path to the minikube binary under test
	GlobalArgs   string        // extra flags appended to `start` (see Start); presumably global minikube flags — confirm with callers
	StartArgs    string        // extra flags for `minikube start`
	MountArgs    string        // NOTE(review): not referenced in this file's visible code; presumably flags for `minikube mount`
	Runtime      string        // NOTE(review): not referenced in this file's visible code; presumably the container runtime under test
	TimeOutStart time.Duration // time to wait for minikube start before killing it
}
// Remove deletes the given file from the VM by running `rm -rf` over SSH.
func (m *MinikubeRunner) Remove(f assets.CopyableFile) error {
	target := filepath.Join(f.GetTargetDir(), f.GetTargetName())
	_, err := m.SSH(fmt.Sprintf("rm -rf %s", target))
	return err
}
// teeRun runs a command, streaming stdout and stderr to the console (via
// Logf) while also capturing them. When waitForRun is explicitly false the
// command is only started and teeRun returns immediately with empty output
// and a nil error.
func (m *MinikubeRunner) teeRun(cmd *exec.Cmd, waitForRun ...bool) (string, string, error) {
	w := true
	if len(waitForRun) > 0 {
		w = waitForRun[0]
	}

	errPipe, err := cmd.StderrPipe()
	if err != nil {
		return "", "", err
	}
	outPipe, err := cmd.StdoutPipe()
	if err != nil {
		return "", "", err
	}
	if err := cmd.Start(); err != nil {
		return "", "", err
	}
	if !w {
		// Fire-and-forget: caller does not want output or the exit status.
		return "", "", nil
	}

	var outB bytes.Buffer
	var errB bytes.Buffer
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		if err := commonutil.TeePrefix(commonutil.ErrPrefix, errPipe, &errB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
		wg.Done()
	}()
	go func() {
		if err := commonutil.TeePrefix(commonutil.OutPrefix, outPipe, &outB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
		wg.Done()
	}()
	// Per the os/exec docs, all reads from StdoutPipe/StderrPipe must
	// complete before calling Wait: Wait closes the pipes once the command
	// exits, so waiting first avoids racing the tee goroutines against the
	// pipe teardown and losing trailing output.
	wg.Wait()
	err = cmd.Wait()
	return outB.String(), errB.String(), err
}
// RunCommand executes a minikube command against this runner's profile and
// returns stdout and stderr. On error it fails the test (Fatalf) when
// failError is true, otherwise marks it failed (Errorf). By default it
// waits for the command to finish; pass waitForRun=false to only start it.
func (m *MinikubeRunner) RunCommand(cmdStr string, failError bool, waitForRun ...bool) (string, string) {
	profileArg := fmt.Sprintf("-p=%s ", m.Profile)
	cmdStr = profileArg + cmdStr
	cmdArgs := strings.Split(cmdStr, " ")
	path, _ := filepath.Abs(m.BinaryPath)

	cmd := exec.Command(path, cmdArgs...)
	Logf("Run: %s", cmd.Args)
	stdout, stderr, err := m.teeRun(cmd, waitForRun...)
	if err != nil {
		errMsg := ""
		if exitError, ok := err.(*exec.ExitError); ok {
			errMsg = fmt.Sprintf("Error running command: %s %s. Output: %s Stderr: %s", cmdStr, exitError.Stderr, stdout, stderr)
		} else {
			errMsg = fmt.Sprintf("Error running command: %s %s. Output: %s", cmdStr, stderr, stdout)
		}
		// Pass errMsg as an argument, not a format string: command output
		// may contain '%' verbs, which would make Fatalf/Errorf mangle the
		// message (go vet: non-constant format string).
		if failError {
			m.T.Fatalf("%s", errMsg)
		} else {
			m.T.Errorf("%s", errMsg)
		}
	}
	return stdout, stderr
}
// RunCommandRetriable executes a minikube command against this runner's
// profile and returns stdout, stderr and the error. Failures are only
// logged (as "temporary error") rather than failing the test, so callers
// can wrap this in a retry loop.
func (m *MinikubeRunner) RunCommandRetriable(cmdStr string, waitForRun ...bool) (stdout string, stderr string, err error) {
	fullCmd := fmt.Sprintf("-p=%s %s", m.Profile, cmdStr)
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(binary, strings.Split(fullCmd, " ")...)
	Logf("Run: %s", cmd.Args)

	stdout, stderr, err = m.teeRun(cmd, waitForRun...)
	if err == nil {
		return stdout, stderr, nil
	}
	if exitError, ok := err.(*exec.ExitError); ok {
		m.T.Logf("temporary error: running command: %s %s. Output: \n%s", fullCmd, exitError.Stderr, stdout)
	} else {
		m.T.Logf("temporary error: running command: %s %s. Output: \n%s", fullCmd, stderr, stdout)
	}
	return stdout, stderr, err
}
// RunWithContext calls the minikube command under the given context,
// useful for enforcing timeouts or cancellation.
func (m *MinikubeRunner) RunWithContext(ctx context.Context, cmdStr string, wait ...bool) (string, string, error) {
	fullCmd := fmt.Sprintf("-p=%s %s", m.Profile, cmdStr)
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.CommandContext(ctx, binary, strings.Split(fullCmd, " ")...)
	Logf("Run: %s", cmd.Args)
	return m.teeRun(cmd, wait...)
}
// RunDaemon starts a minikube command without waiting for it to finish,
// returning the running command and a buffered reader over its stdout.
// Stderr is drained in the background so the daemon never blocks on a
// full pipe.
func (m *MinikubeRunner) RunDaemon(cmdStr string) (*exec.Cmd, *bufio.Reader) {
	fullCmd := fmt.Sprintf("-p=%s %s", m.Profile, cmdStr)
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(binary, strings.Split(fullCmd, " ")...)

	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		m.T.Fatalf("stdout pipe failed: %s %v", fullCmd, err)
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		m.T.Fatalf("stderr pipe failed: %s %v", fullCmd, err)
	}

	var errB bytes.Buffer
	go func() {
		if err := commonutil.TeePrefix(commonutil.ErrPrefix, stderrPipe, &errB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
	}()

	if err := cmd.Start(); err != nil {
		m.T.Fatalf("Error running command: %s %v", fullCmd, err)
	}
	return cmd, bufio.NewReader(stdoutPipe)
}
// RunDaemon2 executes a minikube command in the background (with the runner's
// profile flag prepended) and returns the running *exec.Cmd plus buffered
// readers over both stdout and stderr. Unlike RunDaemon, stderr is NOT
// drained here: the caller must read both readers (and eventually Wait/kill
// the process) or the child may block on a full pipe.
func (m *MinikubeRunner) RunDaemon2(cmdStr string) (*exec.Cmd, *bufio.Reader, *bufio.Reader) {
	profileArg := fmt.Sprintf("-p=%s ", m.Profile)
	cmdStr = profileArg + cmdStr
	cmdArgs := strings.Split(cmdStr, " ")
	path, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(path, cmdArgs...)
	// Pipes must be requested before Start.
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		m.T.Fatalf("stdout pipe failed: %s %v", cmdStr, err)
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		m.T.Fatalf("stderr pipe failed: %s %v", cmdStr, err)
	}
	err = cmd.Start()
	if err != nil {
		m.T.Fatalf("Error running command: %s %v", cmdStr, err)
	}
	return cmd, bufio.NewReader(stdoutPipe), bufio.NewReader(stderrPipe)
}
// SSH returns the combined stdout/stderr of running a command through
// `minikube ssh` for this runner's profile. A non-exit error (e.g. the
// binary could not be launched) is deliberately swallowed, matching the
// original behavior.
func (m *MinikubeRunner) SSH(cmdStr string) (string, error) {
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(binary, fmt.Sprintf("-p=%s", m.Profile), "ssh", cmdStr)
	Logf("SSH: %s", cmdStr)
	out, runErr := cmd.CombinedOutput()
	Logf("Output: %s", out)
	if exitErr, ok := runErr.(*exec.ExitError); ok {
		return string(out), exitErr
	}
	return string(out), nil
}
// Start starts the cluster, retrying `minikube start` every 10 seconds until
// it succeeds or m.TimeOutStart elapses. The named results are assigned
// inside the retried closure, so they always hold the output of the most
// recent attempt.
func (m *MinikubeRunner) Start(opts ...string) (stdout string, stderr string, err error) {
	cmd := fmt.Sprintf("start %s %s %s", m.StartArgs, m.GlobalArgs, strings.Join(opts, " "))
	s := func() error {
		stdout, stderr, err = m.RunCommandRetriable(cmd)
		return err
	}
	err = RetryX(s, 10*time.Second, m.TimeOutStart)
	return stdout, stderr, err
}
// TearDown deletes minikube without waiting for it. used to free up ram/cpu after each test
// NOTE(review): cmd.Wait is never called, so the spawned `delete` may linger
// as a zombie until the test process exits — confirm this is intended.
func (m *MinikubeRunner) TearDown(t *testing.T) {
	profileArg := fmt.Sprintf("-p=%s", m.Profile)
	path, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(path, profileArg, "delete")
	// Start (not Run): fire-and-forget so teardown does not block the test.
	err := cmd.Start()
	if err != nil {
		t.Errorf("error tearing down minikube %s : %v", profileArg, err)
	}
}
// EnsureRunning makes sure the cluster is in the Running state, starting it
// (with opts) if it is not, and fails the test via CheckStatus if the final
// status does not report Running.
func (m *MinikubeRunner) EnsureRunning(opts ...string) {
	s, _, err := m.Status()
	if err != nil {
		m.T.Errorf("error getting status for ensure running: %v", err)
	}
	if s != state.Running.String() {
		stdout, stderr, err := m.Start(opts...)
		if err != nil {
			m.T.Errorf("error starting while running EnsureRunning : %v , stdout %s stderr %s", err, stdout, stderr)
		}
	}
	m.CheckStatus(state.Running.String())
}
// ParseEnvCmdOutput parses the output of `env` (assumes bash), returning a
// map of variable name to (optionally double-quoted) value.
func (m *MinikubeRunner) ParseEnvCmdOutput(out string) map[string]string {
	pattern := regexp.MustCompile(`(\w+?) ?= ?"?(.+?)"?\n`)
	parsed := make(map[string]string)
	for _, match := range pattern.FindAllStringSubmatch(out, -1) {
		parsed[match[1]] = match[2]
	}
	return parsed
}
// Status returns the host state (`minikube status --format={{.Host}}`),
// retrying every 15s for up to a minute. A non-nil error is suppressed when
// the reported state is None or Stopped, because status exits non-zero in
// those states.
func (m *MinikubeRunner) Status() (status string, stderr string, err error) {
	cmd := fmt.Sprintf("status --format={{.Host}} %s", m.GlobalArgs)
	s := func() error {
		status, stderr, err = m.RunCommandRetriable(cmd)
		// trim the trailing newline so status compares cleanly against
		// state strings like "Running"
		status = strings.TrimRight(status, "\n")
		return err
	}
	err = RetryX(s, 15*time.Second, 1*time.Minute)
	if err != nil && (status == state.None.String() || status == state.Stopped.String()) {
		err = nil // because https://github.com/kubernetes/minikube/issues/4932
	}
	return status, stderr, err
}
// GetLogs returns the stdout of `minikube logs`; stderr is discarded.
func (m *MinikubeRunner) GetLogs() string {
	// TODO: this test needs to check sterr too !
	stdout, _ := m.RunCommand(fmt.Sprintf("logs %s", m.GlobalArgs), true)
	return stdout
}
// CheckStatus asserts the cluster has the desired status, failing the test
// fatally otherwise (a "None" status makes minikube exit with code 1).
func (m *MinikubeRunner) CheckStatus(desired string) {
	if err := m.CheckStatusNoFail(desired); err != nil {
		m.T.Fatalf("%v", err)
	}
}
// CheckStatusNoFail checks that the cluster reports the desired status,
// returning a descriptive error (never failing the test) on mismatch or on
// a status-command error.
func (m *MinikubeRunner) CheckStatusNoFail(desired string) error {
	s, stderr, err := m.Status()
	if s != desired {
		return fmt.Errorf("got state: %q, expected %q : stderr: %s err: %v ", s, desired, stderr, err)
	}
	if err != nil {
		// BUGFIX: errors.Wrapf interprets its second argument as a printf
		// format string; stderr may contain '%' verbs, which would garble
		// the message. Wrap treats it as a plain message.
		return errors.Wrap(err, stderr)
	}
	return nil
}
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/minikube/pkg/minikube/assets"
commonutil "k8s.io/minikube/pkg/util"
)
const kubectlBinary = "kubectl"
// MinikubeRunner runs a command against a minikube binary under test.
type MinikubeRunner struct {
	T          *testing.T // test to report failures and logs against
	BinaryPath string     // path to the minikube binary (made absolute before use)
	GlobalArgs string     // flags appended to status/logs/start invocations
	StartArgs  string     // extra flags passed to `minikube start`
	MountArgs  string     // extra flags for mount commands (not used in this chunk)
	Runtime    string     // container runtime name (not used in this chunk)
}
// Logf writes a timestamped log line to stdout, but only when the test
// binary runs in verbose mode (-v).
func Logf(str string, args ...interface{}) {
	if !testing.Verbose() {
		return
	}
	stamp := time.Now().Format("15:04:05")
	fmt.Printf(" %s | %s\n", stamp, fmt.Sprintf(str, args...))
}
// Run executes a command inside the VM over SSH, reporting only the error.
func (m *MinikubeRunner) Run(cmd string) error {
	_, sshErr := m.SSH(cmd)
	return sshErr
}
// Copy copies a file into the VM by streaming it to a remote `cat >> target`
// over `minikube ssh`.
// BUGFIX: the original ran `exec.Command("/bin/bash", "-c", path, "ssh", ...)`;
// with `bash -c` everything after the command string is assigned to $0, $1, …
// and never executed, so the ssh subcommand was silently dropped. Invoke the
// binary directly instead, and wire the file as stdin so the remote cat has
// something to append.
func (m *MinikubeRunner) Copy(f assets.CopyableFile) error {
	path, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(path, "ssh", "--", fmt.Sprintf("cat >> %s", filepath.Join(f.GetTargetDir(), f.GetTargetName())))
	// assumes assets.CopyableFile implements io.Reader — TODO confirm
	cmd.Stdin = f
	Logf("Running: %s", cmd.Args)
	return cmd.Run()
}
// CombinedOutput runs cmd over SSH and returns its combined stdout+stderr;
// it is a thin alias for SSH.
func (m *MinikubeRunner) CombinedOutput(cmd string) (string, error) {
	return m.SSH(cmd)
}
// Remove deletes the file's target path inside the VM via SSH (`rm -rf`).
func (m *MinikubeRunner) Remove(f assets.CopyableFile) error {
	target := filepath.Join(f.GetTargetDir(), f.GetTargetName())
	_, err := m.SSH(fmt.Sprintf("rm -rf %s", target))
	return err
}
// teeRun runs cmd to completion, streaming its stdout and stderr through
// Logf while also capturing them into buffers; it returns
// (stdout, stderr, wait error).
func (m *MinikubeRunner) teeRun(cmd *exec.Cmd) (string, string, error) {
	// Pipes must be requested before Start.
	errPipe, err := cmd.StderrPipe()
	if err != nil {
		return "", "", err
	}
	outPipe, err := cmd.StdoutPipe()
	if err != nil {
		return "", "", err
	}
	if err := cmd.Start(); err != nil {
		return "", "", err
	}
	var outB bytes.Buffer
	var errB bytes.Buffer
	// Drain both pipes concurrently so the child cannot deadlock on a full
	// pipe buffer; the WaitGroup guarantees both buffers are complete before
	// they are read below.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		if err := commonutil.TeePrefix(commonutil.ErrPrefix, errPipe, &errB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
		wg.Done()
	}()
	go func() {
		if err := commonutil.TeePrefix(commonutil.OutPrefix, outPipe, &outB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
		wg.Done()
	}()
	// NOTE(review): os/exec documents that Wait closes the pipes; calling it
	// before wg.Wait() can truncate the tee'd output — confirm the ordering
	// is intentional.
	err = cmd.Wait()
	wg.Wait()
	return outB.String(), errB.String(), err
}
// RunCommand executes a minikube command and returns its stdout. When
// checkError is set, any failure aborts the test with t.Fatalf.
func (m *MinikubeRunner) RunCommand(command string, checkError bool) string {
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(binary, strings.Split(command, " ")...)
	Logf("Run: %s", cmd.Args)
	stdout, stderr, err := m.teeRun(cmd)
	if err == nil || !checkError {
		return stdout
	}
	if exitError, ok := err.(*exec.ExitError); ok {
		m.T.Fatalf("Error running command: %s %s. Output: %s", command, exitError.Stderr, stdout)
	} else {
		m.T.Fatalf("Error running command: %s %v. Output: %s", command, err, stderr)
	}
	return stdout
}
// RunWithContext calls the minikube command with a context, useful for timeouts.
func (m *MinikubeRunner) RunWithContext(ctx context.Context, command string) (string, string, error) {
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.CommandContext(ctx, binary, strings.Split(command, " ")...)
	Logf("Run: %s", cmd.Args)
	return m.teeRun(cmd)
}
// RunDaemon executes a minikube command in the background and returns the
// running *exec.Cmd together with a buffered reader over its stdout. The
// caller owns the process: Wait is never called here.
func (m *MinikubeRunner) RunDaemon(command string) (*exec.Cmd, *bufio.Reader) {
	commandArr := strings.Split(command, " ")
	path, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(path, commandArr...)
	// Pipes must be requested before Start.
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		m.T.Fatalf("stdout pipe failed: %s %v", command, err)
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		m.T.Fatalf("stderr pipe failed: %s %v", command, err)
	}
	var errB bytes.Buffer
	// Drain stderr in the background so the child never blocks on a full
	// pipe; TeePrefix also echoes it through Logf.
	go func() {
		if err := commonutil.TeePrefix(commonutil.ErrPrefix, stderrPipe, &errB, Logf); err != nil {
			m.T.Logf("tee: %v", err)
		}
	}()
	err = cmd.Start()
	if err != nil {
		m.T.Fatalf("Error running command: %s %v", command, err)
	}
	return cmd, bufio.NewReader(stdoutPipe)
}
// RunDaemon2 executes a minikube command in the background and returns the
// running *exec.Cmd plus buffered readers over its stdout and stderr.
// Unlike RunDaemon, stderr is NOT drained here: the caller must read both
// readers (and eventually Wait/kill the process) or the child may block on
// a full pipe.
func (m *MinikubeRunner) RunDaemon2(command string) (*exec.Cmd, *bufio.Reader, *bufio.Reader) {
	commandArr := strings.Split(command, " ")
	path, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(path, commandArr...)
	// Pipes must be requested before Start.
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		m.T.Fatalf("stdout pipe failed: %s %v", command, err)
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		m.T.Fatalf("stderr pipe failed: %s %v", command, err)
	}
	err = cmd.Start()
	if err != nil {
		m.T.Fatalf("Error running command: %s %v", command, err)
	}
	return cmd, bufio.NewReader(stdoutPipe), bufio.NewReader(stderrPipe)
}
// SSH returns the combined stdout/stderr of running a command through
// `minikube ssh`. Non-exit errors (e.g. the binary failed to launch) are
// deliberately swallowed, matching the original behavior.
func (m *MinikubeRunner) SSH(command string) (string, error) {
	binary, _ := filepath.Abs(m.BinaryPath)
	cmd := exec.Command(binary, "ssh", command)
	Logf("SSH: %s", command)
	out, runErr := cmd.CombinedOutput()
	Logf("Output: %s", out)
	if exitErr, ok := runErr.(*exec.ExitError); ok {
		return string(out), exitErr
	}
	return string(out), nil
}
// Start starts the cluster with verbose logging, failing the test on error.
func (m *MinikubeRunner) Start(opts ...string) {
	extra := strings.Join(opts, " ")
	startCmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.GlobalArgs, extra)
	m.RunCommand(startCmd, true)
}
// EnsureRunning starts the cluster if it is not already running, then
// asserts the Running state.
func (m *MinikubeRunner) EnsureRunning() {
	if status := m.GetStatus(); status != "Running" {
		m.Start()
	}
	m.CheckStatus("Running")
}
// ParseEnvCmdOutput parses the output of `env` (assumes bash) into a map of
// variable name to (optionally double-quoted) value.
func (m *MinikubeRunner) ParseEnvCmdOutput(out string) map[string]string {
	pattern := regexp.MustCompile(`(\w+?) ?= ?"?(.+?)"?\n`)
	vars := map[string]string{}
	for _, kv := range pattern.FindAllStringSubmatch(out, -1) {
		vars[kv[1]] = kv[2]
	}
	return vars
}
// GetStatus returns the raw host status string; errors are ignored (the
// command is run without error checking).
func (m *MinikubeRunner) GetStatus() string {
	statusCmd := fmt.Sprintf("status --format={{.Host}} %s", m.GlobalArgs)
	return m.RunCommand(statusCmd, false)
}
// GetLogs returns the output of `minikube logs`, failing the test on error.
func (m *MinikubeRunner) GetLogs() string {
	logsCmd := fmt.Sprintf("logs %s", m.GlobalArgs)
	return m.RunCommand(logsCmd, true)
}
// CheckStatus asserts the cluster has the desired status, failing the test
// fatally otherwise.
func (m *MinikubeRunner) CheckStatus(desired string) {
	err := m.CheckStatusNoFail(desired)
	if err != nil {
		m.T.Fatalf("%v", err)
	}
}
// CheckStatusNoFail compares the current status against desired, returning
// a descriptive error on mismatch instead of failing the test.
func (m *MinikubeRunner) CheckStatusNoFail(desired string) error {
	actual := m.GetStatus()
	if actual == desired {
		return nil
	}
	return fmt.Errorf("got state: %q, expected %q", actual, desired)
}
// KubectlRunner runs a command using kubectl
type KubectlRunner struct {
	T          *testing.T // test to report failures and logs against
	BinaryPath string     // resolved path to the kubectl binary
}
// NewKubectlRunner creates a new KubectlRunner, failing the test fatally if
// kubectl cannot be found on PATH.
func NewKubectlRunner(t *testing.T) *KubectlRunner {
	p, err := exec.LookPath(kubectlBinary)
	if err != nil {
		// Include the underlying error for diagnosis; it was previously
		// discarded.
		t.Fatalf("Couldn't find kubectl on path: %v", err)
	}
	return &KubectlRunner{BinaryPath: p, T: t}
}
// RunCommandParseOutput runs a kubectl command with -o=json appended and
// decodes the JSON output into outputObj.
func (k *KubectlRunner) RunCommandParseOutput(args []string, outputObj interface{}) error {
	output, err := k.RunCommand(append(args, "-o=json"))
	if err != nil {
		return err
	}
	return json.NewDecoder(bytes.NewReader(output)).Decode(outputObj)
}
// RunCommand runs a kubectl command, retrying up to 3 times (2s apart) on
// failure, and returns the combined stdout+stderr. The named results are
// assigned inside the retried closure, so they reflect the last attempt.
func (k *KubectlRunner) RunCommand(args []string) (stdout []byte, err error) {
	inner := func() error {
		cmd := exec.Command(k.BinaryPath, args...)
		stdout, err = cmd.CombinedOutput()
		if err != nil {
			// RetriableError tells RetryAfter this attempt may be retried.
			retriable := &commonutil.RetriableError{Err: fmt.Errorf("error running command %s: %v. Stdout: \n %s", args, err, stdout)}
			k.T.Log(retriable)
			return retriable
		}
		return nil
	}
	err = commonutil.RetryAfter(3, inner, 2*time.Second)
	return stdout, err
}
// CreateRandomNamespace creates a namespace with a random 20-character name
// and returns that name, failing the test on error.
func (k *KubectlRunner) CreateRandomNamespace() string {
	const strLen = 20
	name := genRandString(strLen)
	_, err := k.RunCommand([]string{"create", "namespace", name})
	if err != nil {
		k.T.Fatalf("Error creating namespace: %v", err)
	}
	return name
}
// genRandString returns a random string of length strLen drawn from
// lowercase letters and digits, suitable for throwaway namespace names.
func genRandString(strLen int) string {
	const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
	// Use a locally seeded source instead of rand.Seed: reseeding the shared
	// package-global source on every call mutates global state and affects
	// every other user of math/rand in the process.
	r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	result := make([]byte, strLen)
	for i := range result {
		result[i] = chars[r.Intn(len(chars))]
	}
	return string(result)
}
// DeleteNamespace deletes the given namespace via kubectl.
func (k *KubectlRunner) DeleteNamespace(namespace string) error {
	args := []string{"delete", "namespace", namespace}
	_, err := k.RunCommand(args)
	return err
}
// WaitForBusyboxRunning waits until the busybox pod (label
// integration-test=busybox) is running in the given namespace.
// The *testing.T parameter is unused but kept for signature compatibility.
func WaitForBusyboxRunning(t *testing.T, namespace string) error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"integration-test": "busybox"}))
	return commonutil.WaitForPodsWithLabelRunning(client, namespace, selector)
}
// WaitForIngressControllerRunning waits for the nginx-ingress-controller
// deployment in kube-system to stabilize (up to 10 minutes) and then for its
// pods to be running. The *testing.T parameter is unused.
func WaitForIngressControllerRunning(t *testing.T) error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	if err := commonutil.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", time.Minute*10); err != nil {
		return errors.Wrap(err, "waiting for ingress-controller deployment to stabilize")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"app.kubernetes.io/name": "nginx-ingress-controller"}))
	if err := commonutil.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
		return errors.Wrap(err, "waiting for ingress-controller pods")
	}
	return nil
}
// WaitForGvisorControllerRunning waits for the gvisor controller pod (label
// kubernetes.io/minikube-addons=gvisor) in kube-system to be running.
// The *testing.T parameter is unused.
func WaitForGvisorControllerRunning(t *testing.T) error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"kubernetes.io/minikube-addons": "gvisor"}))
	if err := commonutil.WaitForPodsWithLabelRunning(client, "kube-system", selector); err != nil {
		return errors.Wrap(err, "waiting for gvisor controller pod to stabilize")
	}
	return nil
}
// WaitForGvisorControllerDeleted waits for the gvisor controller pod (label
// kubernetes.io/minikube-addons=gvisor) in kube-system to be deleted.
func WaitForGvisorControllerDeleted() error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"kubernetes.io/minikube-addons": "gvisor"}))
	if err := commonutil.WaitForPodDelete(client, "kube-system", selector); err != nil {
		return errors.Wrap(err, "waiting for gvisor controller pod deletion")
	}
	return nil
}
// WaitForUntrustedNginxRunning waits for the untrusted nginx pod (label
// run=nginx) in the default namespace to start running.
func WaitForUntrustedNginxRunning() error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"run": "nginx"}))
	if err := commonutil.WaitForPodsWithLabelRunning(client, "default", selector); err != nil {
		return errors.Wrap(err, "waiting for nginx pods")
	}
	return nil
}
// WaitForFailedCreatePodSandBoxEvent waits for a FailedCreatePodSandBox
// event to appear in the default namespace.
func WaitForFailedCreatePodSandBoxEvent() error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	if err := commonutil.WaitForEvent(client, "default", "FailedCreatePodSandBox"); err != nil {
		return errors.Wrap(err, "waiting for FailedCreatePodSandBox event")
	}
	return nil
}
// WaitForNginxRunning waits for the nginx pods (label run=nginx) in the
// default namespace to be running, and then for the nginx service to come up
// (polling every 500ms for up to 10 minutes). A pod failure is returned; a
// service failure is reported via t.Errorf and nil is still returned,
// preserving the existing contract for callers.
func WaitForNginxRunning(t *testing.T) error {
	client, err := commonutil.GetClient()
	if err != nil {
		return errors.Wrap(err, "getting kubernetes client")
	}
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"run": "nginx"}))
	if err := commonutil.WaitForPodsWithLabelRunning(client, "default", selector); err != nil {
		return errors.Wrap(err, "waiting for nginx pods")
	}
	if err := commonutil.WaitForService(client, "default", "nginx", true, time.Millisecond*500, time.Minute*10); err != nil {
		// Include the underlying error; it was previously discarded.
		t.Errorf("Error waiting for nginx service to be up: %v", err)
	}
	return nil
}
// Retry tries the callback for a number of attempts, with a delay between attempts
func Retry(t *testing.T, callback func() error, d time.Duration, attempts int) (err error) {
for i := 0; i < attempts; i++ {
err = callback()
if err == nil {
return nil
}
time.Sleep(d)
}
return err
}
......@@ -18,74 +18,77 @@ package integration
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/docker/machine/libmachine/state"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/test/integration/util"
)
func downloadMinikubeBinary(version string) (*os.File, error) {
// Grab latest release binary
url := pkgutil.GetBinaryDownloadURL(version, runtime.GOOS)
resp, err := retryablehttp.Get(url)
if err != nil {
return nil, errors.Wrap(err, "Failed to get latest release binary")
}
defer resp.Body.Close()
tf, err := ioutil.TempFile("", "minikube")
if err != nil {
return nil, errors.Wrap(err, "Failed to create binary file")
}
_, err = io.Copy(tf, resp.Body)
if err != nil {
return nil, errors.Wrap(err, "Failed to populate temp file")
}
if err := tf.Close(); err != nil {
return nil, errors.Wrap(err, "Failed to close temp file")
func fileExists(fname string) error {
check := func() error {
info, err := os.Stat(fname)
if os.IsNotExist(err) {
return err
}
if info.IsDir() {
return fmt.Errorf("Error expect file got dir")
}
return nil
}
if runtime.GOOS != "windows" {
if err := os.Chmod(tf.Name(), 0700); err != nil {
return nil, err
// t.Fatal(errors.Wrap(err, "Failed to make binary executable."))
}
if err := util.Retry2(check, 1*time.Second, 3); err != nil {
return errors.Wrap(err, fmt.Sprintf("Failed check if file (%q) exists,", fname))
}
return tf, err
return nil
}
// TestVersionUpgrade downloads the latest version of minikube and runs it with
// the oldest supported k8s version, and then runs the current head minikube
// and tries to upgrade from the oldest supported k8s to the newest supported k8s
func TestVersionUpgrade(t *testing.T) {
currentRunner := NewMinikubeRunner(t)
currentRunner.RunCommand("delete", true)
currentRunner.CheckStatus(state.None.String())
tf, err := downloadMinikubeBinary("latest")
if err != nil || tf == nil {
t.Fatal(errors.Wrap(err, "Failed to download minikube binary."))
p := profileName(t)
if shouldRunInParallel(t) {
t.Parallel()
}
// fname is the filename for minikube's latest binary. This file has been pre-downloaded before the test by hacks/jenkins/common.sh
fname := filepath.Join(*testdataDir, fmt.Sprintf("minikube-%s-%s-latest-stable", runtime.GOOS, runtime.GOARCH))
err := fileExists(fname)
if err != nil { // download file if it is not downloaded by other test
dest := filepath.Join(*testdataDir, fmt.Sprintf("minikube-%s-%s-latest-stable", runtime.GOOS, runtime.GOARCH))
err := downloadMinikubeBinary(t, dest, "latest")
if err != nil {
// binary is needed for the test
t.Fatalf("erorr downloading the latest minikube release %v", err)
}
}
defer os.Remove(tf.Name())
defer os.Remove(fname)
releaseRunner := NewMinikubeRunner(t)
releaseRunner.BinaryPath = tf.Name()
mkCurrent := NewMinikubeRunner(t, p)
defer mkCurrent.TearDown(t)
mkRelease := NewMinikubeRunner(t, p)
mkRelease.BinaryPath = fname
// For full coverage: also test upgrading from oldest to newest supported k8s release
releaseRunner.Start(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
releaseRunner.CheckStatus(state.Running.String())
releaseRunner.RunCommand("stop", true)
releaseRunner.CheckStatus(state.Stopped.String())
stdout, stderr, err := mkRelease.Start(fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion))
if err != nil {
t.Fatalf("TestVersionUpgrade minikube start failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mkRelease.CheckStatus(state.Running.String())
mkRelease.RunCommand("stop", true)
mkRelease.CheckStatus(state.Stopped.String())
// Trim the leading "v" prefix to assert that we handle it properly.
currentRunner.Start(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
currentRunner.CheckStatus(state.Running.String())
currentRunner.RunCommand("delete", true)
currentRunner.CheckStatus(state.None.String())
stdout, stderr, err = mkCurrent.Start(fmt.Sprintf("--kubernetes-version=%s", strings.TrimPrefix(constants.NewestKubernetesVersion, "v")))
if err != nil {
t.Fatalf("TestVersionUpgrade mkCurrent.Start start failed : %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
mkCurrent.CheckStatus(state.Running.String())
}
......@@ -16,6 +16,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// the name of this file starts with z intentionally to make it run last after all other tests
// the intent is to make sure the OS env proxy settings are restored after all other tests,
// for example in the case where the test proxy cleanup gets killed or fails
package integration
import (
......@@ -66,20 +69,20 @@ func setUpProxy(t *testing.T) (*http.Server, error) {
func TestProxy(t *testing.T) {
origHP := os.Getenv("HTTP_PROXY")
origNP := os.Getenv("NO_PROXY")
p := profileName(t) // profile name
if isTestNoneDriver(t) {
// TODO fix this later
t.Skip("Skipping proxy warning for none")
}
srv, err := setUpProxy(t)
if err != nil {
t.Fatalf("Failed to set up the test proxy: %s", err)
}
// making sure there is no running minikube to avoid https://github.com/kubernetes/minikube/issues/4132
r := NewMinikubeRunner(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
_, _, err = r.RunWithContext(ctx, "delete")
if err != nil {
t.Logf("Error deleting minikube before test setup %s : ", err)
}
mk := NewMinikubeRunner(t, p)
// Clean up after setting up proxy
defer func(t *testing.T) {
err = os.Setenv("HTTP_PROXY", origHP)
......@@ -95,27 +98,23 @@ func TestProxy(t *testing.T) {
if err != nil {
t.Errorf("Error shutting down the http proxy")
}
_, _, err = r.RunWithContext(ctx, "delete")
if err != nil {
t.Logf("Error deleting minikube when cleaning up proxy setup: %s", err)
if !isTestNoneDriver(t) {
mk.TearDown(t)
}
}(t)
t.Run("Proxy Console Warnning", testProxyWarning)
t.Run("Proxy Dashboard", testProxyDashboard)
}(t)
t.Run("ProxyConsoleWarnning", testProxyWarning)
t.Run("ProxyDashboard", testProxyDashboard)
}
// testProxyWarning checks user is warned correctly about the proxy related env vars
func testProxyWarning(t *testing.T) {
r := NewMinikubeRunner(t, "--wait=false")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
startCmd := fmt.Sprintf("start %s %s", r.StartArgs, r.GlobalArgs)
stdout, stderr, err := r.RunWithContext(ctx, startCmd)
p := profileName(t) // profile name
mk := NewMinikubeRunner(t, p)
stdout, stderr, err := mk.Start()
if err != nil {
t.Fatalf("start: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
t.Fatalf("failed to start minikube (for profile %s) failed : %v\nstdout: %s\nstderr: %s", t.Name(), err, stdout, stderr)
}
msg := "Found network options:"
......@@ -131,7 +130,8 @@ func testProxyWarning(t *testing.T) {
// testProxyDashboard checks if dashboard URL is accessible if proxy is set
func testProxyDashboard(t *testing.T) {
mk := NewMinikubeRunner(t, "--wait=false")
p := profileName(t) // profile name
mk := NewMinikubeRunner(t, p)
cmd, out := mk.RunDaemon("dashboard --url")
defer func() {
err := cmd.Process.Kill()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册