提交 df1175f5 编写于 作者: A Anders F Björklund

Merge branch 'master' into reportcard-again

......@@ -32,6 +32,7 @@ _testmain.go
#iso version file
deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/etc/VERSION
/pkg/minikube/assets/assets.go-e
/pkg/minikube/assets/assets.go
/pkg/minikube/translate/translations.go
/pkg/minikube/translate/translations.go-e
......
# Release Notes
## Version 1.5.2 - 2019-10-31 (Happy Halloween!)
* service: fix --url mode [#5790](https://github.com/kubernetes/minikube/pull/5790)
* Refactor command runner interface, allow stdin writes [#5530](https://github.com/kubernetes/minikube/pull/5530)
* macOS install docs: minikube is a normal Homebrew formula now [#5750](https://github.com/kubernetes/minikube/pull/5750)
* Allow CPU count check to be disabled using --force [#5803](https://github.com/kubernetes/minikube/pull/5803)
* Make network validation friendlier, especially to corp networks [#5802](https://github.com/kubernetes/minikube/pull/5802)
Thank you to our contributors for this release:
- Anders F Björklund
- Issy Long
- Medya Ghazizadeh
- Thomas Strömberg
## Version 1.5.1 - 2019-10-29
* Set Docker open-files limit ('ulimit -n') to be consistent with other runtimes [#5761](https://github.com/kubernetes/minikube/pull/5761)
* Use fixed uid/gid for the default user account [#5767](https://github.com/kubernetes/minikube/pull/5767)
* Set --wait=false to default but still wait for apiserver [#5757](https://github.com/kubernetes/minikube/pull/5757)
* kubelet: Pass --config to use kubeadm generated configuration [#5697](https://github.com/kubernetes/minikube/pull/5697)
* Refactor to remove opening browser and just return url(s) [#5718](https://github.com/kubernetes/minikube/pull/5718)
A huge thank you to our contributors for this release:
- Anders F Björklund
- Medya Ghazizadeh
- Nanik T
- Priya Wadhwa
- Sharif Elgamal
- Thomas Strömberg
## Version 1.5.0 - 2019-10-25
* Default to best-available local hypervisor rather than VirtualBox [#5700](https://github.com/kubernetes/minikube/pull/5700)
......
......@@ -15,12 +15,12 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 5
VERSION_BUILD ?= 0
VERSION_BUILD ?= 2
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD}
VERSION ?= v$(RAW_VERSION)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0
ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).1
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
RPM_VERSION ?= $(DEB_VERSION)
......@@ -99,6 +99,7 @@ MARKDOWNLINT ?= markdownlint
MINIKUBE_MARKDOWN_FILES := README.md docs CONTRIBUTING.md CHANGELOG.md
MINIKUBE_BUILD_TAGS := container_image_ostree_stub containers_image_openpgp
MINIKUBE_BUILD_TAGS += go_getter_nos3 go_getter_nogcs
MINIKUBE_INTEGRATION_BUILD_TAGS := integration $(MINIKUBE_BUILD_TAGS)
CMD_SOURCE_DIRS = cmd pkg
......
......@@ -14,6 +14,7 @@ approvers:
- sharifelgamal
- RA489
- medyagh
- josedonizetti
emeritus_approvers:
- dlorenc
- luxas
......
......@@ -17,9 +17,12 @@ limitations under the License.
package config
import (
"fmt"
"os"
"text/template"
"github.com/pkg/browser"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/cluster"
......@@ -95,9 +98,20 @@ You can add one by annotating a service with the label {{.labelName}}:{{.addonNa
}
for i := range serviceList.Items {
svc := serviceList.Items[i].ObjectMeta.Name
if err := service.WaitAndMaybeOpenService(api, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
var urlString []string
if urlString, err = service.WaitForService(api, namespace, svc, addonsURLTemplate, addonsURLMode, https, wait, interval); err != nil {
exit.WithCodeT(exit.Unavailable, "Wait failed: {{.error}}", out.V{"error": err})
}
if len(urlString) != 0 {
out.T(out.Celebrate, "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc})
for _, url := range urlString {
if err := browser.OpenURL(url); err != nil {
exit.WithError(fmt.Sprintf("browser failed to open url %s", url), err)
}
}
}
}
},
}
......
......@@ -17,13 +17,19 @@ limitations under the License.
package cmd
import (
"fmt"
"net/url"
"os"
"text/template"
"github.com/golang/glog"
"github.com/pkg/browser"
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/service"
)
......@@ -68,11 +74,29 @@ var serviceCmd = &cobra.Command{
if !cluster.IsMinikubeRunning(api) {
os.Exit(1)
}
err = service.WaitAndMaybeOpenService(api, namespace, svc,
serviceURLTemplate, serviceURLMode, https, wait, interval)
urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
if err != nil {
exit.WithError("Error opening service", err)
}
for _, u := range urls {
_, err := url.Parse(u)
if err != nil {
glog.Warningf("failed to parse url %q: %v (will not open)", u, err)
out.String(fmt.Sprintf("%s\n", u))
continue
}
if serviceURLMode {
out.String(fmt.Sprintf("%s\n", u))
continue
}
out.T(out.Celebrate, "Opening service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": svc})
if err := browser.OpenURL(u); err != nil {
exit.WithError(fmt.Sprintf("open url failed: %s", u), err)
}
}
},
}
......
......@@ -171,7 +171,7 @@ func initMinikubeFlags() {
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
startCmd.Flags().Bool(waitUntilHealthy, true, "Wait until Kubernetes core services are healthy before exiting.")
startCmd.Flags().Bool(waitUntilHealthy, false, "Wait until Kubernetes core services are healthy before exiting.")
startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.")
startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
......@@ -390,10 +390,14 @@ func enableAddons() {
}
func waitCluster(bs bootstrapper.Bootstrapper, config cfg.Config) {
if viper.GetBool(waitUntilHealthy) {
if err := bs.WaitCluster(config.KubernetesConfig, viper.GetDuration(waitTimeout)); err != nil {
exit.WithError("Wait failed", err)
var podsToWaitFor []string
if !viper.GetBool(waitUntilHealthy) {
// only wait for apiserver if wait=false
podsToWaitFor = []string{"apiserver"}
}
if err := bs.WaitForPods(config.KubernetesConfig, viper.GetDuration(waitTimeout), podsToWaitFor); err != nil {
exit.WithError("Wait failed", err)
}
}
......@@ -748,7 +752,7 @@ func validateFlags(drvName string) {
} else {
cpuCount = viper.GetInt(cpus)
}
if cpuCount < minimumCPUS {
if cpuCount < minimumCPUS && !viper.GetBool(force) {
exit.UsageT("Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
}
......@@ -1015,7 +1019,7 @@ func validateNetwork(h *host.Host, r command.Runner) string {
}
}
if driver.BareMetal(h.Driver.DriverName()) {
if !driver.BareMetal(h.Driver.DriverName()) {
trySSH(h, ip)
}
......@@ -1047,8 +1051,10 @@ Suggested workarounds:
}
func tryLookup(r command.Runner) {
if err := r.Run("nslookup kubernetes.io"); err != nil {
out.WarningT("VM is unable to resolve DNS hosts: {[.error}}", out.V{"error": err})
// DNS check
if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("VM may be unable to resolve external DNS records")
}
}
......@@ -1060,19 +1066,22 @@ func tryPing(r command.Runner) {
}
func tryRegistry(r command.Runner) {
// Try an HTTPS connection to the
// Try an HTTPS connection to the image repository
proxy := os.Getenv("HTTPS_PROXY")
opts := "-sS"
opts := []string{"-sS"}
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
opts = fmt.Sprintf("-x %s %s", proxy, opts)
opts = append([]string{"-x", proxy}, opts...)
}
repo := viper.GetString(imageRepository)
if repo == "" {
repo = images.DefaultImageRepo
}
if err := r.Run(fmt.Sprintf("curl %s https://%s/", opts, repo)); err != nil {
out.WarningT("VM is unable to connect to the selected image repository: {{.error}}", out.V{"error": err})
opts = append(opts, fmt.Sprintf("https://%s/", repo))
if rr, err := r.RunCmd(exec.Command("curl", opts...)); err != nil {
glog.Warningf("%s failed: %v", rr.Args, err)
out.WarningT("VM is unable to access {{.repository}}, you may need to configure a proxy or set --image-repository", out.V{"repository": repo})
}
}
......
{{ define "main" }}
<div style="padding-top:20px">
{{ .Render "content" }}
</div>
{{ end }}
\ No newline at end of file
{{ define "main" }}
<div style="padding-top:20px">
{{ .Render "content" }}
</div>
{{ end }}
\ No newline at end of file
{{ define "main" }}
<div style="padding-top:60px">
{{ .Render "content" }}
</div>
{{ end }}
\ No newline at end of file
{{ define "main" }}
<div style="padding-top:20px">
{{ .Render "content" }}
</div>
{{ end }}
\ No newline at end of file
docker -1 docker -1 =tcuser /home/docker /bin/bash wheel,vboxsf -
docker 1000 docker 1000 =tcuser /home/docker /bin/bash wheel,vboxsf -
[
{
"name": "v1.5.1",
"checksums": {
"darwin": "7ba345034e176566930d873acd0f38366dd14fdafd038febe600ea38c24c4208",
"linux": "5aed23a876770c92d0162fcf7862d855dc306516614be78ac6fbc47b5cba55e6",
"windows": "5a7bd914b0ae57e0853d72a06b7fb72e645417f2f3cd86d0f1bc4f636a04d160"
}
},
{
"name": "v1.5.0",
"checksums": {
......
......@@ -27,7 +27,7 @@ require (
github.com/google/go-cmp v0.3.0
github.com/gorilla/mux v1.7.1 // indirect
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
github.com/hashicorp/go-getter v1.3.0
github.com/hashicorp/go-getter v1.4.0
github.com/hashicorp/go-multierror v0.0.0-20160811015721-8c5f0ad93604 // indirect
github.com/hashicorp/go-retryablehttp v0.5.4
github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214 // indirect
......@@ -43,6 +43,7 @@ require (
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect
github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect
github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/libvirt/libvirt-go v3.4.0+incompatible
github.com/machine-drivers/docker-machine-driver-vmware v0.1.1
github.com/mattn/go-isatty v0.0.8
......@@ -86,6 +87,7 @@ require (
replace (
git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20190910053320-21bd2f51b8ea
github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c
k8s.io/api => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20190623232353-8c3b7d7679cc
k8s.io/apiextensions-apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20190623232353-8c3b7d7679cc
k8s.io/apimachinery => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20190623232353-8c3b7d7679cc
......
此差异已折叠。
......@@ -23,8 +23,6 @@
# EXTRA_START_ARGS: additional flags to pass into minikube start
# EXTRA_ARGS: additional flags to pass into minikube
# JOB_NAME: the name of the logfile and check name to update on github
# PARALLEL_COUNT: number of tests to run in parallel
readonly TEST_ROOT="${HOME}/minikube-integration"
readonly TEST_HOME="${TEST_ROOT}/${OS_ARCH}-${VM_DRIVER}-${MINIKUBE_LOCATION}-$$-${COMMIT}"
......@@ -42,21 +40,6 @@ echo "uptime: $(uptime)"
echo "kubectl: $(env KUBECONFIG=${TEST_HOME} kubectl version --client --short=true)"
echo "docker: $(docker version --format '{{ .Client.Version }}')"
readonly LOAD=$(uptime | egrep -o "load average.*: [0-9]" | cut -d" " -f3)
if [[ "${LOAD}" -gt 2 ]]; then
echo ""
echo "********************** LOAD WARNING ********************************"
echo "Load average is very high (${LOAD}), which may cause failures. Top:"
if [[ "$(uname)" == "Darwin" ]]; then
# Two samples, macOS does not calculate CPU usage on the first one
top -l 2 -o cpu -n 5 | tail -n 15
else
top -b -n1 | head -n 15
fi
echo "********************** LOAD WARNING ********************************"
echo ""
fi
case "${VM_DRIVER}" in
kvm2)
echo "virsh: $(virsh --version)"
......@@ -159,6 +142,10 @@ if type -P virsh; then
fi
if type -P vboxmanage; then
killall VBoxHeadless || true
sleep 1
killall -9 VBoxHeadless || true
for guid in $(vboxmanage list vms | grep -Eo '\{[a-zA-Z0-9-]+\}'); do
echo "- Removing stale VirtualBox VM: $guid"
vboxmanage startvm "${guid}" --type emergencystop || true
......@@ -254,14 +241,31 @@ if [ "$(uname)" != "Darwin" ]; then
docker build -t gcr.io/k8s-minikube/gvisor-addon:2 -f testdata/gvisor-addon-Dockerfile ./testdata
fi
readonly LOAD=$(uptime | egrep -o "load average.*: [0-9]+" | cut -d" " -f3)
if [[ "${LOAD}" -gt 2 ]]; then
echo ""
echo "********************** LOAD WARNING ********************************"
echo "Load average is very high (${LOAD}), which may cause failures. Top:"
if [[ "$(uname)" == "Darwin" ]]; then
# Two samples, macOS does not calculate CPU usage on the first one
top -l 2 -o cpu -n 5 | tail -n 15
else
top -b -n1 | head -n 15
fi
echo "********************** LOAD WARNING ********************************"
echo "Sleeping 30s to see if load goes down ...."
sleep 30
uptime
fi
echo ""
echo ">> Starting ${E2E_BIN} at $(date)"
set -x
${SUDO_PREFIX}${E2E_BIN} \
-minikube-start-args="--vm-driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
-expected-default-driver="${EXPECTED_DEFAULT_DRIVER}" \
-test.timeout=60m \
-test.parallel=${PARALLEL_COUNT} \
-test.timeout=70m \
${EXTRA_TEST_ARGS} \
-binary="${MINIKUBE_BIN}" && result=$? || result=$?
set +x
echo ">> ${E2E_BIN} exited with ${result} at $(date)"
......
......@@ -19,21 +19,29 @@ set -uf -o pipefail
PATH=/usr/local/bin:/sbin:/usr/local/sbin:$PATH
exit_if_jenkins() {
jenkins=$(pgrep java)
if [[ "$jenkins" -- "" ]]; then
echo "no java, no jenkins"
return 0
# cleanup shared between Linux and macOS
function check_jenkins() {
jenkins_pid="$(pidof java)"
if [[ "${jenkins_pid}" = "" ]]; then
return
fi
pstree $jenkins | grep -v java && echo "jenkins is running..." && exit 1
pstree "${jenkins_pid}" \
| egrep -i 'bash|integration|e2e|minikube' \
&& echo "tests are is running on pid ${jenkins_pid} ..." \
&& exit 1
}
exit_if_jenkins
echo "waiting to see if any jobs are coming in..."
sleep 15
exit_if_jenkins
echo "doing it"
check_jenkins
logger "cleanup_and_reboot running - may shutdown in 60 seconds"
echo "cleanup_and_reboot running - may shutdown in 60 seconds" | wall
sleep 10
check_jenkins
logger "cleanup_and_reboot is happening!"
# kill jenkins to avoid an incoming request
killall java
sudo rm -Rf ~jenkins/.minikube || echo "could not delete minikube"
sudo rm -Rf ~/jenkins/minikube-integration/* || true
# macOS specific cleanup
sudo rm /var/db/dhcpd_leases || echo "could not clear dhcpd leases"
sudo softwareupdate -i -a -R
sudo reboot
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cleanup shared between Linux and macOS
function check_jenkins() {
jenkins_pid="$(pidof java)"
if [[ "${jenkins_pid}" = "" ]]; then
return
fi
pstree "${jenkins_pid}" \
| egrep -i 'bash|integration|e2e|minikube' \
&& echo "tests are is running on pid ${jenkins_pid} ..." \
&& exit 1
}
check_jenkins
logger "cleanup_and_reboot running - may shutdown in 60 seconds"
echo "cleanup_and_reboot running - may shutdown in 60 seconds" | wall
sleep 10
check_jenkins
logger "cleanup_and_reboot is happening!"
# kill jenkins to avoid an incoming request
killall java
# Linux-specific cleanup
# disable localkube, kubelet
systemctl list-unit-files --state=enabled \
| grep kube \
| awk '{ print $1 }' \
| xargs systemctl disable
# update and reboot
apt update -y && apt upgrade -y && reboot
......@@ -28,8 +28,13 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="kvm2"
JOB_NAME="KVM_Linux"
PARALLEL_COUNT=4
EXPECTED_DEFAULT_DRIVER="kvm2"
# We pick kvm as our gvisor testbed because it is fast & reliable
EXTRA_TEST_ARGS="-gvisor"
mkdir cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
# Download files and set permissions
source ./common.sh
......@@ -30,16 +30,16 @@ OS_ARCH="linux-amd64"
VM_DRIVER="none"
JOB_NAME="none_Linux"
EXTRA_ARGS="--bootstrapper=kubeadm"
PARALLEL_COUNT=1
EXPECTED_DEFAULT_DRIVER="kvm2"
SUDO_PREFIX="sudo -E "
export KUBECONFIG="/root/.kube/config"
# "none" driver specific cleanup from previous runs.
sudo kubeadm reset -f || true
# kubeadm reset may not stop pods immediately
docker rm -f $(docker ps -aq) >/dev/null 2>&1 || true
# Try without -f first, primarily because older kubeadm versions (v1.10) don't support it anyways.
sudo kubeadm reset || sudo kubeadm reset -f || true
# Cleanup data directory
sudo rm -rf /data/*
# Cleanup old Kubernetes configs
......@@ -51,5 +51,8 @@ systemctl is-active --quiet kubelet \
&& echo "stopping kubelet" \
&& sudo systemctl stop kubelet
mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
# Download files and set permissions
source ./common.sh
......@@ -28,8 +28,10 @@ set -e
OS_ARCH="linux-amd64"
VM_DRIVER="virtualbox"
JOB_NAME="VirtualBox_Linux"
PARALLEL_COUNT=4
EXPECTED_DEFAULT_DRIVER="kvm2"
mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron
sudo install cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot
# Download files and set permissions
source ./common.sh
......@@ -31,9 +31,13 @@ VM_DRIVER="hyperkit"
JOB_NAME="HyperKit_macOS"
EXTRA_ARGS="--bootstrapper=kubeadm"
EXTRA_START_ARGS=""
PARALLEL_COUNT=3
EXPECTED_DEFAULT_DRIVER="hyperkit"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "FAILED TO INSTALL CLEANUP"
echo "*/30 * * * * $HOME/cleanup_and_reboot.sh" | crontab
crontab -l
# Download files and set permissions
source common.sh
......@@ -29,11 +29,15 @@ OS_ARCH="darwin-amd64"
VM_DRIVER="virtualbox"
JOB_NAME="VirtualBox_macOS"
EXTRA_ARGS="--bootstrapper=kubeadm"
PARALLEL_COUNT=3
# hyperkit behaves better, so it has higher precedence.
# Assumes that hyperkit is also installed on the VirtualBox CI host.
EXPECTED_DEFAULT_DRIVER="hyperkit"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
install cron/cleanup_and_reboot_Darwin.sh $HOME/cleanup_and_reboot.sh || echo "FAILED TO GET INSTALL CLEANUP"
echo "*/30 * * * * $HOME/cleanup_and_reboot.sh" | crontab
crontab -l
# Download files and set permissions
source common.sh
......@@ -39,6 +39,12 @@ if [ -z "${NEW_SHA256}" ]; then
exit 1
fi
echo "***********************************************************************"
echo "Sorry, this script has not yet been updated to support non-cask updates"
echo "See https://github.com/kubernetes/minikube/issues/5779"
echo "***********************************************************************"
exit 99
git config --global user.name "${GITHUB_USER}"
git config --global user.email "${GITHUB_USER}@google.com"
......
......@@ -4,7 +4,7 @@ publish = "site/public/"
command = "pwd && cd themes/docsy && git submodule update -f --init && cd ../.. && hugo"
[build.environment]
HUGO_VERSION = "0.55.6"
HUGO_VERSION = "0.59.0"
[context.production.environment]
HUGO_ENV = "production"
......
......@@ -17,8 +17,8 @@ limitations under the License.
package none
import (
"bytes"
"fmt"
"os/exec"
"strings"
"time"
......@@ -168,8 +168,8 @@ func (d *Driver) Remove() error {
return errors.Wrap(err, "kill")
}
glog.Infof("Removing: %s", cleanupPaths)
cmd := fmt.Sprintf("sudo rm -rf %s", strings.Join(cleanupPaths, " "))
if err := d.exec.Run(cmd); err != nil {
args := append([]string{"rm", "-rf"}, cleanupPaths...)
if _, err := d.exec.RunCmd(exec.Command("sudo", args...)); err != nil {
glog.Errorf("cleanup incomplete: %v", err)
}
return nil
......@@ -217,22 +217,20 @@ func (d *Driver) RunSSHCommandFromDriver() error {
}
// stopKubelet idempotently stops the kubelet
func stopKubelet(exec command.Runner) error {
func stopKubelet(cr command.Runner) error {
glog.Infof("stopping kubelet.service ...")
stop := func() error {
cmdStop := "sudo systemctl stop kubelet.service"
cmdCheck := "sudo systemctl show -p SubState kubelet"
err := exec.Run(cmdStop)
if err != nil {
glog.Errorf("temporary error for %q : %v", cmdStop, err)
cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service")
if rr, err := cr.RunCmd(cmd); err != nil {
glog.Errorf("temporary error for %q : %v", rr.Command(), err)
}
var out bytes.Buffer
errStatus := exec.CombinedOutputTo(cmdCheck, &out)
if errStatus != nil {
glog.Errorf("temporary error: for %q : %v", cmdCheck, errStatus)
cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet")
rr, err := cr.RunCmd(cmd)
if err != nil {
glog.Errorf("temporary error: for %q : %v", rr.Command(), err)
}
if !strings.Contains(out.String(), "dead") && !strings.Contains(out.String(), "failed") {
return fmt.Errorf("unexpected kubelet state: %q", out)
if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") {
return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String())
}
return nil
}
......@@ -245,13 +243,21 @@ func stopKubelet(exec command.Runner) error {
}
// restartKubelet restarts the kubelet
func restartKubelet(exec command.Runner) error {
func restartKubelet(cr command.Runner) error {
glog.Infof("restarting kubelet.service ...")
return exec.Run("sudo systemctl restart kubelet.service")
c := exec.Command("sudo", "systemctl", "restart", "kubelet.service")
if _, err := cr.RunCmd(c); err != nil {
return err
}
return nil
}
// checkKubelet returns an error if the kubelet is not running.
func checkKubelet(exec command.Runner) error {
func checkKubelet(cr command.Runner) error {
glog.Infof("checking for running kubelet ...")
return exec.Run("systemctl is-active --quiet service kubelet")
c := exec.Command("systemctl", "is-active", "--quiet", "service", "kubelet")
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "check kubelet")
}
return nil
}
......@@ -41,7 +41,7 @@ type Bootstrapper interface {
UpdateCluster(config.KubernetesConfig) error
RestartCluster(config.KubernetesConfig) error
DeleteCluster(config.KubernetesConfig) error
WaitCluster(config.KubernetesConfig, time.Duration) error
WaitForPods(config.KubernetesConfig, time.Duration, []string) error
// LogCommands returns a map of log type to a command which will display that log.
LogCommands(LogOptions) map[string]string
SetupCerts(cfg config.KubernetesConfig) error
......
......@@ -22,6 +22,7 @@ import (
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
......@@ -141,7 +142,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig) error {
// configure CA certificates
if err := configureCACerts(cmd, caCerts); err != nil {
return errors.Wrapf(err, "error configuring CA certificates during provisioning %v", err)
return errors.Wrapf(err, "Configuring CA certs")
}
return nil
}
......@@ -318,21 +319,21 @@ func collectCACerts() (map[string]string, error) {
}
// getSubjectHash calculates Certificate Subject Hash for creating certificate symlinks
func getSubjectHash(cmd command.Runner, filePath string) (string, error) {
out, err := cmd.CombinedOutput(fmt.Sprintf("openssl x509 -hash -noout -in '%s'", filePath))
func getSubjectHash(cr command.Runner, filePath string) (string, error) {
rr, err := cr.RunCmd(exec.Command("openssl", "x509", "-hash", "-noout", "-in", filePath))
if err != nil {
return "", err
return "", errors.Wrapf(err, rr.Command())
}
stringHash := strings.TrimSpace(out)
stringHash := strings.TrimSpace(rr.Stdout.String())
return stringHash, nil
}
// configureCACerts looks up and installs all uploaded PEM certificates in /usr/share/ca-certificates to system-wide certificate store (/etc/ssl/certs).
// OpenSSL binary required in minikube ISO
func configureCACerts(cmd command.Runner, caCerts map[string]string) error {
func configureCACerts(cr command.Runner, caCerts map[string]string) error {
hasSSLBinary := true
if err := cmd.Run("which openssl"); err != nil {
_, err := cr.RunCmd(exec.Command("openssl", "version"))
if err != nil {
hasSSLBinary = false
}
......@@ -343,24 +344,25 @@ func configureCACerts(cmd command.Runner, caCerts map[string]string) error {
for _, caCertFile := range caCerts {
dstFilename := path.Base(caCertFile)
certStorePath := path.Join(SSLCertStoreDir, dstFilename)
if err := cmd.Run(fmt.Sprintf("sudo test -f '%s'", certStorePath)); err != nil {
if err := cmd.Run(fmt.Sprintf("sudo ln -s '%s' '%s'", caCertFile, certStorePath)); err != nil {
return errors.Wrapf(err, "error making symbol link for certificate %s", caCertFile)
_, err := cr.RunCmd(exec.Command("sudo", "test", "-f", certStorePath))
if err != nil {
if _, err := cr.RunCmd(exec.Command("sudo", "ln", "-s", caCertFile, certStorePath)); err != nil {
return errors.Wrapf(err, "create symlink for %s", caCertFile)
}
}
if hasSSLBinary {
subjectHash, err := getSubjectHash(cmd, caCertFile)
subjectHash, err := getSubjectHash(cr, caCertFile)
if err != nil {
return errors.Wrapf(err, "error calculating subject hash for certificate %s", caCertFile)
return errors.Wrapf(err, "calculate hash for cacert %s", caCertFile)
}
subjectHashLink := path.Join(SSLCertStoreDir, fmt.Sprintf("%s.0", subjectHash))
if err := cmd.Run(fmt.Sprintf("sudo test -f '%s'", subjectHashLink)); err != nil {
if err := cmd.Run(fmt.Sprintf("sudo ln -s '%s' '%s'", certStorePath, subjectHashLink)); err != nil {
return errors.Wrapf(err, "error making subject hash symbol link for certificate %s", caCertFile)
_, err = cr.RunCmd(exec.Command("sudo", "test", "-f", subjectHashLink))
if err != nil {
if _, err := cr.RunCmd(exec.Command("sudo", "ln", "-s", certStorePath, subjectHashLink)); err != nil {
return errors.Wrapf(err, "linking caCertFile %s", caCertFile)
}
}
}
}
return nil
}
......@@ -62,9 +62,9 @@ func TestSetupCerts(t *testing.T) {
certStorePath := path.Join(SSLCertStoreDir, dst)
certNameHash := "abcdef"
remoteCertHashLink := path.Join(SSLCertStoreDir, fmt.Sprintf("%s.0", certNameHash))
cmdMap[fmt.Sprintf("sudo ln -s '%s' '%s'", certFile, certStorePath)] = "1"
cmdMap[fmt.Sprintf("openssl x509 -hash -noout -in '%s'", certFile)] = certNameHash
cmdMap[fmt.Sprintf("sudo ln -s '%s' '%s'", certStorePath, remoteCertHashLink)] = "1"
cmdMap[fmt.Sprintf("sudo ln -s %s %s", certFile, certStorePath)] = "1"
cmdMap[fmt.Sprintf("openssl x509 -hash -noout -in %s", certFile)] = certNameHash
cmdMap[fmt.Sprintf("sudo ln -s %s %s", certStorePath, remoteCertHashLink)] = "1"
}
f := command.NewFakeCommandRunner()
f.SetCommandToOutput(cmdMap)
......
......@@ -19,6 +19,7 @@ package kubeadm
import (
"bytes"
"crypto/tls"
"os/exec"
"fmt"
"net"
......@@ -36,6 +37,7 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
......@@ -63,6 +65,7 @@ const (
defaultCNIConfigPath = "/etc/cni/net.d/k8s.conf"
kubeletServiceFile = "/lib/systemd/system/kubelet.service"
kubeletSystemdConfFile = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
AllPods = "ALL_PODS"
)
const (
......@@ -136,12 +139,11 @@ func NewKubeadmBootstrapper(api libmachine.API) (*Bootstrapper, error) {
// GetKubeletStatus returns the kubelet status
func (k *Bootstrapper) GetKubeletStatus() (string, error) {
statusCmd := `sudo systemctl is-active kubelet`
status, err := k.c.CombinedOutput(statusCmd)
rr, err := k.c.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet"))
if err != nil {
return "", errors.Wrap(err, "getting status")
return "", errors.Wrapf(err, "getting kublet status. command: %q", rr.Command())
}
s := strings.TrimSpace(status)
s := strings.TrimSpace(rr.Stdout.String())
switch s {
case "active":
return state.Running.String(), nil
......@@ -222,16 +224,16 @@ func etcdDataDir() string {
// createCompatSymlinks creates compatibility symlinks to transition running services to new directory structures
func (k *Bootstrapper) createCompatSymlinks() error {
legacyEtcd := "/data/minikube"
if err := k.c.Run(fmt.Sprintf("sudo test -d %s", legacyEtcd)); err != nil {
glog.Infof("%s check failed, skipping compat symlinks: %v", legacyEtcd, err)
if _, err := k.c.RunCmd(exec.Command("sudo", "test", "-d", legacyEtcd)); err != nil {
glog.Infof("%s skipping compat symlinks: %v", legacyEtcd, err)
return nil
}
glog.Infof("Found %s, creating compatibility symlinks ...", legacyEtcd)
cmd := fmt.Sprintf("sudo ln -s %s %s", legacyEtcd, etcdDataDir())
out, err := k.c.CombinedOutput(cmd)
if err != nil {
return errors.Wrapf(err, "cmd failed: %s\n%s\n", cmd, out)
c := exec.Command("sudo", "ln", "-s", legacyEtcd, etcdDataDir())
if rr, err := k.c.RunCmd(c); err != nil {
return errors.Wrapf(err, "create symlink failed: %s", rr.Command())
}
return nil
}
......@@ -273,11 +275,9 @@ func (k *Bootstrapper) StartCluster(k8s config.KubernetesConfig) error {
ignore = append(ignore, "SystemVerification")
}
cmd := fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s",
invokeKubeadm(k8s.KubernetesVersion), yamlConfigPath, extraFlags, strings.Join(ignore, ","))
out, err := k.c.CombinedOutput(cmd)
if err != nil {
return errors.Wrapf(err, "cmd failed: %s\n%s\n", cmd, out)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", invokeKubeadm(k8s.KubernetesVersion), yamlConfigPath, extraFlags, strings.Join(ignore, ",")))
if rr, err := k.c.RunCmd(c); err != nil {
return errors.Wrapf(err, "init failed. cmd: %q", rr.Command())
}
glog.Infof("Configuring cluster permissions ...")
......@@ -302,22 +302,23 @@ func (k *Bootstrapper) StartCluster(k8s config.KubernetesConfig) error {
// adjustResourceLimits makes fine adjustments to pod resources that aren't possible via kubeadm config.
func (k *Bootstrapper) adjustResourceLimits() error {
score, err := k.c.CombinedOutput("cat /proc/$(pgrep kube-apiserver)/oom_adj")
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "cat /proc/$(pgrep kube-apiserver)/oom_adj"))
if err != nil {
return errors.Wrap(err, "oom_adj check")
return errors.Wrapf(err, "oom_adj check cmd %s. ", rr.Command())
}
glog.Infof("apiserver oom_adj: %s", score)
glog.Infof("apiserver oom_adj: %s", rr.Stdout.String())
// oom_adj is already a negative number
if strings.HasPrefix(score, "-") {
if strings.HasPrefix(rr.Stdout.String(), "-") {
return nil
}
glog.Infof("adjusting apiserver oom_adj to -10")
// Prevent the apiserver from OOM'ing before other pods, as it is our gateway into the cluster.
// It'd be preferable to do this via Kubernetes, but kubeadm doesn't have a way to set pod QoS.
if err := k.c.Run("echo -10 | sudo tee /proc/$(pgrep kube-apiserver)/oom_adj"); err != nil {
return errors.Wrap(err, "oom_adj adjust")
if _, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", "echo -10 | sudo tee /proc/$(pgrep kube-apiserver)/oom_adj")); err != nil {
return errors.Wrap(err, fmt.Sprintf("oom_adj adjust"))
}
return nil
}
......@@ -352,7 +353,7 @@ func addAddons(files *[]assets.CopyableFile, data interface{}) error {
// client returns a Kubernetes client to use to speak to a kubeadm launched apiserver
func (k *Bootstrapper) client(k8s config.KubernetesConfig) (*kubernetes.Clientset, error) {
// Catch case if WaitCluster was called with a stale ~/.kube/config
// Catch case if WaitForPods was called with a stale ~/.kube/config
config, err := kapi.ClientConfig(k.contextName)
if err != nil {
return nil, errors.Wrap(err, "client config")
......@@ -367,8 +368,8 @@ func (k *Bootstrapper) client(k8s config.KubernetesConfig) (*kubernetes.Clientse
return kubernetes.NewForConfig(config)
}
// WaitCluster blocks until Kubernetes appears to be healthy.
func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Duration) error {
// WaitForPods blocks until pods specified in podsToWaitFor appear to be healthy.
func (k *Bootstrapper) WaitForPods(k8s config.KubernetesConfig, timeout time.Duration, podsToWaitFor []string) error {
// Do not wait for "k8s-app" pods in the case of CNI, as they are managed
// by a CNI plugin which is usually started after minikube has been brought
// up. Otherwise, minikube won't start, as "k8s-app" pods are not ready.
......@@ -377,10 +378,13 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Dur
// Wait until the apiserver can answer queries properly. We don't care if the apiserver
// pod shows up as registered, but need the webserver for all subsequent queries.
if shouldWaitForPod("apiserver", podsToWaitFor) {
out.String(" apiserver")
if err := k.waitForAPIServer(k8s); err != nil {
return errors.Wrap(err, "waiting for apiserver")
}
}
client, err := k.client(k8s)
if err != nil {
......@@ -391,6 +395,9 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Dur
if componentsOnly && p.key != "component" { // skip component check if network plugin is cni
continue
}
if !shouldWaitForPod(p.name, podsToWaitFor) {
continue
}
out.String(" %s", p.name)
selector := labels.SelectorFromSet(labels.Set(map[string]string{p.key: p.value}))
if err := kapi.WaitForPodsWithLabelRunning(client, "kube-system", selector, timeout); err != nil {
......@@ -401,6 +408,29 @@ func (k *Bootstrapper) WaitCluster(k8s config.KubernetesConfig, timeout time.Dur
return nil
}
// shouldWaitForPod returns true if:
// 1. podsToWaitFor is nil
// 2. name is in podsToWaitFor
// 3. ALL_PODS is in podsToWaitFor
// else, return false
func shouldWaitForPod(name string, podsToWaitFor []string) bool {
	// A nil list means "wait for everything"; an empty (non-nil) list
	// means "wait for nothing", which the loop below handles naturally.
	if podsToWaitFor == nil {
		return true
	}
	for _, p := range podsToWaitFor {
		if p == AllPods || p == name {
			return true
		}
	}
	return false
}
// RestartCluster restarts the Kubernetes cluster configured by kubeadm
func (k *Bootstrapper) RestartCluster(k8s config.KubernetesConfig) error {
glog.Infof("RestartCluster start")
......@@ -434,18 +464,20 @@ func (k *Bootstrapper) RestartCluster(k8s config.KubernetesConfig) error {
}
// Run commands one at a time so that it is easier to root cause failures.
for _, cmd := range cmds {
if err := k.c.Run(cmd); err != nil {
return errors.Wrapf(err, "running cmd: %s", cmd)
for _, c := range cmds {
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
if err != nil {
return errors.Wrapf(err, "running cmd: %s", rr.Command())
}
}
if err := k.waitForAPIServer(k8s); err != nil {
return errors.Wrap(err, "waiting for apiserver")
}
// restart the proxy and coredns
if err := k.c.Run(fmt.Sprintf("%s phase addon all --config %s", baseCmd, yamlConfigPath)); err != nil {
return errors.Wrapf(err, "addon phase")
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, yamlConfigPath))); err != nil {
return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command()))
}
if err := k.adjustResourceLimits(); err != nil {
......@@ -465,9 +497,9 @@ func (k *Bootstrapper) waitForAPIServer(k8s config.KubernetesConfig) error {
// To give a better error message, first check for process existence via ssh
// Needs minutes in case the image isn't cached (such as with v1.10.x)
err := wait.PollImmediate(time.Millisecond*300, time.Minute*3, func() (bool, error) {
ierr := k.c.Run(`sudo pgrep kube-apiserver`)
rr, ierr := k.c.RunCmd(exec.Command("sudo", "pgrep", "kube-apiserver"))
if ierr != nil {
glog.Warningf("pgrep apiserver: %v", ierr)
glog.Warningf("pgrep apiserver: %v cmd: %s", ierr, rr.Command())
return false, nil
}
return true, nil
......@@ -487,11 +519,21 @@ func (k *Bootstrapper) waitForAPIServer(k8s config.KubernetesConfig) error {
if status != "Running" {
return false, nil
}
return true, nil
// Make sure apiserver pod is retrievable
client, err := k.client(k8s)
if err != nil {
glog.Warningf("get kubernetes client: %v", err)
return false, nil
}
_, err = client.CoreV1().Pods("kube-system").Get("kube-apiserver-minikube", metav1.GetOptions{})
if err != nil {
return false, nil
}
return true, nil
// TODO: Check apiserver/kubelet logs for fatal errors so that users don't
// need to wait minutes to find out their flag didn't work.
}
err = wait.PollImmediate(kconst.APICallRetryInterval, 2*kconst.DefaultControlPlaneTimeout, f)
return err
......@@ -508,9 +550,9 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
if version.LT(semver.MustParse("1.11.0")) {
cmd = fmt.Sprintf("%s reset", invokeKubeadm(k8s.KubernetesVersion))
}
out, err := k.c.CombinedOutput(cmd)
if err != nil {
return errors.Wrapf(err, "kubeadm reset: %s\n%s\n", cmd, out)
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil {
return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command())
}
return nil
......@@ -526,9 +568,9 @@ func (k *Bootstrapper) PullImages(k8s config.KubernetesConfig) error {
return fmt.Errorf("pull command is not supported by kubeadm v%s", version)
}
cmd := fmt.Sprintf("%s config images pull --config %s", invokeKubeadm(k8s.KubernetesVersion), yamlConfigPath)
if err := k.c.Run(cmd); err != nil {
return errors.Wrapf(err, "running cmd: %s", cmd)
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s config images pull --config %s", invokeKubeadm(k8s.KubernetesVersion), yamlConfigPath)))
if err != nil {
return errors.Wrapf(err, "running cmd: %q", rr.Command())
}
return nil
}
......@@ -622,11 +664,12 @@ func (k *Bootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
glog.Infof("kubelet %s config:\n%s", cfg.KubernetesVersion, kubeletCfg)
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
// stop kubelet to avoid "Text File Busy" error
err = k.c.Run(`pgrep kubelet && sudo systemctl stop kubelet`)
if err != nil {
glog.Warningf("unable to stop kubelet: %s", err)
if rr, err := k.c.RunCmd(stopCmd); err != nil {
glog.Warningf("unable to stop kubelet: %s command: %q output: %q", err, rr.Command(), rr.Output())
}
if err := transferBinaries(cfg, k.c); err != nil {
return errors.Wrap(err, "downloading binaries")
}
......@@ -640,7 +683,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
}
}
if err := k.c.Run(`sudo systemctl daemon-reload && sudo systemctl start kubelet`); err != nil {
if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")); err != nil {
return errors.Wrap(err, "starting kubelet")
}
return nil
......
......@@ -48,7 +48,7 @@ Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
ExecStart=/var/lib/minikube/binaries/v1.11.10/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,
......@@ -66,7 +66,7 @@ Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
......@@ -84,7 +84,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
......@@ -109,7 +109,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
......@@ -128,7 +128,7 @@ Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests
ExecStart=/var/lib/minikube/binaries/v1.16.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,
......@@ -363,3 +363,45 @@ func TestGenerateConfig(t *testing.T) {
}
}
}
// TestShouldWaitForPod covers nil, empty, matching, non-matching,
// and wildcard ("ALL_PODS") wait lists.
func TestShouldWaitForPod(t *testing.T) {
	cases := []struct {
		description   string
		pod           string
		podsToWaitFor []string
		expected      bool
	}{
		{description: "pods to wait for is nil", pod: "apiserver", expected: true},
		{description: "pods to wait for is empty", pod: "apiserver", podsToWaitFor: []string{}},
		{description: "pod is in podsToWaitFor", pod: "apiserver", podsToWaitFor: []string{"etcd", "apiserver"}, expected: true},
		{description: "pod is not in podsToWaitFor", pod: "apiserver", podsToWaitFor: []string{"etcd", "gvisor"}},
		{description: "wait for all pods", pod: "apiserver", podsToWaitFor: []string{"ALL_PODS"}, expected: true},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			if got := shouldWaitForPod(tc.pod, tc.podsToWaitFor); got != tc.expected {
				t.Fatalf("unexpected diff: got %t, expected %t", got, tc.expected)
			}
		})
	}
}
......@@ -217,6 +217,9 @@ var versionSpecificOpts = []config.VersionedExtraOption{
LessThanOrEqual: semver.MustParse("1.15.0-alpha.3"),
},
// Kubelet config file
config.NewUnversionedOption(Kubelet, "config", "/var/lib/kubelet/config.yaml"),
// Network args
config.NewUnversionedOption(Kubelet, "cluster-dns", "10.96.0.10"),
config.NewUnversionedOption(Kubelet, "cluster-domain", "cluster.local"),
......
......@@ -19,12 +19,14 @@ package cluster
import (
"fmt"
"os"
"os/exec"
"sort"
"strconv"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/command"
)
// MountConfig defines the options available to the Mount command
......@@ -49,7 +51,7 @@ type MountConfig struct {
// mountRunner is the subset of CommandRunner used for mounting
type mountRunner interface {
CombinedOutput(string) (string, error)
RunCmd(*exec.Cmd) (*command.RunResult, error)
}
// Mount runs the mount command from the 9p client on the VM to the 9p server on the host
......@@ -58,14 +60,16 @@ func Mount(r mountRunner, source string, target string, c *MountConfig) error {
return errors.Wrap(err, "umount")
}
cmd := fmt.Sprintf("sudo mkdir -m %o -p %s && %s", c.Mode, target, mntCmd(source, target, c))
glog.Infof("Will run: %s", cmd)
out, err := r.CombinedOutput(cmd)
if _, err := r.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mkdir -m %o -p %s && %s", c.Mode, target, mntCmd(source, target, c)))); err != nil {
return errors.Wrap(err, "create folder pre-mount")
}
rr, err := r.RunCmd(exec.Command("/bin/bash", "-c", mntCmd(source, target, c)))
if err != nil {
glog.Infof("%s failed: err=%s, output: %q", cmd, err, out)
return errors.Wrap(err, out)
return errors.Wrapf(err, "mount with cmd %s ", rr.Command())
}
glog.Infof("%s output: %q", cmd, out)
glog.Infof("mount successful: %q", rr.Output())
return nil
}
......@@ -131,20 +135,13 @@ func mntCmd(source string, target string, c *MountConfig) string {
return fmt.Sprintf("sudo mount -t %s -o %s %s %s", c.Type, strings.Join(opts, ","), source, target)
}
// umountCmd returns a shell command that force-unmounts target only when
// it is currently mounted, and succeeds (via "echo") otherwise.
func umountCmd(target string) string {
	// grep because findmnt will also display the parent mount.
	mounted := fmt.Sprintf("[ \"x$(findmnt -T %s | grep %s)\" != \"x\" ]", target, target)
	return mounted + " && sudo umount -f " + target + " || echo "
}
// Unmount unmounts a path
func Unmount(r mountRunner, target string) error {
cmd := umountCmd(target)
glog.Infof("Will run: %s", cmd)
out, err := r.CombinedOutput(cmd)
glog.Infof("unmount force err=%v, out=%s", err, out)
if err != nil {
return errors.Wrap(err, out)
// grep because findmnt will also display the parent!
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("[ \"x$(findmnt -T %s | grep %s)\" != \"x\" ] && sudo umount -f %s || echo ", target, target, target))
if _, err := r.RunCmd(c); err != nil {
return errors.Wrap(err, "unmount")
}
glog.Infof("unmount for %s ran successfully", target)
return nil
}
......@@ -23,50 +23,27 @@ import (
"github.com/google/go-cmp/cmp"
)
// mockMountRunner records each command it is asked to run and
// always reports success with empty output.
type mockMountRunner struct {
	cmds []string
	T    *testing.T
}

// newMockMountRunner returns a mock runner with an empty command log.
func newMockMountRunner(t *testing.T) *mockMountRunner {
	r := &mockMountRunner{}
	r.T = t
	r.cmds = []string{}
	return r
}

// CombinedOutput appends cmd to the recorded log and succeeds.
func (m *mockMountRunner) CombinedOutput(cmd string) (string, error) {
	m.cmds = append(m.cmds, cmd)
	return "", nil
}
func TestMount(t *testing.T) {
func TestMntCmd(t *testing.T) {
var tests = []struct {
name string
source string
target string
cfg *MountConfig
want []string
want string
}{
{
name: "simple",
source: "src",
target: "target",
cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0700)},
want: []string{
"[ \"x$(findmnt -T target | grep target)\" != \"x\" ] && sudo umount -f target || echo ",
"sudo mkdir -m 700 -p target && sudo mount -t 9p -o dfltgid=0,dfltuid=0 src target",
},
want: "sudo mount -t 9p -o dfltgid=0,dfltuid=0 src target",
},
{
name: "named uid",
source: "src",
target: "target",
cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0700), UID: "docker", GID: "docker"},
want: []string{
"[ \"x$(findmnt -T target | grep target)\" != \"x\" ] && sudo umount -f target || echo ",
"sudo mkdir -m 700 -p target && sudo mount -t 9p -o dfltgid=$(grep ^docker: /etc/group | cut -d: -f3),dfltuid=$(id -u docker) src target",
},
want: "sudo mount -t 9p -o dfltgid=$(grep ^docker: /etc/group | cut -d: -f3),dfltuid=$(id -u docker) src target",
},
{
name: "everything",
......@@ -76,10 +53,7 @@ func TestMount(t *testing.T) {
"noextend": "",
"cache": "fscache",
}},
want: []string{
"[ \"x$(findmnt -T /target | grep /target)\" != \"x\" ] && sudo umount -f /target || echo ",
"sudo mkdir -m 777 -p /target && sudo mount -t 9p -o cache=fscache,dfltgid=72,dfltuid=82,noextend,version=9p2000.u 10.0.0.1 /target",
},
want: "sudo mount -t 9p -o cache=fscache,dfltgid=72,dfltuid=82,noextend,version=9p2000.u 10.0.0.1 /target",
},
{
name: "version-conflict",
......@@ -88,35 +62,17 @@ func TestMount(t *testing.T) {
cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0700), Version: "9p2000.u", Options: map[string]string{
"version": "9p2000.L",
}},
want: []string{
"[ \"x$(findmnt -T tgt | grep tgt)\" != \"x\" ] && sudo umount -f tgt || echo ",
"sudo mkdir -m 700 -p tgt && sudo mount -t 9p -o dfltgid=0,dfltuid=0,version=9p2000.L src tgt",
},
want: "sudo mount -t 9p -o dfltgid=0,dfltuid=0,version=9p2000.L src tgt",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
r := newMockMountRunner(t)
err := Mount(r, tc.source, tc.target, tc.cfg)
if err != nil {
t.Fatalf("Mount(%s, %s, %+v): %v", tc.source, tc.target, tc.cfg, err)
}
if diff := cmp.Diff(r.cmds, tc.want); diff != "" {
got := mntCmd(tc.source, tc.target, tc.cfg)
want := tc.want
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("command diff (-want +got): %s", diff)
}
})
}
}
// TestUnmount checks that Unmount issues exactly the conditional
// force-umount shell command for the given target.
func TestUnmount(t *testing.T) {
	runner := newMockMountRunner(t)
	if err := Unmount(runner, "/mnt"); err != nil {
		t.Fatalf("Unmount(/mnt): %v", err)
	}
	expected := []string{"[ \"x$(findmnt -T /mnt | grep /mnt)\" != \"x\" ] && sudo umount -f /mnt || echo "}
	if diff := cmp.Diff(runner.cmds, expected); diff != "" {
		t.Errorf("command diff (-want +got): %s", diff)
	}
}
......@@ -17,34 +17,28 @@ limitations under the License.
package command
import (
"bytes"
"fmt"
"io"
"os/exec"
"path"
"strings"
"k8s.io/minikube/pkg/minikube/assets"
)
// RunResult holds the results of a Runner
type RunResult struct {
	Stdout bytes.Buffer // captured standard output of the command
	Stderr bytes.Buffer // captured standard error of the command
	ExitCode int // process exit code; populated from exec.ExitError on failure
	Args []string // the args that was passed to Runner
}
// Runner represents an interface to run commands.
type Runner interface {
// Run starts the specified command and waits for it to complete.
Run(cmd string) error
// CombinedOutputTo runs the command and stores both command
// output and error to out. A typical usage is:
//
// var b bytes.Buffer
// CombinedOutput(cmd, &b)
// fmt.Println(b.Bytes())
//
// Or, you can set out to os.Stdout, the command output and
// error would show on your terminal immediately before you
// cmd exit. This is useful for a long run command such as
// continuously print running logs.
CombinedOutputTo(cmd string, out io.Writer) error
// CombinedOutput runs the command and returns its combined standard
// output and standard error.
CombinedOutput(cmd string) (string, error)
	// RunCmd runs a command of exec.Cmd type, allowing the caller to set cmd.Stdin, cmd.Stdout, etc.
	// Note that not all implementors are guaranteed to handle every property of cmd.
RunCmd(cmd *exec.Cmd) (*RunResult, error)
// Copy is a convenience method that runs a command to copy a file
Copy(assets.CopyableFile) error
......@@ -56,3 +50,29 @@ type Runner interface {
// getDeleteFileCommand returns the shell command that removes f from its
// target location on the machine.
func getDeleteFileCommand(f assets.CopyableFile) string {
	target := path.Join(f.GetTargetDir(), f.GetTargetName())
	return "sudo rm " + target
}
// Command returns a human readable command string that does not induce eye fatigue.
// Arguments containing spaces are quoted so the string round-trips visually.
// Returns "" when no args were recorded, rather than panicking on Args[0].
func (rr RunResult) Command() string {
	if len(rr.Args) == 0 {
		return ""
	}
	var sb strings.Builder
	sb.WriteString(rr.Args[0])
	for _, a := range rr.Args[1:] {
		if strings.Contains(a, " ") {
			// Quote so a multi-word argument reads as one token.
			sb.WriteString(` "` + a + `"`)
			continue
		}
		sb.WriteString(" " + a)
	}
	return sb.String()
}
// Output returns human-readable output for an execution result,
// including only the streams that produced data.
func (rr RunResult) Output() string {
	var b strings.Builder
	if rr.Stdout.Len() > 0 {
		fmt.Fprintf(&b, "-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes())
	}
	if rr.Stderr.Len() > 0 {
		fmt.Fprintf(&b, "\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes())
	}
	return b.String()
}
......@@ -24,6 +24,7 @@ import (
"path"
"path/filepath"
"strconv"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
......@@ -35,41 +36,45 @@ import (
// It implements the CommandRunner interface.
type ExecRunner struct{}
// Run starts the specified command in a bash shell and waits for it to complete.
func (*ExecRunner) Run(cmd string) error {
glog.Infoln("Run:", cmd)
c := exec.Command("/bin/bash", "-c", cmd)
if err := c.Run(); err != nil {
return errors.Wrapf(err, "running command: %s", cmd)
// RunCmd implements the Command Runner interface to run a exec.Cmd object
func (*ExecRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
rr := &RunResult{Args: cmd.Args}
glog.Infof("(ExecRunner) Run: %v", rr.Command())
var outb, errb io.Writer
if cmd.Stdout == nil {
var so bytes.Buffer
outb = io.MultiWriter(&so, &rr.Stdout)
} else {
outb = io.MultiWriter(cmd.Stdout, &rr.Stdout)
}
return nil
}
// CombinedOutputTo runs the command and stores both command
// output and error to out.
func (*ExecRunner) CombinedOutputTo(cmd string, out io.Writer) error {
glog.Infoln("Run with output:", cmd)
c := exec.Command("/bin/bash", "-c", cmd)
c.Stdout = out
c.Stderr = out
err := c.Run()
if err != nil {
return errors.Wrapf(err, "running command: %s\n.", cmd)
if cmd.Stderr == nil {
var se bytes.Buffer
errb = io.MultiWriter(&se, &rr.Stderr)
} else {
errb = io.MultiWriter(cmd.Stderr, &rr.Stderr)
}
return nil
}
cmd.Stdout = outb
cmd.Stderr = errb
// CombinedOutput runs the command in a bash shell and returns its
// combined standard output and standard error.
func (e *ExecRunner) CombinedOutput(cmd string) (string, error) {
var b bytes.Buffer
err := e.CombinedOutputTo(cmd, &b)
if err != nil {
return "", errors.Wrapf(err, "running command: %s\n output: %s", cmd, b.Bytes())
start := time.Now()
err := cmd.Run()
elapsed := time.Since(start)
if err == nil {
// Reduce log spam
if elapsed > (1 * time.Second) {
glog.Infof("(ExecRunner) Done: %v: (%s)", rr.Command(), elapsed)
}
return b.String(), nil
} else {
if exitError, ok := err.(*exec.ExitError); ok {
rr.ExitCode = exitError.ExitCode()
}
glog.Infof("(ExecRunner) Non-zero exit: %v: %v (%s)\n%s", rr.Command(), err, elapsed, rr.Output())
err = errors.Wrapf(err, "command failed: %s\nstdout: %s\nstderr: %s", rr.Command(), rr.Stdout.String(), rr.Stderr.String())
}
return rr, err
}
// Copy copies a file and its permissions
......
......@@ -20,9 +20,13 @@ import (
"bytes"
"fmt"
"io"
"os/exec"
"strings"
"time"
"golang.org/x/sync/syncmap"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
......@@ -43,34 +47,38 @@ func NewFakeCommandRunner() *FakeCommandRunner {
return &FakeCommandRunner{}
}
// Run returns nil if output has been set for the given command text.
func (f *FakeCommandRunner) Run(cmd string) error {
_, err := f.CombinedOutput(cmd)
return err
}
// RunCmd implements the Command Runner interface to run a exec.Cmd object
func (f *FakeCommandRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
rr := &RunResult{Args: cmd.Args}
glog.Infof("(FakeCommandRunner) Run: %v", rr.Command())
// CombinedOutputTo runs the command and stores both command
// output and error to out.
func (f *FakeCommandRunner) CombinedOutputTo(cmd string, out io.Writer) error {
value, ok := f.cmdMap.Load(cmd)
if !ok {
return fmt.Errorf("unavailable command: %s", cmd)
start := time.Now()
out, ok := f.cmdMap.Load(strings.Join(rr.Args, " "))
var buf bytes.Buffer
outStr := ""
if out != nil {
outStr = out.(string)
}
_, err := fmt.Fprint(out, value)
_, err := buf.WriteString(outStr)
if err != nil {
return err
return rr, errors.Wrap(err, "Writing outStr to FakeCommandRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return nil
}
elapsed := time.Since(start)
// CombinedOutput returns the set output for a given command text.
func (f *FakeCommandRunner) CombinedOutput(cmd string) (string, error) {
out, ok := f.cmdMap.Load(cmd)
if !ok {
return "", fmt.Errorf("unavailable command: %s", cmd)
if ok {
// Reduce log spam
if elapsed > (1 * time.Second) {
glog.Infof("(FakeCommandRunner) Done: %v: (%s)", rr.Command(), elapsed)
}
} else {
glog.Infof("(FakeCommandRunner) Non-zero exit: %v: (%s)\n%s", rr.Command(), elapsed, out)
return rr, fmt.Errorf("unavailable command: %s", rr.Command())
}
return out.(string), nil
return rr, nil
}
// Copy adds the filename, file contents key value pair to the stored map.
......
......@@ -17,13 +17,17 @@ limitations under the License.
package command
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"path"
"sync"
"time"
"github.com/golang/glog"
"github.com/kballard/go-shellquote"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
"golang.org/x/sync/errgroup"
......@@ -55,17 +59,6 @@ func (s *SSHRunner) Remove(f assets.CopyableFile) error {
return sess.Run(cmd)
}
// singleWriter serializes writes from multiple goroutines into one buffer.
type singleWriter struct {
	b bytes.Buffer
	mu sync.Mutex
}

// Write appends p to the buffer under the lock so concurrent writers
// cannot corrupt the buffer's internal state.
func (w *singleWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.b.Write(p)
}
// teeSSH runs an SSH command, streaming stdout, stderr to logs
func teeSSH(s *ssh.Session, cmd string, outB io.Writer, errB io.Writer) error {
outPipe, err := s.StdoutPipe()
......@@ -81,13 +74,13 @@ func teeSSH(s *ssh.Session, cmd string, outB io.Writer, errB io.Writer) error {
wg.Add(2)
go func() {
if err := util.TeePrefix(util.ErrPrefix, errPipe, errB, glog.V(8).Infof); err != nil {
if err := teePrefix(util.ErrPrefix, errPipe, errB, glog.V(8).Infof); err != nil {
glog.Errorf("tee stderr: %v", err)
}
wg.Done()
}()
go func() {
if err := util.TeePrefix(util.OutPrefix, outPipe, outB, glog.V(8).Infof); err != nil {
if err := teePrefix(util.OutPrefix, outPipe, outB, glog.V(8).Infof); err != nil {
glog.Errorf("tee stdout: %v", err)
}
wg.Done()
......@@ -97,12 +90,31 @@ func teeSSH(s *ssh.Session, cmd string, outB io.Writer, errB io.Writer) error {
return err
}
// Run starts a command on the remote and waits for it to return.
func (s *SSHRunner) Run(cmd string) error {
glog.Infof("SSH: %s", cmd)
// RunCmd implements the Command Runner interface to run a exec.Cmd object
func (s *SSHRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
rr := &RunResult{Args: cmd.Args}
glog.Infof("(SSHRunner) Run: %v", rr.Command())
var outb, errb io.Writer
start := time.Now()
if cmd.Stdout == nil {
var so bytes.Buffer
outb = io.MultiWriter(&so, &rr.Stdout)
} else {
outb = io.MultiWriter(cmd.Stdout, &rr.Stdout)
}
if cmd.Stderr == nil {
var se bytes.Buffer
errb = io.MultiWriter(&se, &rr.Stderr)
} else {
errb = io.MultiWriter(cmd.Stderr, &rr.Stderr)
}
sess, err := s.c.NewSession()
if err != nil {
return errors.Wrap(err, "NewSession")
return rr, errors.Wrap(err, "NewSession")
}
defer func() {
......@@ -112,43 +124,21 @@ func (s *SSHRunner) Run(cmd string) error {
}
}
}()
var outB bytes.Buffer
var errB bytes.Buffer
err = teeSSH(sess, cmd, &outB, &errB)
if err != nil {
return errors.Wrapf(err, "command failed: %s\nstdout: %s\nstderr: %s", cmd, outB.String(), errB.String())
}
return nil
}
// CombinedOutputTo runs the command and stores both command
// output and error to out.
func (s *SSHRunner) CombinedOutputTo(cmd string, w io.Writer) error {
out, err := s.CombinedOutput(cmd)
if err != nil {
return err
elapsed := time.Since(start)
err = teeSSH(sess, shellquote.Join(cmd.Args...), outb, errb)
if err == nil {
// Reduce log spam
if elapsed > (1 * time.Second) {
glog.Infof("(SSHRunner) Done: %v: (%s)", rr.Command(), elapsed)
}
_, err = w.Write([]byte(out))
return err
}
// CombinedOutput runs the command on the remote and returns its combined
// standard output and standard error.
func (s *SSHRunner) CombinedOutput(cmd string) (string, error) {
glog.Infoln("Run with output:", cmd)
sess, err := s.c.NewSession()
if err != nil {
return "", errors.Wrap(err, "NewSession")
} else {
if exitError, ok := err.(*exec.ExitError); ok {
rr.ExitCode = exitError.ExitCode()
}
defer sess.Close()
var combined singleWriter
err = teeSSH(sess, cmd, &combined, &combined)
out := combined.b.String()
if err != nil {
return out, err
glog.Infof("(SSHRunner) Non-zero exit: %v: %v (%s)\n%s", rr.Command(), err, elapsed, rr.Output())
}
return out, nil
return rr, err
}
// Copy copies a file to the remote over SSH.
......@@ -198,3 +188,30 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
}
return g.Wait()
}
// teePrefix copies bytes from a reader to writer, logging each new line.
func teePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanBytes)
var line bytes.Buffer
for scanner.Scan() {
b := scanner.Bytes()
if _, err := w.Write(b); err != nil {
return err
}
if bytes.IndexAny(b, "\r\n") == 0 {
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
line.Reset()
}
continue
}
line.Write(b)
}
// Catch trailing output in case stream does not end with a newline
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
}
return nil
}
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
	"testing"
)
// TestTeePrefix verifies that teePrefix mirrors its input stream to the
// output writer and logs each prefixed line as it is completed.
func TestTeePrefix(t *testing.T) {
	var out bytes.Buffer
	var logged strings.Builder
	logSink := func(format string, args ...interface{}) {
		logged.WriteString("(" + fmt.Sprintf(format, args...) + ")")
	}
	// Simulate the primary use case: tee in the background. An io.Pipe
	// hands bytes from this goroutine to the reader safely; the previous
	// version wrote to a bytes.Buffer while the goroutine read it, which
	// is a data race (bytes.Buffer is not concurrency-safe) and could
	// also hit a premature EOF if the reader drained the buffer early.
	pr, pw := io.Pipe()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := teePrefix(":", pr, &out, logSink); err != nil {
			t.Errorf("teePrefix: %v", err)
		}
	}()
	writes := [][]byte{
		[]byte("goo"),
		[]byte("\n"),
		[]byte("g\r\n\r\n"),
		[]byte("le"),
	}
	for _, b := range writes {
		if _, err := pw.Write(b); err != nil {
			t.Fatalf("write: %v", err)
		}
	}
	// Closing the write side delivers EOF, letting teePrefix return.
	if err := pw.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}
	wg.Wait()
	gotBytes := out.Bytes()
	wantBytes := []byte("goo\ng\r\n\r\nle")
	if !bytes.Equal(gotBytes, wantBytes) {
		t.Errorf("output=%q, want: %q", gotBytes, wantBytes)
	}
	gotLog := logged.String()
	wantLog := "(:goo)(:g)(:le)"
	if gotLog != wantLog {
		t.Errorf("log=%q, want: %q", gotLog, wantLog)
	}
}
......@@ -20,11 +20,13 @@ import (
"bytes"
"encoding/base64"
"fmt"
"os/exec"
"path"
"strings"
"text/template"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/out"
)
......@@ -124,17 +126,17 @@ func (r *Containerd) Style() out.StyleEnum {
// Version retrieves the current version of this runtime
func (r *Containerd) Version() (string, error) {
ver, err := r.Runner.CombinedOutput("containerd --version")
c := exec.Command("containerd", "--version")
rr, err := r.Runner.RunCmd(c)
if err != nil {
return "", err
return "", errors.Wrapf(err, "containerd check version.")
}
// containerd github.com/containerd/containerd v1.2.0 c4446665cb9c30056f4998ed953e6d4ff22c7c39
words := strings.Split(ver, " ")
words := strings.Split(rr.Stdout.String(), " ")
if len(words) >= 4 && words[0] == "containerd" {
return strings.Replace(words[2], "v", "", 1), nil
}
return "", fmt.Errorf("unknown version: %q", ver)
return "", fmt.Errorf("unknown version: %q", rr.Stdout.String())
}
// SocketPath returns the path to the socket file for containerd
......@@ -152,13 +154,18 @@ func (r *Containerd) DefaultCNI() bool {
// Active returns if containerd is active on the host
func (r *Containerd) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service containerd")
c := exec.Command("systemctl", "is-active", "--quiet", "service", "containerd")
_, err := r.Runner.RunCmd(c)
return err == nil
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *Containerd) Available() error {
return r.Runner.Run("command -v containerd")
c := exec.Command("which", "containerd")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "check containerd availability.")
}
return nil
}
// generateContainerdConfig sets up /etc/containerd/config.toml
......@@ -174,7 +181,11 @@ func generateContainerdConfig(cr CommandRunner, imageRepository string, k8sVersi
if err := t.Execute(&b, opts); err != nil {
return err
}
return cr.Run(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | base64 -d | sudo tee %s", path.Dir(cPath), base64.StdEncoding.EncodeToString(b.Bytes()), cPath))
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | base64 -d | sudo tee %s", path.Dir(cPath), base64.StdEncoding.EncodeToString(b.Bytes()), cPath))
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "generate containerd cfg.")
}
return nil
}
// Enable idempotently enables containerd on a host
......@@ -194,18 +205,30 @@ func (r *Containerd) Enable(disOthers bool) error {
return err
}
// Otherwise, containerd will fail API requests with 'Unimplemented'
return r.Runner.Run("sudo systemctl restart containerd")
c := exec.Command("sudo", "systemctl", "restart", "containerd")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "restart containerd")
}
return nil
}
// Disable idempotently disables containerd on a host
func (r *Containerd) Disable() error {
return r.Runner.Run("sudo systemctl stop containerd")
c := exec.Command("sudo", "systemctl", "stop", "containerd")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "stop containerd")
}
return nil
}
// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("sudo ctr -n=k8s.io images import %s", path))
c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", path)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images import")
}
return nil
}
// KubeletOptions returns kubelet options for a containerd
......
......@@ -21,11 +21,14 @@ import (
"encoding/base64"
"fmt"
"html/template"
"os/exec"
"path"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
)
const (
......@@ -330,19 +333,20 @@ plugin_dirs = [
// listCRIContainers returns a list of containers using crictl
func listCRIContainers(cr CommandRunner, filter string) ([]string, error) {
var content string
var err error
var rr *command.RunResult
state := "Running"
if filter != "" {
content, err = cr.CombinedOutput(fmt.Sprintf(`sudo crictl ps -a --name=%s --state=%s --quiet`, filter, state))
c := exec.Command("sudo", "crictl", "ps", "-a", fmt.Sprintf("--name=%s", filter), fmt.Sprintf("--state=%s", state), "--quiet")
rr, err = cr.RunCmd(c)
} else {
content, err = cr.CombinedOutput(fmt.Sprintf(`sudo crictl ps -a --state=%s --quiet`, state))
rr, err = cr.RunCmd(exec.Command("sudo", "crictl", "ps", "-a", fmt.Sprintf("--state=%s", state), "--quiet"))
}
if err != nil {
return nil, err
}
var ids []string
for _, line := range strings.Split(content, "\n") {
for _, line := range strings.Split(rr.Stderr.String(), "\n") {
if line != "" {
ids = append(ids, line)
}
......@@ -356,7 +360,13 @@ func killCRIContainers(cr CommandRunner, ids []string) error {
return nil
}
glog.Infof("Killing containers: %s", ids)
return cr.Run(fmt.Sprintf("sudo crictl rm %s", strings.Join(ids, " ")))
args := append([]string{"crictl", "rm"}, ids...)
c := exec.Command("sudo", args...)
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "kill cri containers.")
}
return nil
}
// stopCRIContainers stops containers using crictl
......@@ -365,7 +375,13 @@ func stopCRIContainers(cr CommandRunner, ids []string) error {
return nil
}
glog.Infof("Stopping containers: %s", ids)
return cr.Run(fmt.Sprintf("sudo crictl stop %s", strings.Join(ids, " ")))
args := append([]string{"crictl", "rm"}, ids...)
c := exec.Command("sudo", args...)
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "stop cri containers")
}
return nil
}
// populateCRIConfig sets up /etc/crictl.yaml
......@@ -383,7 +399,11 @@ image-endpoint: unix://{{.Socket}}
if err := t.Execute(&b, opts); err != nil {
return err
}
return cr.Run(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(cPath), b.String(), cPath))
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(cPath), b.String(), cPath))
if rr, err := cr.RunCmd(c); err != nil {
return errors.Wrapf(err, "Run: %q", rr.Command())
}
return nil
}
// generateCRIOConfig sets up /etc/crio/crio.conf
......@@ -399,7 +419,12 @@ func generateCRIOConfig(cr CommandRunner, imageRepository string, k8sVersion str
if err := t.Execute(&b, opts); err != nil {
return err
}
return cr.Run(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | base64 -d | sudo tee %s", path.Dir(cPath), base64.StdEncoding.EncodeToString(b.Bytes()), cPath))
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | base64 -d | sudo tee %s", path.Dir(cPath), base64.StdEncoding.EncodeToString(b.Bytes()), cPath))
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "generateCRIOConfig.")
}
return nil
}
// criContainerLogCmd returns the command to retrieve the log for a container based on ID
......
......@@ -18,9 +18,11 @@ package cruntime
import (
"fmt"
"os/exec"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/out"
)
......@@ -44,14 +46,15 @@ func (r *CRIO) Style() out.StyleEnum {
// Version retrieves the current version of this runtime
func (r *CRIO) Version() (string, error) {
ver, err := r.Runner.CombinedOutput("crio --version")
c := exec.Command("crio", "--version")
rr, err := r.Runner.RunCmd(c)
if err != nil {
return "", err
return "", errors.Wrap(err, "crio version.")
}
// crio version 1.13.0
// commit: ""
line := strings.Split(ver, "\n")[0]
line := strings.Split(rr.Stdout.String(), "\n")[0]
return strings.Replace(line, "crio version ", "", 1), nil
}
......@@ -70,12 +73,18 @@ func (r *CRIO) DefaultCNI() bool {
// Available returns an error if it is not possible to use this runtime on a host
func (r *CRIO) Available() error {
return r.Runner.Run("command -v crio")
c := exec.Command("which", "crio")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "check crio available.")
}
return nil
}
// Active returns if CRIO is active on the host
func (r *CRIO) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service crio")
c := exec.Command("systemctl", "is-active", "--quiet", "service", "crio")
_, err := r.Runner.RunCmd(c)
return err == nil
}
......@@ -95,18 +104,29 @@ func (r *CRIO) Enable(disOthers bool) error {
if err := enableIPForwarding(r.Runner); err != nil {
return err
}
return r.Runner.Run("sudo systemctl restart crio")
if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "restart", "crio")); err != nil {
return errors.Wrapf(err, "enable crio.")
}
return nil
}
// Disable idempotently disables CRIO on a host
func (r *CRIO) Disable() error {
return r.Runner.Run("sudo systemctl stop crio")
if _, err := r.Runner.RunCmd(exec.Command("sudo", "systemctl", "stop", "crio")); err != nil {
return errors.Wrapf(err, "disable crio.")
}
return nil
}
// LoadImage loads an image into this runtime
func (r *CRIO) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("sudo podman load -i %s", path))
c := exec.Command("sudo", "podman", "load", "-i", path)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio load image")
}
return nil
}
// KubeletOptions returns kubelet options for a runtime.
......
......@@ -19,16 +19,17 @@ package cruntime
import (
"fmt"
"os/exec"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/out"
)
// CommandRunner is the subset of command.Runner this package consumes
type CommandRunner interface {
	// Run executes a shell command string, returning only an error.
	Run(string) error
	// CombinedOutput executes a shell command string and returns its
	// combined stdout/stderr output as one string, plus any error.
	CombinedOutput(string) (string, error)
	// RunCmd executes an *exec.Cmd and returns the captured result
	// (stdout, stderr, exit code) along with any error.
	RunCmd(cmd *exec.Cmd) (*command.RunResult, error)
}
// Manager is a common interface for container runtimes
......@@ -130,11 +131,14 @@ func disableOthers(me Manager, cr CommandRunner) error {
// enableIPForwarding configures IP forwarding, which is handled normally by Docker
// Context: https://github.com/kubernetes/kubeadm/issues/1062
func enableIPForwarding(cr CommandRunner) error {
if err := cr.Run("sudo modprobe br_netfilter"); err != nil {
c := exec.Command("sudo", "modprobe", "br_netfilter")
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "br_netfilter")
}
if err := cr.Run("sudo sh -c \"echo 1 > /proc/sys/net/ipv4/ip_forward\""); err != nil {
return errors.Wrap(err, "ip_forward")
c = exec.Command("sudo", "sh", "-c", "echo 1 > /proc/sys/net/ipv4/ip_forward")
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrapf(err, "ip_forward")
}
return nil
}
......@@ -17,12 +17,16 @@ limitations under the License.
package cruntime
import (
"bytes"
"fmt"
"os/exec"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/command"
)
func TestName(t *testing.T) {
......@@ -111,37 +115,93 @@ func NewFakeRunner(t *testing.T) *FakeRunner {
}
// Run a fake command!
func (f *FakeRunner) CombinedOutput(cmd string) (string, error) {
f.cmds = append(f.cmds, cmd)
func (f *FakeRunner) RunCmd(cmd *exec.Cmd) (*command.RunResult, error) {
xargs := cmd.Args
f.cmds = append(f.cmds, xargs...)
root := false
args := strings.Split(cmd, " ")
bin, args := args[0], args[1:]
bin, args := xargs[0], xargs[1:]
f.t.Logf("bin=%s args=%v", bin, args)
if bin == "sudo" {
root = true
bin, args = args[0], args[1:]
bin, args = xargs[1], xargs[2:]
}
switch bin {
case "systemctl":
return f.systemctl(args, root)
s, err := f.systemctl(args, root)
rr := &command.RunResult{}
if err != nil {
return rr, err
}
var buf bytes.Buffer
_, err = buf.WriteString(s)
if err != nil {
return rr, errors.Wrap(err, "Writing outStr to FakeRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return rr, err
case "docker":
return f.docker(args, root)
s, err := f.docker(args, root)
rr := &command.RunResult{}
if err != nil {
return rr, err
}
var buf bytes.Buffer
_, err = buf.WriteString(s)
if err != nil {
return rr, errors.Wrap(err, "Writing FakeRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return rr, err
case "crictl":
return f.crictl(args, root)
s, err := f.crictl(args, root)
rr := &command.RunResult{}
if err != nil {
return rr, err
}
var buf bytes.Buffer
_, err = buf.WriteString(s)
if err != nil {
return rr, errors.Wrap(err, "Writing to FakeRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return rr, err
case "crio":
return f.crio(args, root)
s, err := f.crio(args, root)
rr := &command.RunResult{}
if err != nil {
return rr, err
}
var buf bytes.Buffer
_, err = buf.WriteString(s)
if err != nil {
return rr, errors.Wrap(err, "Writing to FakeRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return rr, err
case "containerd":
return f.containerd(args, root)
default:
return "", nil
s, err := f.containerd(args, root)
rr := &command.RunResult{}
if err != nil {
return rr, err
}
}
// Run a fake command!
func (f *FakeRunner) Run(cmd string) error {
_, err := f.CombinedOutput(cmd)
return err
var buf bytes.Buffer
_, err = buf.WriteString(s)
if err != nil {
return rr, errors.Wrap(err, "Writing to FakeRunner's buffer")
}
rr.Stdout = buf
rr.Stderr = buf
return rr, err
default:
rr := &command.RunResult{}
return rr, nil
}
}
// docker is a fake implementation of docker
......@@ -150,7 +210,7 @@ func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
case "ps":
// ps -a --filter="name=apiserver" --format="{{.ID}}"
if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
filter := strings.Split(args[2], `"`)[1]
filter := strings.Split(args[2], `r=`)[1]
fname := strings.Split(filter, "=")[1]
ids := []string{}
f.t.Logf("fake docker: Looking for containers matching %q", fname)
......@@ -163,7 +223,8 @@ func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
return strings.Join(ids, "\n"), nil
}
case "stop":
for _, id := range args[1:] {
ids := strings.Split(args[1], " ")
for _, id := range ids {
f.t.Logf("fake docker: Stopping id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
......@@ -181,16 +242,16 @@ func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
}
case "version":
if args[1] == "--format" && args[2] == "'{{.Server.Version}}'" {
return "18.06.2-ce", nil
}
}
return "", nil
}
// crio is a fake implementation of crio
func (f *FakeRunner) crio(args []string, _ bool) (string, error) {
func (f *FakeRunner) crio(args []string, _ bool) (string, error) { //nolint (result 1 (error) is always nil)
if args[0] == "--version" {
return "crio version 1.13.0", nil
}
......@@ -202,6 +263,9 @@ func (f *FakeRunner) containerd(args []string, _ bool) (string, error) {
if args[0] == "--version" {
return "containerd github.com/containerd/containerd v1.2.0 c4446665cb9c30056f4998ed953e6d4ff22c7c39", nil
}
if args[0] != "--version" { // doing this to suppress lint "result 1 (error) is always nil"
return "", fmt.Errorf("unknown args[0]")
}
return "", nil
}
......@@ -253,7 +317,7 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) {
}
// systemctl is a fake implementation of systemctl
func (f *FakeRunner) systemctl(args []string, root bool) (string, error) {
func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // nolint result 0 (string) is always ""
action := args[0]
svcs := args[1:]
out := ""
......@@ -263,6 +327,7 @@ func (f *FakeRunner) systemctl(args []string, root bool) (string, error) {
if arg == "service" {
svcs = args[i+1:]
}
}
for _, svc := range svcs {
......@@ -314,8 +379,7 @@ func TestVersion(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
runner := NewFakeRunner(t)
r, err := New(Config{Type: tc.runtime, Runner: runner})
r, err := New(Config{Type: tc.runtime, Runner: NewFakeRunner(t)})
if err != nil {
t.Fatalf("New(%s): %v", tc.runtime, err)
}
......@@ -344,9 +408,9 @@ func TestDisable(t *testing.T) {
runtime string
want []string
}{
{"docker", []string{"sudo systemctl stop docker docker.socket"}},
{"crio", []string{"sudo systemctl stop crio"}},
{"containerd", []string{"sudo systemctl stop containerd"}},
{"docker", []string{"sudo", "systemctl", "stop", "docker", "docker.socket"}},
{"crio", []string{"sudo", "systemctl", "stop", "crio"}},
{"containerd", []string{"sudo", "systemctl", "stop", "containerd"}},
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
......
......@@ -22,6 +22,7 @@ import (
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/out"
)
......@@ -47,12 +48,12 @@ func (r *Docker) Style() out.StyleEnum {
// Version retrieves the current version of this runtime
func (r *Docker) Version() (string, error) {
// Note: the server daemon has to be running, for this call to return successfully
ver, err := r.Runner.CombinedOutput("docker version --format '{{.Server.Version}}'")
c := exec.Command("docker", "version", "--format", "'{{.Server.Version}}'")
rr, err := r.Runner.RunCmd(c)
if err != nil {
return "", err
}
return strings.Split(ver, "\n")[0], nil
return strings.Split(rr.Stdout.String(), "\n")[0], nil
}
// SocketPath returns the path to the socket file for Docker
......@@ -73,7 +74,8 @@ func (r *Docker) Available() error {
// Active returns if docker is active on the host
func (r *Docker) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service docker")
c := exec.Command("systemctl", "is-active", "--quiet", "service", "docker")
_, err := r.Runner.RunCmd(c)
return err == nil
}
......@@ -84,18 +86,31 @@ func (r *Docker) Enable(disOthers bool) error {
glog.Warningf("disableOthers: %v", err)
}
}
return r.Runner.Run("sudo systemctl start docker")
c := exec.Command("sudo", "systemctl", "start", "docker")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "enable docker.")
}
return nil
}
// Disable idempotently disables Docker on a host
func (r *Docker) Disable() error {
return r.Runner.Run("sudo systemctl stop docker docker.socket")
c := exec.Command("sudo", "systemctl", "stop", "docker", "docker.socket")
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "disable docker")
}
return nil
}
// LoadImage loads an image into this runtime
func (r *Docker) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("docker load -i %s", path))
c := exec.Command("docker", "load", "-i", path)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "loadimage docker.")
}
return nil
}
// KubeletOptions returns kubelet options for a runtime.
......@@ -108,12 +123,12 @@ func (r *Docker) KubeletOptions() map[string]string {
// ListContainers returns a list of containers
func (r *Docker) ListContainers(filter string) ([]string, error) {
filter = KubernetesContainerPrefix + filter
content, err := r.Runner.CombinedOutput(fmt.Sprintf(`docker ps -a --filter="name=%s" --format="{{.ID}}"`, filter))
rr, err := r.Runner.RunCmd(exec.Command("docker", "ps", "-a", fmt.Sprintf("--filter=name=%s", filter), "--format=\"{{.ID}}\""))
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "docker ListContainers. ")
}
var ids []string
for _, line := range strings.Split(content, "\n") {
for _, line := range strings.Split(rr.Stdout.String(), "\n") {
if line != "" {
ids = append(ids, line)
}
......@@ -127,7 +142,12 @@ func (r *Docker) KillContainers(ids []string) error {
return nil
}
glog.Infof("Killing containers: %s", ids)
return r.Runner.Run(fmt.Sprintf("docker rm -f %s", strings.Join(ids, " ")))
args := append([]string{"rm", "-f"}, ids...)
c := exec.Command("docker", args...)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "Killing containers docker.")
}
return nil
}
// StopContainers stops a running container based on ID
......@@ -136,7 +156,12 @@ func (r *Docker) StopContainers(ids []string) error {
return nil
}
glog.Infof("Stopping containers: %s", ids)
return r.Runner.Run(fmt.Sprintf("docker stop %s", strings.Join(ids, " ")))
args := append([]string{"stop"}, ids...)
c := exec.Command("docker", args...)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "stopping containers docker.")
}
return nil
}
// ContainerLogCmd returns the command to retrieve the log for a container based on ID
......
......@@ -22,11 +22,13 @@ import (
"bytes"
"fmt"
"os"
"os/exec"
"regexp"
"sort"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/cruntime"
......@@ -51,18 +53,30 @@ var importantPods = []string{
"kube-controller-manager",
}
// logRunner is the subset of CommandRunner used for logging
type logRunner interface {
	// RunCmd executes an *exec.Cmd and returns the captured result.
	RunCmd(*exec.Cmd) (*command.RunResult, error)
}
// lookbackwardsCount is how far back to look in a log for problems. This should be large enough to
// include usage messages from a failed binary, but small enough to not include irrelevant problems.
const lookBackwardsCount = 200
// Follow follows logs from multiple files in tail(1) format
func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner) error {
func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) error {
cs := []string{}
for _, v := range logCommands(r, bs, 0, true) {
cs = append(cs, v+" &")
}
cs = append(cs, "wait")
return runner.CombinedOutputTo(strings.Join(cs, " "), os.Stdout)
cmd := exec.Command("/bin/bash", "-c", strings.Join(cs, " "))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
if _, err := cr.RunCmd(cmd); err != nil {
return errors.Wrapf(err, "log follow")
}
return nil
}
// IsProblem returns whether this line matches a known problem
......@@ -71,15 +85,18 @@ func IsProblem(line string) bool {
}
// FindProblems finds possible root causes among the logs
func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner) map[string][]string {
func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) map[string][]string {
pMap := map[string][]string{}
cmds := logCommands(r, bs, lookBackwardsCount, false)
for name, cmd := range cmds {
for name := range cmds {
glog.Infof("Gathering logs for %s ...", name)
var b bytes.Buffer
err := runner.CombinedOutputTo(cmds[name], &b)
if err != nil {
glog.Warningf("failed %s: %s: %v", name, cmd, err)
c := exec.Command("/bin/bash", "-c", cmds[name])
c.Stderr = &b
c.Stdout = &b
if rr, err := cr.RunCmd(c); err != nil {
glog.Warningf("failed %s: command: %s %v output: %s", name, rr.Command(), err, rr.Output())
continue
}
scanner := bufio.NewScanner(&b)
......@@ -129,10 +146,11 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run
}
out.T(out.Empty, "==> {{.name}} <==", out.V{"name": name})
var b bytes.Buffer
err := runner.CombinedOutputTo(cmds[name], &b)
if err != nil {
glog.Errorf("failed: %v", err)
c := exec.Command("/bin/bash", "-c", cmds[name])
c.Stdout = &b
c.Stderr = &b
if rr, err := runner.RunCmd(c); err != nil {
glog.Errorf("command %s failed with error: %v output: %q", rr.Command(), err, rr.Output())
failed = append(failed, name)
continue
}
......
......@@ -56,7 +56,7 @@ var vmProblems = map[string]match{
Issues: []int{1926, 4206},
},
"HYPERKIT_NOT_FOUND": {
Regexp: re(`Driver "hyperkit" not found. Do you have the plugin binary .* accessible in your PATH?`),
Regexp: re(`Driver "hyperkit" not found.`),
Advice: "Please install the minikube hyperkit VM driver, or select an alternative --vm-driver",
URL: "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/",
HideCreateLink: true,
......
......@@ -57,5 +57,9 @@ func status() registry.State {
if err != nil {
return registry.State{Error: err, Fix: "Install docker-machine-driver-vmware", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/"}
}
_, err = exec.LookPath("vmrun")
if err != nil {
return registry.State{Error: err, Fix: "Install vmrun", Doc: "https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/"}
}
return registry.State{Installed: true, Healthy: true}
}
......@@ -29,7 +29,6 @@ import (
"github.com/docker/machine/libmachine"
"github.com/golang/glog"
"github.com/olekukonko/tablewriter"
"github.com/pkg/browser"
"github.com/pkg/errors"
"github.com/spf13/viper"
core "k8s.io/api/core/v1"
......@@ -265,9 +264,11 @@ func PrintServiceList(writer io.Writer, data [][]string) {
table.Render()
}
// WaitAndMaybeOpenService waits for a service, and opens it when running
func WaitAndMaybeOpenService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool,
wait int, interval int) error {
// WaitForService waits for a service, and return the urls when available
func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool,
wait int, interval int) ([]string, error) {
var urlList []string
// Convert "Amount of time to wait" and "interval of each check" to attempts
if interval == 0 {
interval = 1
......@@ -275,12 +276,12 @@ func WaitAndMaybeOpenService(api libmachine.API, namespace string, service strin
chkSVC := func() error { return CheckService(namespace, service) }
if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil {
return errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n <namespace>", service, namespace, service)
return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n <namespace>", service, namespace, service)
}
serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate)
if err != nil {
return errors.Wrap(err, "Check that minikube is running and that you have specified the correct namespace")
return urlList, errors.Wrap(err, "Check that minikube is running and that you have specified the correct namespace")
}
if !urlMode {
......@@ -295,22 +296,14 @@ func WaitAndMaybeOpenService(api libmachine.API, namespace string, service strin
if len(serviceURL.URLs) == 0 {
out.T(out.Sad, "service {{.namespace_name}}/{{.service_name}} has no node port", out.V{"namespace_name": namespace, "service_name": service})
return nil
return urlList, nil
}
for _, bareURLString := range serviceURL.URLs {
urlString, isHTTPSchemedURL := OptionallyHTTPSFormattedURLString(bareURLString, https)
if urlMode || !isHTTPSchemedURL {
out.T(out.Empty, urlString)
} else {
out.T(out.Celebrate, "Opening kubernetes service {{.namespace_name}}/{{.service_name}} in default browser...", out.V{"namespace_name": namespace, "service_name": service})
if err := browser.OpenURL(urlString); err != nil {
out.ErrT(out.Empty, "browser failed to open url: {{.error}}", out.V{"error": err})
url, _ := OptionallyHTTPSFormattedURLString(bareURLString, https)
urlList = append(urlList, url)
}
}
}
return nil
return urlList, nil
}
// GetServiceListByLabel returns a ServiceList by label
......
......@@ -885,6 +885,15 @@ func TestWaitAndMaybeOpenService(t *testing.T) {
api: defaultAPI,
urlMode: true,
https: true,
expected: []string{"https://127.0.0.1:1111", "https://127.0.0.1:2222"},
},
{
description: "correctly return serviceURLs, http, url mode",
namespace: "default",
service: "mock-dashboard",
api: defaultAPI,
urlMode: true,
https: false,
expected: []string{"http://127.0.0.1:1111", "http://127.0.0.1:2222"},
},
{
......@@ -903,12 +912,28 @@ func TestWaitAndMaybeOpenService(t *testing.T) {
servicesMap: serviceNamespaces,
endpointsMap: endpointNamespaces,
}
err := WaitAndMaybeOpenService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
var urlList []string
urlList, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
if test.err && err == nil {
t.Fatalf("WaitAndMaybeOpenService expected to fail for test: %v", test)
t.Fatalf("WaitForService expected to fail for test: %v", test)
}
if !test.err && err != nil {
t.Fatalf("WaitAndMaybeOpenService not expected to fail but got err: %v", err)
t.Fatalf("WaitForService not expected to fail but got err: %v", err)
}
if test.urlMode {
// check the size of the url slices
if len(urlList) != len(test.expected) {
t.Fatalf("WaitForService returned [%d] urls while expected is [%d] url", len(urlList), len(test.expected))
}
// check content of the expected url
for i, v := range test.expected {
if v != urlList[i] {
t.Fatalf("WaitForService returned [%s] urls while expected is [%s] url", urlList[i], v)
}
}
}
})
......@@ -954,12 +979,12 @@ func TestWaitAndMaybeOpenServiceForNotDefaultNamspace(t *testing.T) {
servicesMap: serviceNamespaceOther,
endpointsMap: endpointNamespaces,
}
err := WaitAndMaybeOpenService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
_, err := WaitForService(test.api, test.namespace, test.service, defaultTemplate, test.urlMode, test.https, 1, 0)
if test.err && err == nil {
t.Fatalf("WaitAndMaybeOpenService expected to fail for test: %v", test)
t.Fatalf("WaitForService expected to fail for test: %v", test)
}
if !test.err && err != nil {
t.Fatalf("WaitAndMaybeOpenService not expected to fail but got err: %v", err)
t.Fatalf("WaitForService not expected to fail but got err: %v", err)
}
})
......
......@@ -156,21 +156,21 @@ func (router *osRouter) Cleanup(route *Route) error {
if !exists {
return nil
}
command := exec.Command("sudo", "route", "-n", "delete", route.DestCIDR.String())
stdInAndOut, err := command.CombinedOutput()
cmd := exec.Command("sudo", "route", "-n", "delete", route.DestCIDR.String())
stdInAndOut, err := cmd.CombinedOutput()
if err != nil {
return err
}
message := fmt.Sprintf("%s", stdInAndOut)
glog.V(4).Infof("%s", message)
msg := fmt.Sprintf("%s", stdInAndOut)
glog.V(4).Infof("%s", msg)
re := regexp.MustCompile("^delete net ([^:]*)$")
if !re.MatchString(message) {
return fmt.Errorf("error deleting route: %s, %d", message, len(strings.Split(message, "\n")))
if !re.MatchString(msg) {
return fmt.Errorf("error deleting route: %s, %d", msg, len(strings.Split(msg, "\n")))
}
// idempotent removal of cluster domain dns
resolverFile := fmt.Sprintf("/etc/resolver/%s", route.ClusterDomain)
command = exec.Command("sudo", "rm", "-f", resolverFile)
if err := command.Run(); err != nil {
cmd = exec.Command("sudo", "rm", "-f", resolverFile)
if err := cmd.Run(); err != nil {
return fmt.Errorf("could not remove %s: %s", resolverFile, err)
}
return nil
......@@ -191,12 +191,12 @@ func writeResolverFile(route *Route) error {
if err = tmpFile.Close(); err != nil {
return err
}
command := exec.Command("sudo", "mkdir", "-p", "/etc/resolver")
if err := command.Run(); err != nil {
cmd := exec.Command("sudo", "mkdir", "-p", "/etc/resolver")
if err := cmd.Run(); err != nil {
return err
}
command = exec.Command("sudo", "cp", "-f", tmpFile.Name(), resolverFile)
if err := command.Run(); err != nil {
cmd = exec.Command("sudo", "cp", "-f", tmpFile.Name(), resolverFile)
if err := cmd.Run(); err != nil {
return err
}
return nil
......
......@@ -107,7 +107,6 @@ Requires= minikube-automount.service docker.socket
[Service]
Type=notify
`
if noPivot {
log.Warn("Using fundamentally insecure --no-pivot option")
......@@ -127,8 +126,11 @@ Environment=DOCKER_RAMDISK=yes
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
......
......@@ -17,7 +17,6 @@ limitations under the License.
package util
import (
"bufio"
"bytes"
"fmt"
"io"
......@@ -150,34 +149,6 @@ func MaybeChownDirRecursiveToMinikubeUser(dir string) error {
return nil
}
// TeePrefix copies bytes from a reader to writer, logging each new line.
func TeePrefix(prefix string, r io.Reader, w io.Writer, logger func(format string, args ...interface{})) error {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanBytes)
var line bytes.Buffer
for scanner.Scan() {
b := scanner.Bytes()
if _, err := w.Write(b); err != nil {
return err
}
if bytes.IndexAny(b, "\r\n") == 0 {
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
line.Reset()
}
continue
}
line.Write(b)
}
// Catch trailing output in case stream does not end with a newline
if line.Len() > 0 {
logger("%s%s", prefix, line.String())
}
return nil
}
// ReplaceChars returns a copy of the src slice with each string modified by the replacer
func ReplaceChars(src []string, replacer *strings.Replacer) []string {
ret := make([]string, len(src))
......
......@@ -17,10 +17,7 @@ limitations under the License.
package util
import (
"bytes"
"fmt"
"strings"
"sync"
"testing"
)
......@@ -44,44 +41,6 @@ func TestGetBinaryDownloadURL(t *testing.T) {
}
func TestTeePrefix(t *testing.T) {
var in bytes.Buffer
var out bytes.Buffer
var logged strings.Builder
logSink := func(format string, args ...interface{}) {
logged.WriteString("(" + fmt.Sprintf(format, args...) + ")")
}
// Simulate the primary use case: tee in the background. This also helps avoid I/O races.
var wg sync.WaitGroup
wg.Add(1)
go func() {
if err := TeePrefix(":", &in, &out, logSink); err != nil {
t.Errorf("TeePrefix: %v", err)
}
wg.Done()
}()
in.Write([]byte("goo"))
in.Write([]byte("\n"))
in.Write([]byte("g\r\n\r\n"))
in.Write([]byte("le"))
wg.Wait()
gotBytes := out.Bytes()
wantBytes := []byte("goo\ng\r\n\r\nle")
if !bytes.Equal(gotBytes, wantBytes) {
t.Errorf("output=%q, want: %q", gotBytes, wantBytes)
}
gotLog := logged.String()
wantLog := "(:goo)(:g)(:le)"
if gotLog != wantLog {
t.Errorf("log=%q, want: %q", gotLog, wantLog)
}
}
func TestReplaceChars(t *testing.T) {
testData := []struct {
src []string
......
......@@ -33,6 +33,30 @@ pygmentsStyle = "tango"
[permalinks]
blog = "/:section/:year/:month/:day/:slug/"
[module]
[[module.mounts]]
source = "../deploy/addons/gvisor/"
target = "content/gvisor/"
[[module.mounts]]
source = "../deploy/addons/helm-tiller/"
target = "content/helm-tiller/"
[[module.mounts]]
source = "../deploy/addons/ingress-dns/"
target = "content/ingress-dns/"
[[module.mounts]]
source = "../deploy/addons/storage-provisioner-gluster/"
target = "content/storage-provisioner-gluster/"
[[module.mounts]]
source = "../deploy/addons/layouts/"
target = "layouts"
[[module.mounts]]
source = "content/en"
target = "content"
[[module.mounts]]
source = "layouts"
target = "layouts"
## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday
[blackfriday]
plainIDAnchors = true
......@@ -68,7 +92,7 @@ weight = 1
[params]
copyright = "The Kubernetes Authors -- "
# The latest release of minikube
latest_release = "1.5.0"
latest_release = "1.5.2"
privacy_policy = ""
......
......@@ -18,11 +18,11 @@ Access the Kubernetes Dashboard running within the minikube cluster:
Once started, you can interact with your cluster using `kubectl`, just like any other Kubernetes cluster. For instance, starting a server:
`kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.4 --port=8080`
`kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4`
Exposing a service as a NodePort
`kubectl expose deployment hello-minikube --type=NodePort`
`kubectl expose deployment hello-minikube --type=NodePort --port=8080`
minikube makes it easy to open this exposed endpoint in your browser:
......
......@@ -17,7 +17,7 @@ weight: 2
If the [Brew Package Manager](https://brew.sh/) is installed, use it to download and install minikube:
```shell
brew cask install minikube
brew install minikube
```
{{% /tab %}}
......@@ -40,8 +40,8 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin
If the [Brew Package Manager](https://brew.sh/) is installed, use it to download and upgrade minikube:
```shell
rm /usr/local/bin/minikube
brew cask reinstall minikube
brew update
brew upgrade minikube
```
{{% /tab %}}
......
......@@ -20,10 +20,10 @@ minikube has a set of built-in addons that, when enabled, can be used within Kub
* [nvidia-driver-installer](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/nvidia-driver-installer/minikube)
* [nvidia-gpu-device-plugin](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu)
* [logviewer](https://github.com/ivans3/minikube-log-viewer)
* [gvisor](../deploy/addons/gvisor/README.md)
* [storage-provisioner-gluster](../deploy/addons/storage-provisioner-gluster/README.md)
* [helm-tiller](../deploy/addons/helm-tiller/README.md)
* [ingress-dns](../deploy/addons/ingress-dns/README.md)
* [gvisor](../../../gvisor/readme/)
* [storage-provisioner-gluster](../../../storage-provisioner-gluster/readme)
* [helm-tiller](../../../helm-tiller/readme)
* [ingress-dns](../../../ingress-dns/readme)
## Listing available addons
......
......@@ -36,8 +36,8 @@ import (
// TestAddons tests addons that require no special environment -- in parallel
func TestAddons(t *testing.T) {
MaybeSlowParallel(t)
MaybeParallel(t)
WaitForStartSlot(t)
profile := UniqueProfileName("addons")
ctx, cancel := context.WithTimeout(context.Background(), 40*time.Minute)
defer CleanupWithLogs(t, profile, cancel)
......
......@@ -30,7 +30,8 @@ func TestDockerFlags(t *testing.T) {
if NoneDriver() {
t.Skip("skipping: none driver does not support ssh or bundle docker")
}
MaybeSlowParallel(t)
MaybeParallel(t)
WaitForStartSlot(t)
profile := UniqueProfileName("docker-flags")
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
......
......@@ -60,6 +60,7 @@ func TestFunctional(t *testing.T) {
}{
{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
{"KubeContext", validateKubeContext}, // Racy: must come immediately after "minikube start"
{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
{"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy
}
for _, tc := range tests {
......@@ -88,11 +89,12 @@ func TestFunctional(t *testing.T) {
{"LogsCmd", validateLogsCmd},
{"MountCmd", validateMountCmd},
{"ProfileCmd", validateProfileCmd},
{"ServicesCmd", validateServicesCmd},
{"ServiceCmd", validateServiceCmd},
{"AddonsCmd", validateAddonsCmd},
{"PersistentVolumeClaim", validatePersistentVolumeClaim},
{"TunnelCmd", validateTunnelCmd},
{"SSHCmd", validateSSHCmd},
{"MySQL", validateMySQL},
}
for _, tc := range tests {
tc := tc
......@@ -142,6 +144,18 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
}
}
// validateKubectlGetPods asserts that `kubectl get pod -A` succeeds and that
// the output mentions the apiserver pod, i.e. that the control plane is up.
func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) {
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "get", "pod", "-A"))
	if err != nil {
		t.Errorf("%s failed: %v", rr.Args, err)
	}
	// NOTE(review): assumes the apiserver pod is suffixed with the node name
	// "minikube" regardless of profile — confirm for named profiles.
	podName := "kube-apiserver-minikube"
	if !strings.Contains(rr.Stdout.String(), podName) {
		// Fixed grammar of the failure message ("is not up in running").
		t.Errorf("%s is not up and running, got: %s\n", podName, rr.Stdout.String())
	}
}
// validateAddonManager asserts that the kube-addon-manager pod is deployed properly
func validateAddonManager(ctx context.Context, t *testing.T, profile string) {
// If --wait=false, this may take a couple of minutes
......@@ -284,7 +298,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
if NoneDriver() {
t.Skipf("skipping: cache unsupported by none")
}
for _, img := range []string{"busybox", "busybox:1.28.4-glibc"} {
for _, img := range []string{"busybox", "busybox:1.28.4-glibc", "mysql:5.6"} {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
......@@ -383,13 +397,77 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
}
// validateServiceCmd asserts basic "service" command functionality
func validateServicesCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list"))
func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node"))
if err != nil {
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080"))
if err != nil {
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
}
if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", 4*time.Minute); err != nil {
t.Fatalf("wait: %v", err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
}
if !strings.Contains(rr.Stdout.String(), "kubernetes") {
t.Errorf("service list got %q, wanted *kubernetes*", rr.Stdout.String())
if !strings.Contains(rr.Stdout.String(), "hello-node") {
t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String())
}
// Test --https --url mode
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
}
if rr.Stderr.String() != "" {
t.Errorf("unexpected stderr output: %s", rr.Stderr)
}
endpoint := strings.TrimSpace(rr.Stdout.String())
u, err := url.Parse(endpoint)
if err != nil {
t.Fatalf("failed to parse %q: %v", endpoint, err)
}
if u.Scheme != "https" {
t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https")
}
// Test --format=IP
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
}
if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() {
t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname())
}
	// Test a regular URL
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
}
endpoint = strings.TrimSpace(rr.Stdout.String())
u, err = url.Parse(endpoint)
if err != nil {
t.Fatalf("failed to parse %q: %v", endpoint, err)
}
if u.Scheme != "http" {
t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http")
}
t.Logf("url: %s", endpoint)
resp, err := retryablehttp.Get(endpoint)
if err != nil {
t.Fatalf("get failed: %v\nresp: %v", err, resp)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK)
}
}
......@@ -465,6 +543,27 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
}
}
// validateMySQL validates a minimalist MySQL deployment
func validateMySQL(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
}
// Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule.
mysql := func() error {
names, err := PodWait(ctx, t, profile, "default", "app=mysql", 5*time.Second)
if err != nil {
return err
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;"))
return err
}
if err = retry.Expo(mysql, 1*time.Second, 2*time.Minute); err != nil {
t.Errorf("mysql failing: %v", err)
}
}
// startHTTPProxy runs a local http proxy and sets the env vars for it.
func startHTTPProxy(t *testing.T) (*http.Server, error) {
port, err := freeport.GetFreePort()
......
......@@ -27,7 +27,9 @@ import (
)
func TestGuestEnvironment(t *testing.T) {
MaybeSlowParallel(t)
MaybeParallel(t)
WaitForStartSlot(t)
profile := UniqueProfileName("guest")
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
defer CleanupWithLogs(t, profile, cancel)
......
......@@ -30,10 +30,14 @@ func TestGvisorAddon(t *testing.T) {
if NoneDriver() {
t.Skip("Can't run containerd backend with none driver")
}
MaybeSlowParallel(t)
if !*enableGvisor {
t.Skip("skipping test because --gvisor=false")
}
MaybeParallel(t)
WaitForStartSlot(t)
profile := UniqueProfileName("gvisor")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute)
defer func() {
if t.Failed() {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "gvisor", "-n", "kube-system"))
......
......@@ -42,8 +42,10 @@ import (
)
var (
antiRaceCounter = 0
antiRaceMutex = &sync.Mutex{}
// startTimes is a list of startup times, to guarantee --start-offset
startTimes = []time.Time{}
// startTimesMutex is a lock to update startTimes without a race condition
startTimesMutex = &sync.Mutex{}
)
// RunResult stores the result of an cmd.Run call
......@@ -177,7 +179,7 @@ func CleanupWithLogs(t *testing.T, profile string, cancel context.CancelFunc) {
t.Helper()
if t.Failed() && *postMortemLogs {
t.Logf("%s failed, collecting logs ...", t.Name())
rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "-n", "100"))
rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems"))
if err != nil {
t.Logf("failed logs error: %v", err)
}
......@@ -227,9 +229,10 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
f := func() (bool, error) {
pods, err := client.CoreV1().Pods(ns).List(listOpts)
if err != nil {
t.Logf("Pod(%s).List(%v) returned error: %v", ns, selector, err)
// Don't bother to retry: something is very wrong.
return true, err
t.Logf("WARNING: pod list for %q %q returned: %v", ns, selector, err)
// Don't return the error upwards so that this is retried, in case the apiserver is rescheduled
podStart = time.Time{}
return false, nil
}
if len(pods.Items) == 0 {
podStart = time.Time{}
......@@ -331,25 +334,32 @@ func MaybeParallel(t *testing.T) {
t.Parallel()
}
// MaybeSlowParallel is a terrible workaround for tests which start clusters in a race-filled world
// TODO: Try removing this hack once certificates are deployed per-profile
func MaybeSlowParallel(t *testing.T) {
// NoneDriver shouldn't parallelize "minikube start"
// WaitForStartSlot enforces --start-offset to avoid startup race conditions
func WaitForStartSlot(t *testing.T) {
// Not parallel
if NoneDriver() {
return
}
antiRaceMutex.Lock()
antiRaceCounter++
antiRaceMutex.Unlock()
wakeup := time.Now()
startTimesMutex.Lock()
if len(startTimes) > 0 {
nextStart := startTimes[len(startTimes)-1].Add(*startOffset)
// Ignore nextStart if it is in the past - to guarantee offset for next caller
if time.Now().Before(nextStart) {
wakeup = nextStart
}
}
startTimes = append(startTimes, wakeup)
startTimesMutex.Unlock()
if antiRaceCounter > 0 {
// Slow enough to offset start, but not slow to be a major source of delay
penalty := time.Duration(5*antiRaceCounter) * time.Second
t.Logf("MaybeSlowParallel: Sleeping %s to avoid start race ...", penalty)
time.Sleep(penalty)
if time.Now().Before(wakeup) {
d := time.Until(wakeup)
t.Logf("Waiting for start slot at %s (sleeping %s) ...", wakeup, d)
time.Sleep(d)
} else {
t.Logf("No need to wait for start slot, it is already %s", time.Now())
}
t.Parallel()
}
// killProcessFamily kills a pid and all of its children
......
......@@ -32,6 +32,8 @@ var defaultDriver = flag.String("expected-default-driver", "", "Expected default
// Flags for faster local integration testing
var forceProfile = flag.String("profile", "", "force tests to run against a particular profile")
var cleanup = flag.Bool("cleanup", true, "cleanup failed test run")
var enableGvisor = flag.Bool("gvisor", false, "run gvisor integration test (slow)")
var startOffset = flag.Duration("start-offset", 30*time.Second, "how much time to offset between cluster starts")
var postMortemLogs = flag.Bool("postmortem-logs", true, "show logs after a failed test run")
// Paths to files - normally set for CI
......
......@@ -23,6 +23,7 @@ import (
"fmt"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
......@@ -74,7 +75,8 @@ func TestStartStop(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
MaybeSlowParallel(t)
MaybeParallel(t)
WaitForStartSlot(t)
if !strings.Contains(tc.name, "docker") && NoneDriver() {
t.Skipf("skipping %s - incompatible with none driver", t.Name())
......@@ -84,7 +86,7 @@ func TestStartStop(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 40*time.Minute)
defer CleanupWithLogs(t, profile, cancel)
startArgs := append([]string{"start", "-p", profile, "--alsologtostderr", "-v=3"}, tc.args...)
startArgs := append([]string{"start", "-p", profile, "--alsologtostderr", "-v=3", "--wait=true"}, tc.args...)
startArgs = append(startArgs, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
......@@ -102,9 +104,27 @@ func TestStartStop(t *testing.T) {
t.Fatalf("%s failed: %v", rr.Args, err)
}
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 2*time.Minute); err != nil {
names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 4*time.Minute)
if err != nil {
t.Fatalf("wait: %v", err)
}
// Use this pod to confirm that the runtime resource limits are sane
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "/bin/sh", "-c", "ulimit -n"))
if err != nil {
t.Fatalf("ulimit: %v", err)
}
got, err := strconv.ParseInt(strings.TrimSpace(rr.Stdout.String()), 10, 64)
if err != nil {
t.Errorf("ParseInt(%q): %v", rr.Stdout.String(), err)
}
// Arbitrary value set by some container runtimes. If higher, apps like MySQL may make bad decisions.
expected := int64(1048576)
if got != expected {
t.Errorf("'ulimit -n' returned %d, expected %d", got, expected)
}
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
......@@ -117,6 +137,7 @@ func TestStartStop(t *testing.T) {
t.Errorf("status = %q; want = %q", got, state.Stopped)
}
WaitForStartSlot(t)
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
// Explicit fatal so that failures don't move directly to deletion
......@@ -134,11 +155,13 @@ func TestStartStop(t *testing.T) {
t.Errorf("status = %q; want = %q", got, state.Running)
}
if !*cleanup {
// Normally handled by cleanuprofile, but not fatal there
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
}
}
})
}
})
......
# Minimal MySQL test fixture used by the integration tests (validateMySQL):
# a ClusterIP Service fronting a single-replica Deployment of mysql:5.6.
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    # Recreate (not RollingUpdate) so two mysqld instances never run at once.
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
        # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
......@@ -39,9 +39,10 @@ import (
// the oldest supported k8s version and then runs the current head minikube
// and it tries to upgrade from the older supported k8s to the newest supported k8s
func TestVersionUpgrade(t *testing.T) {
MaybeParallel(t)
WaitForStartSlot(t)
profile := UniqueProfileName("vupgrade")
ctx, cancel := context.WithTimeout(context.Background(), 55*time.Minute)
MaybeSlowParallel(t)
defer CleanupWithLogs(t, profile, cancel)
......@@ -89,6 +90,7 @@ func TestVersionUpgrade(t *testing.T) {
t.Errorf("status = %q; want = %q", got, state.Stopped.String())
}
WaitForStartSlot(t)
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册