未验证 提交 ec5dfbb1 编写于 作者: M Medya Ghazizadeh 提交者: GitHub

Merge pull request #6959 from afbjorklund/reportcard-cleanup

Improve the go reportcard for minikube
......@@ -88,6 +88,24 @@ func init() {
RootCmd.AddCommand(deleteCmd)
}
// deleteContainersAndVolumes removes every Docker container and volume
// that minikube created (labelled CreatedByLabelKey=true). Failures are
// logged but never fatal, since there may simply be nothing to delete.
func deleteContainersAndVolumes() {
	label := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")

	if errs := oci.DeleteContainersByLabel(oci.Docker, label); len(errs) > 0 {
		// it will error if there is no container to delete
		glog.Infof("error delete containers by label %q (might be okay): %+v", label, errs)
	}

	if errs := oci.DeleteAllVolumesByLabel(oci.Docker, label); len(errs) > 0 {
		// it will not error if there is nothing to delete
		glog.Warningf("error delete volumes by label %q (might be okay): %+v", label, errs)
	}

	if errs := oci.PruneAllVolumesByLabel(oci.Docker, label); len(errs) > 0 {
		// it will not error if there is nothing to delete
		glog.Warningf("error pruning volumes by label %q (might be okay): %+v", label, errs)
	}
}
// runDelete handles the executes the flow of "minikube delete"
func runDelete(cmd *cobra.Command, args []string) {
if len(args) > 0 {
......@@ -110,23 +128,9 @@ func runDelete(cmd *cobra.Command, args []string) {
}
if deleteAll {
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will error if there is no container to delete
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
}
deleteContainersAndVolumes()
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}
errs = DeleteProfiles(profilesToDelete)
errs := DeleteProfiles(profilesToDelete)
if len(errs) > 0 {
HandleDeletionErrors(errs)
} else {
......@@ -185,13 +189,11 @@ func DeleteProfiles(profiles []*config.Profile) []error {
return errs
}
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
func deleteProfileContainersAndVolumes(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
......@@ -202,6 +204,13 @@ func deleteProfile(profile *config.Profile) error {
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
deleteProfileContainersAndVolumes(profile.Name)
api, err := machine.NewAPIClient()
if err != nil {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))
......@@ -230,37 +239,48 @@ func deleteProfile(profile *config.Profile) error {
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
}
deleteHosts(api, cc)
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
if err := deleteConfig(profile.Name); err != nil {
return err
}
if err := deleteContext(profile.Name); err != nil {
return err
}
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}
func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
if err = machine.DeleteHost(api, machineName); err != nil {
if err := machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
}
}
}
}
}
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
if err := config.DeleteProfile(profile.Name); err != nil {
func deleteConfig(profileName string) error {
if err := config.DeleteProfile(profileName); err != nil {
if config.IsNotExist(err) {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("failed to remove profile %v", err))
delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
return DeletionError{Err: delErr, Errtype: Fatal}
}
if err := deleteContext(profile.Name); err != nil {
return err
}
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}
......
......@@ -337,14 +337,7 @@ func runStart(cmd *cobra.Command, args []string) {
ssh.SetDefaultClient(ssh.External)
}
var existingAddons map[string]bool
if viper.GetBool(installAddons) {
existingAddons = map[string]bool{}
if existing != nil && existing.Addons != nil {
existingAddons = existing.Addons
}
}
kubeconfig, err := node.Start(mc, n, true, existingAddons)
kubeconfig, err := startNode(existing, mc, n)
if err != nil {
exit.WithError("Starting node", err)
}
......@@ -389,6 +382,17 @@ func displayEnviron(env []string) {
}
}
// startNode starts the given node. When the user asked for addon
// installation, addons enabled on a pre-existing cluster are carried
// over; otherwise an empty addon map is passed so installation still runs.
func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) {
	var addons map[string]bool
	if viper.GetBool(installAddons) {
		if existing != nil && existing.Addons != nil {
			addons = existing.Addons
		} else {
			addons = map[string]bool{}
		}
	}
	return node.Start(mc, n, true, addons)
}
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
if kcs.KeepContext {
out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
......
......@@ -28,9 +28,7 @@ import (
"k8s.io/minikube/pkg/minikube/cruntime"
)
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) (map[string]string, error) {
k8s := mc.KubernetesConfig
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
......@@ -79,7 +77,18 @@ func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
extraOpts["feature-gates"] = kubeletFeatureArgs
}
return extraOpts, nil
}
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
b := bytes.Buffer{}
extraOpts, err := extraKubeletOpts(mc, nc, r)
if err != nil {
return nil, err
}
k8s := mc.KubernetesConfig
opts := struct {
ExtraOptions string
ContainerRuntime string
......
......@@ -41,6 +41,7 @@ import (
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
......@@ -421,10 +422,9 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
// stop kubelet to avoid "Text File Busy" error
if rr, err := k.c.RunCmd(stopCmd); err != nil {
glog.Warningf("unable to stop kubelet: %s command: %q output: %q", err, rr.Command(), rr.Output())
if err := stopKubelet(k.c); err != nil {
glog.Warningf("unable to stop kubelet: %s", err)
}
if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil {
......@@ -436,24 +436,46 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
cniFile = []byte(defaultCNIConfig)
}
files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile)
if err := copyFiles(k.c, files); err != nil {
return err
}
if err := startKubelet(k.c); err != nil {
return err
}
return nil
}
// stopKubelet stops the kubelet systemd service on the node. The pgrep
// guard means the shell command (and hence this function) errors when no
// kubelet process is running — callers treat that as non-fatal.
func stopKubelet(runner command.Runner) error {
	cmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
	rr, err := runner.RunCmd(cmd)
	if err != nil {
		return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output())
	}
	return nil
}
func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
// Combine mkdir request into a single call to reduce load
dirs := []string{}
for _, f := range files {
dirs = append(dirs, f.GetTargetDir())
}
args := append([]string{"mkdir", "-p"}, dirs...)
if _, err := k.c.RunCmd(exec.Command("sudo", args...)); err != nil {
if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil {
return errors.Wrap(err, "mkdir")
}
for _, f := range files {
if err := k.c.Copy(f); err != nil {
if err := runner.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
}
return nil
}
if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")); err != nil {
func startKubelet(runner command.Runner) error {
startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")
if _, err := runner.RunCmd(startCmd); err != nil {
return errors.Wrap(err, "starting kubelet")
}
return nil
......
......@@ -224,43 +224,60 @@ func (f *FakeRunner) Remove(assets.CopyableFile) error {
return nil
}
// dockerPs emulates `docker ps -a --filter="name=..."`, returning the
// IDs of fake containers whose name contains the filter value, one per
// line. Any other argument shape yields empty output.
func (f *FakeRunner) dockerPs(args []string) (string, error) {
	// ps -a --filter="name=apiserver" --format="{{.ID}}"
	if args[1] != "-a" || !strings.HasPrefix(args[2], "--filter") {
		return "", nil
	}
	filter := strings.Split(args[2], `r=`)[1]
	fname := strings.Split(filter, "=")[1]
	f.t.Logf("fake docker: Looking for containers matching %q", fname)
	matches := []string{}
	for id, cname := range f.containers {
		if strings.Contains(cname, fname) {
			matches = append(matches, id)
		}
	}
	f.t.Logf("fake docker: Found containers: %v", matches)
	return strings.Join(matches, "\n"), nil
}
// dockerStop emulates `docker stop <ids>`: every space-separated ID in
// args[1] must be a known fake container, and stopping removes it from
// the container map. An unknown ID aborts with "no such container".
func (f *FakeRunner) dockerStop(args []string) (string, error) {
	for _, id := range strings.Split(args[1], " ") {
		f.t.Logf("fake docker: Stopping id %q", id)
		if f.containers[id] == "" {
			return "", fmt.Errorf("no such container")
		}
		delete(f.containers, id)
	}
	return "", nil
}
// dockerRm emulates `docker rm -f <ids>`, deleting each named fake
// container. args[1] is the "-f" flag, so IDs begin at args[2]. An
// unknown ID aborts with "no such container".
func (f *FakeRunner) dockerRm(args []string) (string, error) {
	// Skip "-f" argument
	ids := args[2:]
	for _, id := range ids {
		f.t.Logf("fake docker: Removing id %q", id)
		if f.containers[id] == "" {
			return "", fmt.Errorf("no such container")
		}
		delete(f.containers, id)
	}
	return "", nil
}
// docker is a fake implementation of docker
func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
switch cmd := args[0]; cmd {
case "ps":
// ps -a --filter="name=apiserver" --format="{{.ID}}"
if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
filter := strings.Split(args[2], `r=`)[1]
fname := strings.Split(filter, "=")[1]
ids := []string{}
f.t.Logf("fake docker: Looking for containers matching %q", fname)
for id, cname := range f.containers {
if strings.Contains(cname, fname) {
ids = append(ids, id)
}
}
f.t.Logf("fake docker: Found containers: %v", ids)
return strings.Join(ids, "\n"), nil
}
return f.dockerPs(args)
case "stop":
ids := strings.Split(args[1], " ")
for _, id := range ids {
f.t.Logf("fake docker: Stopping id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
}
return f.dockerStop(args)
case "rm":
// Skip "-f" argument
for _, id := range args[2:] {
f.t.Logf("fake docker: Removing id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
return f.dockerRm(args)
}
case "version":
if args[1] == "--format" && args[2] == "{{.Server.Version}}" {
......
......@@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/juju/mutex"
"github.com/pkg/errors"
......@@ -120,6 +121,20 @@ func saveToTarFile(iname, rawDest string) error {
return errors.Wrapf(err, "nil image for %s", iname)
}
tag, err := name.NewTag(iname, name.WeakValidation)
if err != nil {
return errors.Wrap(err, "newtag")
}
err = writeImage(img, dst, tag)
if err != nil {
return err
}
glog.Infof("%s exists", dst)
return nil
}
func writeImage(img v1.Image, dst string, tag name.Tag) error {
glog.Infoln("opening: ", dst)
f, err := ioutil.TempFile(filepath.Dir(dst), filepath.Base(dst)+".*.tmp")
if err != nil {
......@@ -135,10 +150,7 @@ func saveToTarFile(iname, rawDest string) error {
}
}
}()
tag, err := name.NewTag(iname, name.WeakValidation)
if err != nil {
return errors.Wrap(err, "newtag")
}
err = tarball.Write(tag, img, f)
if err != nil {
return errors.Wrap(err, "write")
......@@ -151,6 +163,5 @@ func saveToTarFile(iname, rawDest string) error {
if err != nil {
return errors.Wrap(err, "rename")
}
glog.Infof("%s exists", dst)
return nil
}
......@@ -59,7 +59,7 @@ var defaultClusterConfig = config.ClusterConfig{
Name: viper.GetString("profile"),
Driver: driver.Mock,
DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
Nodes: []config.Node{config.Node{Name: "minikube"}},
Nodes: []config.Node{{Name: "minikube"}},
}
func TestCreateHost(t *testing.T) {
......
......@@ -73,6 +73,45 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
// check if need to re-run docker-env
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
h, err = recreateIfNeeded(api, cc, n, h)
if err != nil {
return h, err
}
e := engineOptions(cc)
if len(e.Env) > 0 {
h.HostOptions.EngineOptions.Env = e.Env
glog.Infof("Detecting provisioner ...")
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return h, errors.Wrap(err, "detecting provisioner")
}
if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
return h, errors.Wrap(err, "provision")
}
}
if driver.IsMock(h.DriverName) {
return h, nil
}
if err := postStartSetup(h, cc); err != nil {
return h, errors.Wrap(err, "post-start")
}
if driver.BareMetal(h.Driver.DriverName()) {
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
return h, nil
}
glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
if err := h.ConfigureAuth(); err != nil {
return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
}
return h, ensureSyncedGuestClock(h, cc.Driver)
}
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
s, err := h.Driver.GetState()
if err != nil || s == state.Stopped || s == state.None {
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
......@@ -118,37 +157,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
}
}
e := engineOptions(cc)
if len(e.Env) > 0 {
h.HostOptions.EngineOptions.Env = e.Env
glog.Infof("Detecting provisioner ...")
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return h, errors.Wrap(err, "detecting provisioner")
}
if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
return h, errors.Wrap(err, "provision")
}
}
if driver.IsMock(h.DriverName) {
return h, nil
}
if err := postStartSetup(h, cc); err != nil {
return h, errors.Wrap(err, "post-start")
}
if driver.BareMetal(h.Driver.DriverName()) {
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
return h, nil
}
glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
if err := h.ConfigureAuth(); err != nil {
return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
}
return h, ensureSyncedGuestClock(h, cc.Driver)
return h, nil
}
// maybeWarnAboutEvalEnv wil warn user if they need to re-eval their docker-env, podman-env
......@@ -222,6 +231,41 @@ func adjustGuestClock(h hostRunner, t time.Time) error {
return err
}
// machineExistsState reports that the machine does not exist when the
// driver state is None; any other state passes the driver error through.
func machineExistsState(s state.State, err error) (bool, error) {
	if s != state.None {
		return true, err
	}
	return false, ErrorMachineNotExist
}
// machineExistsError reports that the machine does not exist when err is
// exactly the driver's sentinel error drverr; otherwise the error is
// passed through. The state parameter is unused here but kept so every
// machineExists* helper shares a uniform shape for the dispatch switch.
//
// Fix: dropped the redundant `_ = s` no-op — unused function parameters
// are legal in Go and need no blank assignment.
func machineExistsError(s state.State, err error, drverr error) (bool, error) {
	if err == drverr {
		// if the error matches driver error
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExistsMessage reports that the machine does not exist when the
// driver state is None or the error text equals the driver-specific
// message msg; otherwise the error is passed through.
func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
	gone := s == state.None || (err != nil && err.Error() == msg)
	if gone {
		// if the error contains the message
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExistsDocker reports nonexistence for the docker (kic) driver:
// both Error and None states indicate an interrupted `minikube start`
// left no usable machine behind.
func machineExistsDocker(s state.State, err error) (bool, error) {
	switch s {
	case state.Error:
		// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
		return false, ErrorMachineNotExist
	case state.None:
		// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExists checks if virtual machine does not exist
// if the virtual machine exists, return true
func machineExists(d string, s state.State, err error) (bool, error) {
......@@ -230,54 +274,23 @@ func machineExists(d string, s state.State, err error) (bool, error) {
}
switch d {
case driver.HyperKit:
if s == state.None || (err != nil && err.Error() == "connection is shut down") {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsMessage(s, err, "connection is shut down")
case driver.HyperV:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.KVM2:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.None:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.Parallels:
if err != nil && err.Error() == "machine does not exist" {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsMessage(s, err, "connection is shut down")
case driver.VirtualBox:
if err == virtualbox.ErrMachineNotExist {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsError(s, err, virtualbox.ErrMachineNotExist)
case driver.VMware:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.VMwareFusion:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.Docker:
if s == state.Error {
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
return false, ErrorMachineNotExist
} else if s == state.None {
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
return false, ErrorMachineNotExist
}
return true, err
return machineExistsDocker(s, err)
case driver.Mock:
if s == state.Error {
return false, ErrorMachineNotExist
......
......@@ -44,16 +44,19 @@ type LoadBalancerEmulator struct {
patchConverter patchConverter
}
// PatchServices will update all load balancer services by applying the
// emulator's updateService step to each one, returning the names of the
// services it touched.
func (l *LoadBalancerEmulator) PatchServices() ([]string, error) {
	return l.applyOnLBServices(l.updateService)
}
// PatchServiceIP will patch the given service so it reports the given ip,
// using restClient to submit the patch.
func (l *LoadBalancerEmulator) PatchServiceIP(restClient rest.Interface, svc core.Service, ip string) error {
	// TODO: do not ignore result — updateServiceIP's first return value is discarded here.
	_, err := l.updateServiceIP(restClient, svc, ip)
	return err
}
// Cleanup will clean up all load balancer services by applying the
// emulator's cleanupService step to each one, returning the names of the
// services it cleaned.
func (l *LoadBalancerEmulator) Cleanup() ([]string, error) {
	return l.applyOnLBServices(l.cleanupService)
}
......@@ -143,6 +146,7 @@ func (l *LoadBalancerEmulator) cleanupService(restClient rest.Interface, svc cor
}
// NewLoadBalancerEmulator creates a new LoadBalancerEmulator
func NewLoadBalancerEmulator(corev1Client typed_core.CoreV1Interface) LoadBalancerEmulator {
return LoadBalancerEmulator{
coreV1Client: corev1Client,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册