Commit ae92c289 authored by J jjanik

add csi-hostpath-driver addon

Parent c5ae7b95
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-attacher
  namespace: kube-system
  labels:
    app: csi-hostpath-attacher
spec:
  selector:
    app: csi-hostpath-attacher
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-attacher
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-attacher"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-attacher
  template:
    metadata:
      labels:
        app: csi-hostpath-attacher
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-attacher
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v3.0.0-rc1
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: hostpath.csi.k8s.io
  namespace: kube-system
spec:
  # Supports persistent and ephemeral inline volumes.
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
  # To determine at runtime which mode a volume uses, pod info and its
  # "csi.storage.k8s.io/ephemeral" entry are needed.
  podInfoOnMount: true
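As an illustration of the Ephemeral mode declared above, a Pod can request an inline CSI volume directly from this driver; because podInfoOnMount is true, the kubelet passes the pod identity and the "csi.storage.k8s.io/ephemeral" entry to the driver on mount. The Pod below is a minimal hypothetical sketch, not part of this commit:

apiVersion: v1
kind: Pod
metadata:
  # hypothetical example Pod, not part of this commit
  name: inline-volume-demo
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - mountPath: /data
          name: scratch
  volumes:
    - name: scratch
      # inline (Ephemeral) use of the driver; no PVC involved
      csi:
        driver: hostpath.csi.k8s.io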
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml
#
# Service defined here, plus serviceName below in StatefulSet,
# are needed only because of condition explained in
# https://github.com/kubernetes/kubernetes/issues/69608
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpathplugin
  namespace: kube-system
  labels:
    app: csi-hostpathplugin
spec:
  selector:
    app: csi-hostpathplugin
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpathplugin
  namespace: kube-system
spec:
  serviceName: "csi-hostpathplugin"
  # One replica only:
  # The host path driver only works when everything runs
  # on a single node. We achieve that by starting it once and then
  # co-locating all other pods via inter-pod affinity.
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpathplugin
  template:
    metadata:
      labels:
        app: csi-hostpathplugin
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      containers:
        - name: node-driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir
        - name: hostpath
          image: quay.io/k8scsi/hostpathplugin:v1.4.0-rc2
          args:
            - "--drivername=hostpath.csi.k8s.io"
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(KUBE_NODE_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          ports:
            - containerPort: 9898
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
              name: mountpoint-dir
            - mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
              name: plugins-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir
            - mountPath: /dev
              name: dev-dir
        - name: liveness-probe
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          image: quay.io/k8scsi/livenessprobe:v1.1.0
          args:
            - --csi-address=/csi/csi.sock
            - --health-port=9898
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
          name: mountpoint-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
          name: plugins-dir
        - hostPath:
            # 'path' is where PV data is persisted on the host.
            # Using /tmp is also possible, but then the PVs will not be available
            # after plugin container re-creation or a host reboot.
            path: /var/lib/csi-hostpath-data/
            type: DirectoryOrCreate
          name: csi-data-dir
        - hostPath:
            path: /dev
            type: Directory
          name: dev-dir
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-provisioner
  namespace: kube-system
  labels:
    app: csi-hostpath-provisioner
spec:
  selector:
    app: csi-hostpath-provisioner
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-provisioner
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-provisioner"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-provisioner
  template:
    metadata:
      labels:
        app: csi-hostpath-provisioner
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-provisioner
      containers:
        - name: csi-provisioner
          image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v2.0.0-rc2
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
            - --feature-gates=Topology=true
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-resizer
  namespace: kube-system
  labels:
    app: csi-hostpath-resizer
spec:
  selector:
    app: csi-hostpath-resizer
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-resizer
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-resizer"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-resizer
  template:
    metadata:
      labels:
        app: csi-hostpath-resizer
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-resizer
      containers:
        - name: csi-resizer
          image: quay.io/k8scsi/csi-resizer:v0.6.0-rc1
          args:
            - -v=5
            - -csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-snapshotter
  namespace: kube-system
  labels:
    app: csi-hostpath-snapshotter
spec:
  selector:
    app: csi-hostpath-snapshotter
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-snapshotter
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-snapshotter"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-snapshotter
  template:
    metadata:
      labels:
        app: csi-hostpath-snapshotter
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccount: csi-snapshotter
      containers:
        - name: csi-snapshotter
          image: quay.io/k8scsi/csi-snapshotter:v2.1.0
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
# deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-hostpath-sc
provisioner: hostpath.csi.k8s.io # csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate
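To exercise the driver end to end, a workload claims storage through this class; with volumeBindingMode: Immediate, the external-provisioner creates the backing volume as soon as the claim appears. The claim below is a hypothetical sketch, not part of this commit:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # hypothetical example claim, not part of this commit
  name: csi-hostpath-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  # the StorageClass installed by this addon
  storageClassName: csi-hostpath-sc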
# deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml
#
# This YAML file contains all RBAC objects that are necessary to run the external
# CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   attacher, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher
  namespace: kube-system
---
# Attacher must be able to work with PVs, CSINodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  # Secret permission is optional.
  # Enable it if you need values from a secret, for example if you have the key
  # `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters;
  # see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
---
# Attacher must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-attacher-cfg
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    namespace: kube-system
roleRef:
  kind: Role
  name: external-attacher-cfg
  apiGroup: rbac.authorization.k8s.io
# deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml
#
# This YAML file contains all RBAC objects that are necessary to run the external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   provisioner, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-provisioner-cfg
rules:
  # Only one of the following rules for endpoints or leases is required based on
  # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
# deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml
#
# This YAML file contains all RBAC objects that are necessary to run the external
# CSI resizer.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   resizer, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-resizer
  namespace: kube-system
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-resizer-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-resizer-runner
  apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-resizer-cfg
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    namespace: kube-system
roleRef:
  kind: Role
  name: external-resizer-cfg
  apiGroup: rbac.authorization.k8s.io
# deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml
#
# RBAC file for the snapshot controller.
#
# The snapshot controller implements the control loop for CSI snapshot functionality.
# It should be installed as part of the base Kubernetes distribution in an appropriate
# namespace for components implementing base system functionality. For installing with
# Vanilla Kubernetes, kube-system makes sense for the namespace.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-snapshotter
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # rename if there are conflicts
  name: csi-snapshotter-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots/status"]
    verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # change the name also here if the ClusterRole gets renamed
  name: csi-snapshotter-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: csi-snapshotter-leaderelection
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-leaderelection
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: kube-system
roleRef:
  kind: Role
  name: csi-snapshotter-leaderelection
  apiGroup: rbac.authorization.k8s.io
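Once the separate volumesnapshots addon has installed the snapshot CRDs, the csi-snapshotter sidecar acts on VolumeSnapshot objects. The sketch below is hypothetical; the VolumeSnapshotClass name is assumed and is not created by this commit, and it references the example PVC shown earlier:

apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  # hypothetical example snapshot, not part of this commit
  name: csi-hostpath-snapshot
  namespace: default
spec:
  # assumed VolumeSnapshotClass name; not defined in this commit
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: csi-hostpath-pvc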
@@ -33,10 +33,11 @@ type Addon struct {
 
 // addonPodLabels holds the pod label that will be used to verify if the addon is enabled
 var addonPodLabels = map[string]string{
-	"ingress":  "app.kubernetes.io/name=ingress-nginx",
-	"registry": "kubernetes.io/minikube-addons=registry",
-	"gvisor":   "kubernetes.io/minikube-addons=gvisor",
-	"gcp-auth": "kubernetes.io/minikube-addons=gcp-auth",
+	"ingress":             "app.kubernetes.io/name=ingress-nginx",
+	"registry":            "kubernetes.io/minikube-addons=registry",
+	"gvisor":              "kubernetes.io/minikube-addons=gvisor",
+	"gcp-auth":            "kubernetes.io/minikube-addons=gcp-auth",
+	"csi-hostpath-driver": "kubernetes.io/minikube-addons=csi-hostpath-driver",
 }
 
 // Addons is a list of all addons
@@ -175,4 +176,10 @@ var Addons = []*Addon{
 		set:       SetBool,
 		callbacks: []setFn{enableOrDisableAddon},
 	},
+	{
+		name:        "csi-hostpath-driver",
+		set:         SetBool,
+		validations: []setFn{IsVolumesnapshotsEnabled},
+		callbacks:   []setFn{enableOrDisableAddon, verifyAddonStatus},
+	},
 }
@@ -18,11 +18,16 @@ package addons
 
 import (
 	"fmt"
+	"strconv"
 
+	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/out"
 )
 
+const volumesnapshotsAddon = "volumesnapshots"
+
 // containerdOnlyMsg is the message shown when a containerd-only addon is enabled
 const containerdOnlyAddonMsg = `
 This addon can only be enabled with the containerd runtime backend. To enable this backend, please first stop minikube with:
@@ -33,6 +38,12 @@ and then start minikube again with the following flags:
 
 minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock`
 
+// volumesnapshotsDisabledMsg is the message shown when csi-hostpath-driver addon is enabled without the volumesnapshots addon
+const volumesnapshotsDisabledMsg = `[WARNING] For full functionality, the 'csi-hostpath-driver' addon requires the 'volumesnapshots' addon to be enabled.
+
+You can enable 'volumesnapshots' addon by running: 'minikube addons enable volumesnapshots'
+`
+
 // IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd
 func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
 	r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
@@ -46,6 +57,21 @@ func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
 	return nil
 }
 
+// IsVolumesnapshotsEnabled is a validator that prints out a warning if the volumesnapshots addon
+// is disabled (does not return any errors!)
+func IsVolumesnapshotsEnabled(cc *config.ClusterConfig, _, value string) error {
+	isCsiDriverEnabled, _ := strconv.ParseBool(value)
+	// assets.Addons[].IsEnabled() returns the current status of the addon or default value.
+	// config.AddonList contains list of addons to be enabled.
+	isVolumesnapshotsEnabled := assets.Addons[volumesnapshotsAddon].IsEnabled(cc) || contains(config.AddonList, volumesnapshotsAddon)
+	if isCsiDriverEnabled && !isVolumesnapshotsEnabled {
+		// just print out a warning directly, we don't want to return any errors since
+		// that would prevent the addon from being enabled (callbacks wouldn't be run)
+		out.WarningT(volumesnapshotsDisabledMsg)
+	}
+	return nil
+}
+
 // isAddonValid returns the addon, true if it is valid
 // otherwise returns nil, false
 func isAddonValid(name string) (*Addon, bool) {
@@ -56,3 +82,12 @@ func isAddonValid(name string) (*Addon, bool) {
 	}
 	return nil, false
 }
+
+func contains(slice []string, val string) bool {
+	for _, item := range slice {
+		if item == val {
+			return true
+		}
+	}
+	return false
+}
@@ -472,6 +472,74 @@ var Addons = map[string]*Addon{
 			"0640",
 			false),
 	}, false, "volumesnapshots"),
+	"csi-hostpath-driver": NewAddon([]*BinAsset{
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-attacher.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-provisioner.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-resizer.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-snapshotter.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-attacher.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-driverinfo.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-plugin.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-provisioner.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-resizer.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-snapshotter.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-storageclass.yaml",
+			"0640",
+			false),
+	}, false, "csi-hostpath-driver"),
 }
 
 // GenerateTemplateData generates template data for template assets