Unverified commit 616fcb17, authored by KubeSphere CI Bot, committed by GitHub

Merge pull request #2334 from wansir/kubectl-image

Configuring kubectl image from command line
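In short: the kubectl terminal image is no longer taken from the KUBECTL_IMAGE environment variable (with the hard-coded fallback kubesphere/kubectl:advanced-1.0.0). It becomes a field on AuthenticationOptions with the default kubesphere/kubectl:v1.0.0, exposed as a kubectl-image command-line flag and a kubectlImage yaml key, and is passed through addControllers and the ClusterRoleBinding controller into the kubectl operator. A hedged usage sketch (the binary name and config section are assumptions, not shown in this diff): start the controller manager with --kubectl-image=registry.example.com/ks/kubectl:custom, or set kubectlImage under the authentication section of the KubeSphere configuration file.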
......@@ -65,6 +65,7 @@ func addControllers(
multiClusterEnabled bool,
networkPolicyEnabled bool,
serviceMeshEnabled bool,
+ kubectlImage string,
stopCh <-chan struct{}) error {
kubernetesInformer := informerFactory.KubernetesSharedInformerFactory()
......@@ -219,7 +220,8 @@ func addControllers(
kubernetesInformer.Rbac().V1().ClusterRoleBindings(),
kubernetesInformer.Apps().V1().Deployments(),
kubernetesInformer.Core().V1().Pods(),
- kubesphereInformer.Iam().V1alpha2().Users())
+ kubesphereInformer.Iam().V1alpha2().Users(),
+ kubectlImage)
globalRoleController := globalrole.NewController(client.Kubernetes(), client.KubeSphere(),
kubesphereInformer.Iam().V1alpha2().GlobalRoles(), fedGlobalRoleCache, fedGlobalRoleCacheController)
......
......@@ -6,6 +6,7 @@ import (
"k8s.io/client-go/tools/leaderelection"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
+ authoptions "kubesphere.io/kubesphere/pkg/apiserver/authentication/options"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap"
......@@ -19,29 +20,31 @@ import (
)
type KubeSphereControllerManagerOptions struct {
- KubernetesOptions *k8s.KubernetesOptions
- DevopsOptions *jenkins.Options
- S3Options *s3.Options
- LdapOptions *ldapclient.Options
- OpenPitrixOptions *openpitrix.Options
- NetworkOptions *network.Options
- MultiClusterOptions *multicluster.Options
- ServiceMeshOptions *servicemesh.Options
- LeaderElect bool
- LeaderElection *leaderelection.LeaderElectionConfig
- WebhookCertDir string
+ KubernetesOptions *k8s.KubernetesOptions
+ DevopsOptions *jenkins.Options
+ S3Options *s3.Options
+ AuthenticationOptions *authoptions.AuthenticationOptions
+ LdapOptions *ldapclient.Options
+ OpenPitrixOptions *openpitrix.Options
+ NetworkOptions *network.Options
+ MultiClusterOptions *multicluster.Options
+ ServiceMeshOptions *servicemesh.Options
+ LeaderElect bool
+ LeaderElection *leaderelection.LeaderElectionConfig
+ WebhookCertDir string
}
func NewKubeSphereControllerManagerOptions() *KubeSphereControllerManagerOptions {
s := &KubeSphereControllerManagerOptions{
- KubernetesOptions: k8s.NewKubernetesOptions(),
- DevopsOptions: jenkins.NewDevopsOptions(),
- S3Options: s3.NewS3Options(),
- LdapOptions: ldapclient.NewOptions(),
- OpenPitrixOptions: openpitrix.NewOptions(),
- NetworkOptions: network.NewNetworkOptions(),
- MultiClusterOptions: multicluster.NewOptions(),
- ServiceMeshOptions: servicemesh.NewServiceMeshOptions(),
+ KubernetesOptions: k8s.NewKubernetesOptions(),
+ DevopsOptions: jenkins.NewDevopsOptions(),
+ S3Options: s3.NewS3Options(),
+ LdapOptions: ldapclient.NewOptions(),
+ OpenPitrixOptions: openpitrix.NewOptions(),
+ NetworkOptions: network.NewNetworkOptions(),
+ MultiClusterOptions: multicluster.NewOptions(),
+ ServiceMeshOptions: servicemesh.NewServiceMeshOptions(),
+ AuthenticationOptions: authoptions.NewAuthenticateOptions(),
LeaderElection: &leaderelection.LeaderElectionConfig{
LeaseDuration: 30 * time.Second,
RenewDeadline: 15 * time.Second,
......@@ -60,6 +63,7 @@ func (s *KubeSphereControllerManagerOptions) Flags() cliflag.NamedFlagSets {
s.KubernetesOptions.AddFlags(fss.FlagSet("kubernetes"), s.KubernetesOptions)
s.DevopsOptions.AddFlags(fss.FlagSet("devops"), s.DevopsOptions)
s.S3Options.AddFlags(fss.FlagSet("s3"), s.S3Options)
+ s.AuthenticationOptions.AddFlags(fss.FlagSet("authentication"), s.AuthenticationOptions)
s.LdapOptions.AddFlags(fss.FlagSet("ldap"), s.LdapOptions)
s.OpenPitrixOptions.AddFlags(fss.FlagSet("openpitrix"), s.OpenPitrixOptions)
s.NetworkOptions.AddFlags(fss.FlagSet("network"), s.NetworkOptions)
......
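For context, here is a minimal, self-contained sketch of the NamedFlagSets pattern used by the options type above. The flag name and default value are copied from this diff; the binary name and the manual flag-set merging are illustrative only and not KubeSphere's actual wiring.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	// group flags by component, as the controller manager does with fss.FlagSet("authentication") etc.
	fss := cliflag.NamedFlagSets{}
	authFlags := fss.FlagSet("authentication")
	kubectlImage := authFlags.String("kubectl-image", "kubesphere/kubectl:v1.0.0",
		"Image used by the kubectl terminal pod")

	// merge all named flag sets into one command flag set and parse the command line
	cmdFlags := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)
	for _, fs := range fss.FlagSets {
		cmdFlags.AddFlagSet(fs)
	}
	if err := cmdFlags.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("kubectl image:", *kubectlImage)
}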
......@@ -56,16 +56,18 @@ func NewControllerManagerCommand() *cobra.Command {
if err == nil {
// make sure LeaderElection is not nil
s = &options.KubeSphereControllerManagerOptions{
- KubernetesOptions: conf.KubernetesOptions,
- DevopsOptions: conf.DevopsOptions,
- S3Options: conf.S3Options,
- LdapOptions: conf.LdapOptions,
- OpenPitrixOptions: conf.OpenPitrixOptions,
- NetworkOptions: conf.NetworkOptions,
- MultiClusterOptions: conf.MultiClusterOptions,
- ServiceMeshOptions: conf.ServiceMeshOptions,
- LeaderElection: s.LeaderElection,
- LeaderElect: s.LeaderElect,
+ KubernetesOptions: conf.KubernetesOptions,
+ DevopsOptions: conf.DevopsOptions,
+ S3Options: conf.S3Options,
+ AuthenticationOptions: conf.AuthenticationOptions,
+ LdapOptions: conf.LdapOptions,
+ OpenPitrixOptions: conf.OpenPitrixOptions,
+ NetworkOptions: conf.NetworkOptions,
+ MultiClusterOptions: conf.MultiClusterOptions,
+ ServiceMeshOptions: conf.ServiceMeshOptions,
+ LeaderElection: s.LeaderElection,
+ LeaderElect: s.LeaderElect,
+ WebhookCertDir: s.WebhookCertDir,
}
}
......@@ -181,8 +183,10 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
// TODO(jeff): refactor config with CRD
servicemeshEnabled := s.ServiceMeshOptions != nil && len(s.ServiceMeshOptions.IstioPilotHost) != 0
- if err = addControllers(mgr, kubernetesClient, informerFactory, devopsClient, s3Client, ldapClient, openpitrixClient,
- s.MultiClusterOptions.Enable, s.NetworkOptions.EnableNetworkPolicy, servicemeshEnabled, stopCh); err != nil {
+ if err = addControllers(mgr, kubernetesClient, informerFactory,
+ devopsClient, s3Client, ldapClient, openpitrixClient,
+ s.MultiClusterOptions.Enable, s.NetworkOptions.EnableNetworkPolicy,
+ servicemeshEnabled, s.AuthenticationOptions.KubectlImage, stopCh); err != nil {
klog.Fatalf("unable to register controllers to the manager: %v", err)
}
......
......@@ -34,6 +34,7 @@ type AuthenticationOptions struct {
JwtSecret string `json:"-" yaml:"jwtSecret"`
// oauth options
OAuthOptions *oauth.Options `json:"oauthOptions" yaml:"oauthOptions"`
+ KubectlImage string `json:"kubectlImage" yaml:"kubectlImage"`
}
func NewAuthenticateOptions() *AuthenticationOptions {
......@@ -43,6 +44,7 @@ func NewAuthenticateOptions() *AuthenticationOptions {
OAuthOptions: oauth.NewOptions(),
MultipleLogin: false,
JwtSecret: "",
+ KubectlImage: "kubesphere/kubectl:v1.0.0",
}
}
......@@ -61,4 +63,5 @@ func (options *AuthenticationOptions) AddFlags(fs *pflag.FlagSet, s *Authenticat
fs.BoolVar(&options.MultipleLogin, "multiple-login", s.MultipleLogin, "Allow multiple login with the same account, disable means only one user can login at the same time.")
fs.StringVar(&options.JwtSecret, "jwt-secret", s.JwtSecret, "Secret to sign jwt token, must not be empty.")
fs.DurationVar(&options.OAuthOptions.AccessTokenMaxAge, "access-token-max-age", s.OAuthOptions.AccessTokenMaxAge, "AccessTokenMaxAgeSeconds control the lifetime of access tokens, 0 means no expiration.")
+ fs.StringVar(&s.KubectlImage, "kubectl-image", s.KubectlImage, "Setup the image used by kubectl terminal pod")
}
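Because the new field carries a yaml tag, the image can also come from the configuration file. A minimal sketch of that mapping, assuming a plain yaml.v2 unmarshal (KubeSphere's real config loader is not shown in this diff); the image value below is a hypothetical placeholder:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// mirrors only the yaml-tagged fields relevant here; the real AuthenticationOptions has more
type authenticationOptions struct {
	JwtSecret    string `yaml:"jwtSecret"`
	KubectlImage string `yaml:"kubectlImage"`
}

func main() {
	raw := []byte("jwtSecret: changeme\nkubectlImage: registry.example.com/ks/kubectl:custom\n")
	var opts authenticationOptions
	if err := yaml.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Println(opts.KubectlImage) // registry.example.com/ks/kubectl:custom
}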
......@@ -53,6 +53,8 @@ type Controller struct {
clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister
clusterRoleBindingSynced cache.InformerSynced
userSynced cache.InformerSynced
+ deploymentSynced cache.InformerSynced
+ podSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
......@@ -65,7 +67,9 @@ type Controller struct {
kubectlOperator kubectl.Interface
}
- func NewController(k8sClient kubernetes.Interface, clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer, deploymentInformer appsv1informers.DeploymentInformer, podInformer coreinfomers.PodInformer, userInformer iamv1alpha2informers.UserInformer) *Controller {
+ func NewController(k8sClient kubernetes.Interface, clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer,
+ deploymentInformer appsv1informers.DeploymentInformer, podInformer coreinfomers.PodInformer,
+ userInformer iamv1alpha2informers.UserInformer, kubectlImage string) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
......@@ -81,7 +85,9 @@ func NewController(k8sClient kubernetes.Interface, clusterRoleBindingInformer rb
clusterRoleBindingLister: clusterRoleBindingInformer.Lister(),
clusterRoleBindingSynced: clusterRoleBindingInformer.Informer().HasSynced,
userSynced: userInformer.Informer().HasSynced,
- kubectlOperator: kubectl.NewOperator(k8sClient, deploymentInformer, podInformer, userInformer),
+ deploymentSynced: deploymentInformer.Informer().HasSynced,
+ podSynced: podInformer.Informer().HasSynced,
+ kubectlOperator: kubectl.NewOperator(k8sClient, deploymentInformer, podInformer, userInformer, kubectlImage),
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleBinding"),
recorder: recorder,
}
......@@ -105,7 +111,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
- if ok := cache.WaitForCacheSync(stopCh, c.clusterRoleBindingSynced, c.userSynced); !ok {
+ if ok := cache.WaitForCacheSync(stopCh, c.clusterRoleBindingSynced, c.userSynced, c.deploymentSynced, c.podSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
......
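Why the two extra sync functions matter: the controller now creates kubectl deployments through an operator that reads from the deployment and pod informers, so Run must not start workers before those caches are warm. A minimal sketch of the gate (the package and helper names are illustrative, not KubeSphere's):

package kubectlsync

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// waitForCaches blocks until every informer the controller depends on has synced,
// or until stopCh is closed; it is the same cache.WaitForCacheSync gate extended above.
func waitForCaches(stopCh <-chan struct{}, synced ...cache.InformerSynced) error {
	if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	return nil
}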
......@@ -51,7 +51,7 @@ func newResourceHandler(k8sClient kubernetes.Interface, factory informers.Inform
kubeconfigOperator: kubeconfig.NewReadOnlyOperator(factory.KubernetesSharedInformerFactory().Core().V1().ConfigMaps(), masterURL),
kubectlOperator: kubectl.NewOperator(nil, factory.KubernetesSharedInformerFactory().Apps().V1().Deployments(),
factory.KubernetesSharedInformerFactory().Core().V1().Pods(),
- factory.KubeSphereSharedInformerFactory().Iam().V1alpha2().Users()),
+ factory.KubeSphereSharedInformerFactory().Iam().V1alpha2().Users(), ""),
}
}
......
......@@ -31,7 +31,6 @@ import (
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/models"
"math/rand"
"os"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"kubesphere.io/kubesphere/pkg/constants"
......@@ -52,18 +51,13 @@ type operator struct {
deploymentInformer appsv1informers.DeploymentInformer
podInformer coreinfomers.PodInformer
userInformer iamv1alpha2informers.UserInformer
+ kubectlImage string
}
- func NewOperator(k8sClient kubernetes.Interface, deploymentInformer appsv1informers.DeploymentInformer, podInformer coreinfomers.PodInformer, userInformer iamv1alpha2informers.UserInformer) Interface {
- return &operator{k8sClient: k8sClient, deploymentInformer: deploymentInformer, podInformer: podInformer, userInformer: userInformer}
- }
- var DefaultImage = "kubesphere/kubectl:advanced-1.0.0"
- func init() {
- if env := os.Getenv("KUBECTL_IMAGE"); env != "" {
- DefaultImage = env
- }
- }
+ func NewOperator(k8sClient kubernetes.Interface, deploymentInformer appsv1informers.DeploymentInformer,
+ podInformer coreinfomers.PodInformer, userInformer iamv1alpha2informers.UserInformer, kubectlImage string) Interface {
+ return &operator{k8sClient: k8sClient, deploymentInformer: deploymentInformer, podInformer: podInformer,
+ userInformer: userInformer, kubectlImage: kubectlImage}
+ }
func (o *operator) GetKubectlPod(username string) (models.PodInfo, error) {
......@@ -118,7 +112,6 @@ func (o *operator) CreateKubectlDeploy(username string) error {
deployName := fmt.Sprintf(deployNameFormat, username)
user, err := o.userInformer.Lister().Get(username)
if err != nil {
klog.Error(err)
// ignore if user not exist
......@@ -146,7 +139,7 @@ func (o *operator) CreateKubectlDeploy(username string) error {
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "kubectl",
- Image: DefaultImage,
+ Image: o.kubectlImage,
},
},
ServiceAccountName: "kubesphere-cluster-admin",
......@@ -156,14 +149,12 @@ func (o *operator) CreateKubectlDeploy(username string) error {
}
err = controllerutil.SetControllerReference(user, deployment, scheme.Scheme)
if err != nil {
klog.Errorln(err)
return err
}
_, err = o.k8sClient.AppsV1().Deployments(namespace).Create(deployment)
if err != nil {
if errors.IsAlreadyExists(err) {
return nil
......
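Putting it together, the operator now stamps the injected image into the kubectl pod spec instead of reading an environment variable at init time. A hedged sketch of the object it builds (the name format, labels, selector and owner reference are simplified or omitted here; only the container image and service account mirror the diff):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// kubectlDeployment is an illustrative stand-in for CreateKubectlDeploy's deployment object.
func kubectlDeployment(username, kubectlImage string) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "kubectl-" + username}, // real name format not shown in the diff
		Spec: appsv1.DeploymentSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "kubectl", Image: kubectlImage}, // image comes from --kubectl-image, not KUBECTL_IMAGE
					},
					ServiceAccountName: "kubesphere-cluster-admin",
				},
			},
		},
	}
}

func main() {
	d := kubectlDeployment("admin", "kubesphere/kubectl:v1.0.0")
	fmt.Println(d.Spec.Template.Spec.Containers[0].Image)
}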