Unverified commit 21a5f830, authored by zryfish, committed by GitHub

fix unable to setup webhook server under leader election (#2830)

Signed-off-by: Jeff <zw0948@gmail.com>
Parent 48e77cbb
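In short: previously the entire manager setup, including the webhook server, lived inside a hand-rolled client-go leaderelection.RunOrDie callback, so admission webhooks were only served after a replica won the lease and never on standby replicas. The change below drops that wrapper and lets the controller-runtime manager handle leader election itself via manager.Options, with the webhook server configured unconditionally. A minimal, self-contained sketch of that pattern follows (not KubeSphere's actual wiring: it targets the stop-channel controller-runtime API used in this diff, config.GetConfigOrDie stands in for the project's own client setup, and the cert directory and lease timings are placeholder values):

package main

import (
	"time"

	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)

func main() {
	// Placeholder timings; KubeSphere takes these from its LeaderElection options.
	leaseDuration := 30 * time.Second
	renewDeadline := 15 * time.Second
	retryPeriod := 5 * time.Second

	// Webhook serving (CertDir/Port) is configured unconditionally, while leader
	// election is delegated to the manager instead of a separate RunOrDie loop.
	mgrOptions := manager.Options{
		CertDir:                 "/tmp/k8s-webhook-server/serving-certs", // placeholder path
		Port:                    8443,                                    // binding 443 would require root
		LeaderElection:          true,
		LeaderElectionNamespace: "kubesphere-system",
		LeaderElectionID:        "ks-controller-manager-leader-election",
		LeaseDuration:           &leaseDuration,
		RenewDeadline:           &renewDeadline,
		RetryPeriod:             &retryPeriod,
	}

	mgr, err := manager.New(config.GetConfigOrDie(), mgrOptions)
	if err != nil {
		klog.Fatalf("unable to set up controller manager: %v", err)
	}

	// Admission handlers are registered on the manager's built-in webhook server
	// before Start, e.g. hookServer.Register("/validate-...", &webhook.Admission{...}).
	hookServer := mgr.GetWebhookServer()
	_ = hookServer

	// Start blocks until the stop channel closes; controllers wait for the lease,
	// while the webhook server does not depend on winning it — the behaviour this fix relies on.
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		klog.Fatalf("unable to run the manager: %v", err)
	}
}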
@@ -17,22 +17,15 @@ limitations under the License.
 package app
 
 import (
-	"context"
 	"fmt"
 	"github.com/spf13/cobra"
-	"k8s.io/api/core/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/client-go/tools/leaderelection"
-	"k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/client-go/tools/record"
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog"
 	"k8s.io/klog/klogr"
 	"kubesphere.io/kubesphere/cmd/controller-manager/app/options"
 	"kubesphere.io/kubesphere/pkg/apis"
 	controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config"
-	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
 	"kubesphere.io/kubesphere/pkg/controller/namespace"
 	"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy"
 	"kubesphere.io/kubesphere/pkg/controller/user"
@@ -46,11 +39,10 @@ import (
 	"kubesphere.io/kubesphere/pkg/simple/client/s3"
 	"kubesphere.io/kubesphere/pkg/utils/term"
 	"os"
+	application "sigs.k8s.io/application/controllers"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
-
-	application "sigs.k8s.io/application/controllers"
 )
 
 func NewControllerManagerCommand() *cobra.Command {
@@ -72,6 +64,8 @@ func NewControllerManagerCommand() *cobra.Command {
 			LeaderElect: s.LeaderElect,
 			WebhookCertDir: s.WebhookCertDir,
 		}
+	} else {
+		klog.Fatal("Failed to load configuration from disk", err)
 	}
 
 	cmd := &cobra.Command{
@@ -83,7 +77,7 @@ func NewControllerManagerCommand() *cobra.Command {
 				os.Exit(1)
 			}
 
-			if err = Run(s, signals.SetupSignalHandler()); err != nil {
+			if err = run(s, signals.SetupSignalHandler()); err != nil {
 				klog.Error(err)
 				os.Exit(1)
 			}
@@ -101,13 +95,13 @@ func NewControllerManagerCommand() *cobra.Command {
 	usageFmt := "Usage:\n  %s\n"
 	cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
 	cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
-		fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
+		_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
 		cliflag.PrintSections(cmd.OutOrStdout(), namedFlagSets, cols)
 	})
 	return cmd
 }
 
-func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) error {
+func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) error {
 	kubernetesClient, err := k8s.NewKubernetesClient(s.KubernetesOptions)
 	if err != nil {
 		klog.Errorf("Failed to create kubernetes clientset %v", err)
@@ -160,126 +154,89 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) error {
 		kubernetesClient.Snapshot(),
 		kubernetesClient.ApiExtensions())
 
-	run := func(ctx context.Context) {
-		klog.V(0).Info("setting up manager")
-
-		// Use 8443 instead of 443 cause we need root permission to bind port 443
-		mgr, err := manager.New(kubernetesClient.Config(), manager.Options{CertDir: s.WebhookCertDir, Port: 8443})
-		if err != nil {
-			klog.Fatalf("unable to set up overall controller manager: %v", err)
-		}
-
-		klog.V(0).Info("setting up scheme")
-		if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
-			klog.Fatalf("unable add APIs to scheme: %v", err)
-		}
-
-		klog.V(0).Info("Setting up controllers")
-		err = workspace.Add(mgr)
-		if err != nil {
-			klog.Fatal("Unable to create workspace controller")
-		}
-
-		err = namespace.Add(mgr)
-		if err != nil {
-			klog.Fatal("Unable to create namespace controller")
-		}
-
-		err = (&application.ApplicationReconciler{
-			Scheme: mgr.GetScheme(),
-			Client: mgr.GetClient(),
-			Mapper: mgr.GetRESTMapper(),
-			Log: klogr.New(),
-		}).SetupWithManager(mgr)
-		if err != nil {
-			klog.Fatal("Unable to create application controller")
-		}
-
-		// TODO(jeff): refactor config with CRD
-		servicemeshEnabled := s.ServiceMeshOptions != nil && len(s.ServiceMeshOptions.IstioPilotHost) != 0
-		if err = addControllers(mgr,
-			kubernetesClient,
-			informerFactory,
-			devopsClient,
-			s3Client,
-			ldapClient,
-			s.AuthenticationOptions,
-			openpitrixClient,
-			s.MultiClusterOptions.Enable,
-			s.NetworkOptions,
-			servicemeshEnabled,
-			s.AuthenticationOptions.KubectlImage, stopCh); err != nil {
-			klog.Fatalf("unable to register controllers to the manager: %v", err)
-		}
-
-		// Start cache data after all informer is registered
-		informerFactory.Start(stopCh)
-
-		// Setup webhooks
-		klog.Info("setting up webhook server")
-		hookServer := mgr.GetWebhookServer()
-		klog.Info("registering webhooks to the webhook server")
-		hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}})
-		hookServer.Register("/validate-nsnp-kubesphere-io-v1alpha1-network", &webhook.Admission{Handler: &nsnetworkpolicy.NSNPValidator{Client: mgr.GetClient()}})
-
-		klog.V(0).Info("Starting the controllers.")
-		if err = mgr.Start(stopCh); err != nil {
-			klog.Fatalf("unable to run the manager: %v", err)
-		}
-
-		select {}
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go func() {
-		<-stopCh
-		cancel()
-	}()
-
-	if !s.LeaderElect {
-		run(ctx)
-		return nil
-	}
-
-	id, err := os.Hostname()
-	if err != nil {
-		return err
-	}
-
-	// add a uniquifier so that two processes on the same host don't accidentally both become active
-	id = id + "_" + string(uuid.NewUUID())
-
-	lock, err := resourcelock.New(resourcelock.LeasesResourceLock,
-		"kubesphere-system",
-		"ks-controller-manager",
-		kubernetesClient.Kubernetes().CoreV1(),
-		kubernetesClient.Kubernetes().CoordinationV1(),
-		resourcelock.ResourceLockConfig{
-			Identity: id,
-			EventRecorder: record.NewBroadcaster().NewRecorder(scheme.Scheme, v1.EventSource{
-				Component: "ks-controller-manager",
-			}),
-		})
-
-	if err != nil {
-		klog.Fatalf("error creating lock: %v", err)
-	}
-
-	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
-		Lock: lock,
-		LeaseDuration: s.LeaderElection.LeaseDuration,
-		RenewDeadline: s.LeaderElection.RenewDeadline,
-		RetryPeriod: s.LeaderElection.RetryPeriod,
-		Callbacks: leaderelection.LeaderCallbacks{
-			OnStartedLeading: run,
-			OnStoppedLeading: func() {
-				klog.Errorf("leadership lost")
-				os.Exit(0)
-			},
-		},
-	})
-
+	mgrOptions := manager.Options{
+		CertDir: s.WebhookCertDir,
+		Port: 8443,
+	}
+
+	if s.LeaderElect {
+		mgrOptions = manager.Options{
+			CertDir: s.WebhookCertDir,
+			Port: 8443,
+			LeaderElection: s.LeaderElect,
+			LeaderElectionNamespace: "kubesphere-system",
+			LeaderElectionID: "ks-controller-manager-leader-election",
+			LeaseDuration: &s.LeaderElection.LeaseDuration,
+			RetryPeriod: &s.LeaderElection.RetryPeriod,
+			RenewDeadline: &s.LeaderElection.RenewDeadline,
+		}
+	}
+
+	klog.V(0).Info("setting up manager")
+
+	// Use 8443 instead of 443 cause we need root permission to bind port 443
+	mgr, err := manager.New(kubernetesClient.Config(), mgrOptions)
+	if err != nil {
+		klog.Fatalf("unable to set up overall controller manager: %v", err)
+	}
+
+	if err = apis.AddToScheme(mgr.GetScheme()); err != nil {
+		klog.Fatalf("unable add APIs to scheme: %v", err)
+	}
+
+	err = workspace.Add(mgr)
+	if err != nil {
+		klog.Fatal("Unable to create workspace controller")
+	}
+
+	err = namespace.Add(mgr)
+	if err != nil {
+		klog.Fatal("Unable to create namespace controller")
+	}
+
+	err = (&application.ApplicationReconciler{
+		Scheme: mgr.GetScheme(),
+		Client: mgr.GetClient(),
+		Mapper: mgr.GetRESTMapper(),
+		Log: klogr.New(),
+	}).SetupWithManager(mgr)
+	if err != nil {
+		klog.Fatal("Unable to create application controller")
+	}
+
+	// TODO(jeff): refactor config with CRD
+	servicemeshEnabled := s.ServiceMeshOptions != nil && len(s.ServiceMeshOptions.IstioPilotHost) != 0
+	if err = addControllers(mgr,
+		kubernetesClient,
+		informerFactory,
+		devopsClient,
+		s3Client,
+		ldapClient,
+		s.AuthenticationOptions,
+		openpitrixClient,
+		s.MultiClusterOptions.Enable,
+		s.NetworkOptions,
+		servicemeshEnabled,
+		s.AuthenticationOptions.KubectlImage, stopCh); err != nil {
+		klog.Fatalf("unable to register controllers to the manager: %v", err)
+	}
+
+	// Start cache data after all informer is registered
+	klog.V(0).Info("Starting cache resource from apiserver...")
+	informerFactory.Start(stopCh)
+
+	// Setup webhooks
+	klog.V(2).Info("setting up webhook server")
+	hookServer := mgr.GetWebhookServer()
+	klog.V(2).Info("registering webhooks to the webhook server")
+	hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}})
+	hookServer.Register("/validate-nsnp-kubesphere-io-v1alpha1-network", &webhook.Admission{Handler: &nsnetworkpolicy.NSNPValidator{Client: mgr.GetClient()}})
+
+	klog.V(0).Info("Starting the controllers.")
+	if err = mgr.Start(stopCh); err != nil {
+		klog.Fatalf("unable to run the manager: %v", err)
+	}
+
 	return nil
 }
@@ -191,7 +191,7 @@ func (c *StorageCapabilityController) handlerCSIDriver(obj interface{}) {
 	}
 	for _, storageClass := range storageClasses {
 		if storageClass.Provisioner == csiDriver.Name {
-			klog.Info("enqueue StorageClass when handler csiDriver", storageClass)
+			klog.V(4).Infof("enqueue StorageClass %s when handling csiDriver", storageClass.Name)
 			c.enqueueStorageClass(storageClass)
 		}
 	}