Commit 24e3ac86 authored by Duan Jiong

add ippool resource api

add ippool webhook and fix some bugs
Signed-off-by: Duan Jiong <djduanjiong@gmail.com>
Parent 8a6ce2d7
One file's diff is collapsed and not shown.
@@ -51,7 +51,6 @@ import (
 ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap"
 "kubesphere.io/kubesphere/pkg/simple/client/network"
 ippoolclient "kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
-calicoclient "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico"
 "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
 "kubesphere.io/kubesphere/pkg/simple/client/s3"
 "sigs.k8s.io/controller-runtime/pkg/manager"
@@ -261,17 +260,12 @@ func addControllers(
 }
 var ippoolController manager.Runnable
-if networkOptions.EnableIPPool {
-var ippoolProvider ippoolclient.Provider
-ippoolProvider = ippoolclient.NewProvider(client.KubeSphere(), networkOptions.IPPoolOptions)
-if networkOptions.IPPoolOptions.Calico != nil {
-ippoolProvider = calicoclient.NewProvider(client.KubeSphere(), *networkOptions.IPPoolOptions.Calico, options)
-}
+ippoolProvider := ippoolclient.NewProvider(kubernetesInformer.Core().V1().Pods(), client.KubeSphere(), client.Kubernetes(), networkOptions.IPPoolType, options)
+if ippoolProvider != nil {
 ippoolController = ippool.NewIPPoolController(kubesphereInformer.Network().V1alpha1().IPPools(),
 kubesphereInformer.Network().V1alpha1().IPAMBlocks(),
 client.Kubernetes(),
 client.KubeSphere(),
-networkOptions.IPPoolOptions,
 ippoolProvider)
 }
...
@@ -18,6 +18,9 @@ package options
 import (
 "flag"
+"strings"
+"time"
 "github.com/spf13/pflag"
 "k8s.io/client-go/tools/leaderelection"
 cliflag "k8s.io/component-base/cli/flag"
@@ -31,8 +34,6 @@ import (
 "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
 "kubesphere.io/kubesphere/pkg/simple/client/s3"
 "kubesphere.io/kubesphere/pkg/simple/client/servicemesh"
-"strings"
-"time"
 )
 type KubeSphereControllerManagerOptions struct {
...
@@ -27,7 +27,7 @@ import (
 "kubesphere.io/kubesphere/pkg/apis"
 controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config"
 "kubesphere.io/kubesphere/pkg/controller/namespace"
-"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy"
+"kubesphere.io/kubesphere/pkg/controller/network/webhooks"
 "kubesphere.io/kubesphere/pkg/controller/user"
 "kubesphere.io/kubesphere/pkg/controller/workspace"
 "kubesphere.io/kubesphere/pkg/controller/workspacerole"
@@ -252,7 +252,8 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
 klog.V(2).Info("registering webhooks to the webhook server")
 hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}})
-hookServer.Register("/validate-nsnp-kubesphere-io-v1alpha1-network", &webhook.Admission{Handler: &nsnetworkpolicy.NSNPValidator{Client: mgr.GetClient()}})
+hookServer.Register("/validate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.ValidatingHandler{C: mgr.GetClient()}})
+hookServer.Register("/mutate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.MutatingHandler{C: mgr.GetClient()}})
 klog.V(0).Info("Starting the controllers.")
 if err = mgr.Start(stopCh); err != nil {
...
@@ -3,14 +3,20 @@
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
+annotations:
+controller-gen.kubebuilder.io/version: (devel)
 creationTimestamp: null
 name: ippools.network.kubesphere.io
 spec:
 group: network.kubesphere.io
 names:
 kind: IPPool
+listKind: IPPoolList
 plural: ippools
+singular: ippool
 scope: Cluster
+subresources:
+status: {}
 validation:
 openAPIV3Schema:
 properties:
@@ -36,8 +42,8 @@ spec:
 description: The pool CIDR.
 type: string
 disabled:
-description: When disabled is true, IPAM will not assign addresses
-from this pool.
+description: When disabled is true, IPAM will not assign addresses from
+this pool.
 type: boolean
 dns:
 description: DNS contains values interesting for DNS resolvers
@@ -59,11 +65,11 @@ spec:
 type: object
 gateway:
 type: string
-namespace:
-type: string
 rangeEnd:
+description: The last ip, inclusive
 type: string
 rangeStart:
+description: The first ip, inclusive
 type: string
 routes:
 items:
@@ -87,8 +93,6 @@ spec:
 - master
 - vlanId
 type: object
-workspace:
-type: string
 required:
 - cidr
 - type
@@ -96,7 +100,6 @@ spec:
 status:
 properties:
 allocations:
-description: Allocations should equal to (Total - Reserved - Unallocated)
 type: integer
 capacity:
 type: integer
@@ -106,6 +109,19 @@ spec:
 type: boolean
 unallocated:
 type: integer
+workspaces:
+additionalProperties:
+properties:
+allocations:
+type: integer
+required:
+- allocations
+type: object
+type: object
+required:
+- allocations
+- capacity
+- unallocated
 type: object
 type: object
 version: v1alpha1
...
@@ -18,11 +18,11 @@ package v1alpha1
 import (
 "fmt"
-"github.com/projectcalico/libcalico-go/lib/names"
 "math/big"
 "reflect"
 "strings"
+"github.com/projectcalico/libcalico-go/lib/names"
 cnet "github.com/projectcalico/libcalico-go/lib/net"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
...
@@ -31,15 +31,21 @@ const (
 // scope type > id > name
 // id used to detect cidr overlap
 IPPoolTypeLabel = "ippool.network.kubesphere.io/type"
 IPPoolNameLabel = "ippool.network.kubesphere.io/name"
 IPPoolIDLabel = "ippool.network.kubesphere.io/id"
+IPPoolDefaultLabel = "ippool.network.kubesphere.io/default"
+IPPoolTypeNone = "none"
+IPPoolTypeLocal = "local"
+IPPoolTypeCalico = "calico"
 )
 // +genclient
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
 // +kubebuilder:resource:scope=Cluster
 type IPPool struct {
 metav1.TypeMeta `json:",inline"`
@@ -69,12 +75,17 @@ type DNS struct {
 Options []string `json:"options,omitempty"`
 }
+type WorkspaceStatus struct {
+Allocations int `json:"allocations"`
+}
 type IPPoolStatus struct {
-Unallocated int `json:"unallocated,omitempty"`
-Allocations int `json:"allocations,omitempty"`
-Capacity int `json:"capacity,omitempty"`
+Unallocated int `json:"unallocated"`
+Allocations int `json:"allocations"`
+Capacity int `json:"capacity"`
 Reserved int `json:"reserved,omitempty"`
 Synced bool `json:"synced,omitempty"`
+Workspaces map[string]WorkspaceStatus `json:"workspaces,omitempty"`
 }
 type IPPoolSpec struct {
@@ -100,9 +111,6 @@ type IPPoolSpec struct {
 Gateway string `json:"gateway,omitempty"`
 Routes []Route `json:"routes,omitempty"`
 DNS DNS `json:"dns,omitempty"`
-Workspace string `json:"workspace,omitempty"`
-Namespace string `json:"namespace,omitempty"`
 }
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -127,9 +135,9 @@ const (
 // Find the ordinal (i.e. how far into the block) a given IP lies. Returns an error if the IP is outside the block.
 func (b IPPool) IPToOrdinal(ip cnet.IP) (int, error) {
-netIP, _, _ := cnet.ParseCIDR(b.Spec.CIDR)
+_, cidr, _ := cnet.ParseCIDR(b.Spec.CIDR)
 ipAsInt := cnet.IPToBigInt(ip)
-baseInt := cnet.IPToBigInt(*netIP)
+baseInt := cnet.IPToBigInt(cnet.IP{IP: cidr.IP})
 ord := big.NewInt(0).Sub(ipAsInt, baseInt).Int64()
 if ord < 0 || ord >= int64(b.NumAddresses()) {
 return 0, fmt.Errorf("IP %s not in pool %s", ip, b.Spec.CIDR)
@@ -145,6 +153,14 @@ func (b IPPool) NumAddresses() int {
 return numAddresses
 }
+func (b IPPool) Type() string {
+if b.Spec.Type == VLAN {
+return IPPoolTypeLocal
+}
+return b.Spec.Type
+}
 func (b IPPool) NumReservedAddresses() int {
 return b.StartReservedAddressed() + b.EndReservedAddressed()
 }
@@ -166,6 +182,17 @@ func (b IPPool) EndReservedAddressed() int {
 return total - end - 1
 }
+func (b IPPool) Overlapped(dst IPPool) bool {
+if b.ID() != dst.ID() {
+return false
+}
+_, cidr, _ := cnet.ParseCIDR(b.Spec.CIDR)
+_, cidrDst, _ := cnet.ParseCIDR(dst.Spec.CIDR)
+return cidr.IsNetOverlap(cidrDst.IPNet)
+}
 func (pool IPPool) ID() uint32 {
 switch pool.Spec.Type {
 case VLAN:
...
@@ -30,7 +30,7 @@ func TestIPPool(t *testing.T) {
 },
 Spec: IPPoolSpec{
 Type: VLAN,
-CIDR: "192.168.0.0/24",
+CIDR: "192.168.0.1/24",
 RangeEnd: "192.168.0.250",
 RangeStart: "192.168.0.10",
 },
...
@@ -148,3 +148,7 @@ type NamespaceNetworkPolicyList struct {
 metav1.ListMeta `json:"metadata,omitempty"`
 Items []NamespaceNetworkPolicy `json:"items"`
 }
+
+const (
+NSNPPrefix = "nsnp-"
+)
@@ -259,7 +259,7 @@ func (in *IPPool) DeepCopyInto(out *IPPool) {
 out.TypeMeta = in.TypeMeta
 in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 in.Spec.DeepCopyInto(&out.Spec)
-out.Status = in.Status
+in.Status.DeepCopyInto(&out.Status)
 }
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPool.
@@ -337,6 +337,13 @@ func (in *IPPoolSpec) DeepCopy() *IPPoolSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IPPoolStatus) DeepCopyInto(out *IPPoolStatus) {
 *out = *in
+if in.Workspaces != nil {
+in, out := &in.Workspaces, &out.Workspaces
+*out = make(map[string]WorkspaceStatus, len(*in))
+for key, val := range *in {
+(*out)[key] = val
+}
+}
 }
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolStatus.
@@ -603,3 +610,18 @@ func (in *VLANConfig) DeepCopy() *VLANConfig {
 in.DeepCopyInto(out)
 return out
 }
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) {
+*out = *in
+}
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus.
+func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus {
+if in == nil {
+return nil
+}
+out := new(WorkspaceStatus)
+in.DeepCopyInto(out)
+return out
+}
@@ -409,6 +409,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error {
 {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"},
 {Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "clusters"},
 {Group: "devops.kubesphere.io", Version: "v1alpha3", Resource: "devopsprojects"},
+{Group: "network.kubesphere.io", Version: "v1alpha1", Resource: "ippools"},
 }
 devopsGVRs := []schema.GroupVersionResource{
...
@@ -21,6 +21,7 @@ import (
 "github.com/google/go-cmp/cmp"
 "gopkg.in/yaml.v2"
 "io/ioutil"
+networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
 "kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
 authoptions "kubesphere.io/kubesphere/pkg/apiserver/authentication/options"
 authorizationoptions "kubesphere.io/kubesphere/pkg/apiserver/authorization/options"
@@ -106,6 +107,7 @@ func newTestConfig() (*Config, error) {
 AllowedIngressNamespaces: []string{},
 },
 WeaveScopeHost: "weave-scope-app.weave",
+IPPoolType: networkv1alpha1.IPPoolTypeNone,
 },
 MonitoringOptions: &prometheus.Options{
 Endpoint: "http://prometheus.kubesphere-monitoring-system.svc",
...
@@ -22,10 +22,12 @@ import (
 "time"
 cnet "github.com/projectcalico/libcalico-go/lib/net"
+podv1 "k8s.io/api/core/v1"
 v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
+"k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
@@ -39,6 +41,7 @@ import (
 kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
 networkInformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
 "kubesphere.io/kubesphere/pkg/controller/network/utils"
+"kubesphere.io/kubesphere/pkg/controller/network/webhooks"
 "kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
 "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
@@ -62,8 +65,6 @@ type IPPoolController struct {
 client clientset.Interface
 kubesphereClient kubesphereclient.Interface
-options ippool.Options
 }
 func (c *IPPoolController) ippoolHandle(obj interface{}) {
@@ -112,35 +113,86 @@ func (c *IPPoolController) removeFinalizer(pool *networkv1alpha1.IPPool) error {
 return nil
 }
-// check cidr overlap
-func (c *IPPoolController) checkIPPool(pool *networkv1alpha1.IPPool) (bool, error) {
-_, poolCIDR, err := cnet.ParseCIDR(pool.Spec.CIDR)
+func (c *IPPoolController) ValidateCreate(obj runtime.Object) error {
+b := obj.(*networkv1alpha1.IPPool)
+_, cidr, err := cnet.ParseCIDR(b.Spec.CIDR)
 if err != nil {
-return false, err
+return fmt.Errorf("invalid cidr")
 }
+size, _ := cidr.Mask.Size()
+if b.Spec.BlockSize > 0 && b.Spec.BlockSize < size {
+return fmt.Errorf("the blocksize should be larger than the cidr mask")
+}
+if b.Spec.RangeStart != "" || b.Spec.RangeEnd != "" {
+iStart := cnet.ParseIP(b.Spec.RangeStart)
+iEnd := cnet.ParseIP(b.Spec.RangeEnd)
+if iStart == nil || iEnd == nil {
+return fmt.Errorf("invalid rangeStart or rangeEnd")
+}
+offsetStart, err := b.IPToOrdinal(*iStart)
+if err != nil {
+return err
+}
+offsetEnd, err := b.IPToOrdinal(*iEnd)
+if err != nil {
+return err
+}
+if offsetEnd < offsetStart {
+return fmt.Errorf("rangeStart should not be greater than rangeEnd")
+}
+}
 pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(metav1.ListOptions{
 LabelSelector: labels.SelectorFromSet(labels.Set{
-networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", pool.ID()),
+networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", b.ID()),
 }).String(),
 })
 if err != nil {
-return false, err
+return err
 }
 for _, p := range pools.Items {
-_, cidr, err := cnet.ParseCIDR(p.Spec.CIDR)
-if err != nil {
-return false, err
-}
-if cidr.IsNetOverlap(poolCIDR.IPNet) {
-return false, ErrCIDROverlap
+if b.Overlapped(p) {
+return fmt.Errorf("ippool cidr is overlapped with %s", p.Name)
 }
 }
-return true, nil
+return nil
+}
+func (c *IPPoolController) ValidateUpdate(old runtime.Object, new runtime.Object) error {
+oldP := old.(*networkv1alpha1.IPPool)
+newP := new.(*networkv1alpha1.IPPool)
+if newP.Spec.CIDR != oldP.Spec.CIDR {
+return fmt.Errorf("cidr cannot be modified")
+}
+if newP.Spec.Type != oldP.Spec.Type {
+return fmt.Errorf("ippool type cannot be modified")
+}
+if newP.Spec.BlockSize != oldP.Spec.BlockSize {
+return fmt.Errorf("ippool blockSize cannot be modified")
+}
+if newP.Spec.RangeEnd != oldP.Spec.RangeEnd || newP.Spec.RangeStart != oldP.Spec.RangeStart {
+return fmt.Errorf("ippool rangeEnd/rangeStart cannot be modified")
+}
+return nil
+}
+func (c *IPPoolController) ValidateDelete(obj runtime.Object) error {
+p := obj.(*networkv1alpha1.IPPool)
+if p.Status.Allocations > 0 {
+return fmt.Errorf("ippool is in use, please remove the workload before deleting")
+}
+return nil
 }
 func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
@@ -159,18 +211,19 @@ func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
 func (c *IPPoolController) updateIPPoolStatus(old *networkv1alpha1.IPPool) error {
 new, err := c.provider.GetIPPoolStats(old)
 if err != nil {
-return err
+return fmt.Errorf("failed to get ippool %s status %v", old.Name, err)
 }
 if reflect.DeepEqual(old.Status, new.Status) {
 return nil
 }
-clone := old.DeepCopy()
-clone.Status = new.Status
-old, err = c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
-return err
+_, err = c.kubesphereClient.NetworkV1alpha1().IPPools().UpdateStatus(new)
+if err != nil {
+return fmt.Errorf("failed to update ippool %s status %v", old.Name, err)
+}
+return nil
 }
 func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
@@ -181,10 +234,16 @@ func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
 }()
 pool, err := c.ippoolInformer.Lister().Get(name)
-if apierrors.IsNotFound(err) {
+if err != nil {
+if apierrors.IsNotFound(err) {
+return nil, nil
+}
+return nil, fmt.Errorf("failed to get ippool %s: %v", name, err)
+}
+if pool.Type() != c.provider.Type() {
+klog.V(4).Infof("pool %s type not match, ignored", pool.Name)
 return nil, nil
-} else if err != nil {
-return nil, err
 }
 if utils.IsDeletionCandidate(pool, networkv1alpha1.IPPoolFinalizer) {
@@ -199,6 +258,7 @@ func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
 if err != nil {
 return nil, err
 }
+
 if canDelete {
 return nil, c.removeFinalizer(pool)
 }
@@ -209,14 +269,6 @@ }
 }
 if utils.NeedToAddFinalizer(pool, networkv1alpha1.IPPoolFinalizer) {
-valid, err := c.checkIPPool(pool)
-if err != nil {
-return nil, err
-}
-if !valid {
-return nil, nil
-}
 err = c.addFinalizer(pool)
 if err != nil {
 return nil, err
@@ -310,7 +362,6 @@ func NewIPPoolController(
 ipamblockInformer networkInformer.IPAMBlockInformer,
 client clientset.Interface,
 kubesphereClient kubesphereclient.Interface,
-options ippool.Options,
 provider ippool.Provider) *IPPoolController {
 broadcaster := record.NewBroadcaster()
@@ -318,7 +369,7 @@ func NewIPPoolController(
 klog.Info(fmt.Sprintf(format, args))
 })
 broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
-recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"})
+recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ippool-controller"})
 c := &IPPoolController{
 eventBroadcaster: broadcaster,
@@ -330,7 +381,6 @@ func NewIPPoolController(
 ipamblockSynced: ipamblockInformer.Informer().HasSynced,
 client: client,
 kubesphereClient: kubesphereClient,
-options: options,
 provider: provider,
 }
@@ -350,5 +400,11 @@ func NewIPPoolController(
 DeleteFunc: c.ipamblockHandle,
 })
+// register ippool webhook
+webhooks.RegisterValidator(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindIPPool).String(),
+&webhooks.ValidatorWrap{Obj: &networkv1alpha1.IPPool{}, Helper: c})
+webhooks.RegisterDefaulter(podv1.SchemeGroupVersion.WithKind("Pod").String(),
+&webhooks.DefaulterWrap{Obj: &podv1.Pod{}, Helper: provider})
 return c
 }
...
@@ -18,6 +18,9 @@ package ippool
 import (
 "flag"
+"testing"
+"time"
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
 v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,7 +32,6 @@ import (
 "kubesphere.io/kubesphere/pkg/controller/network/utils"
 "kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
 "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/ipam"
-"testing"
 )
 func TestIPPoolSuit(t *testing.T) {
@@ -49,31 +51,52 @@ var _ = Describe("test ippool", func() {
 Name: "testippool",
 },
 Spec: v1alpha1.IPPoolSpec{
 Type: v1alpha1.VLAN,
 CIDR: "192.168.0.0/24",
+BlockSize: 24,
 },
 Status: v1alpha1.IPPoolStatus{},
 }
 ksclient := ksfake.NewSimpleClientset()
 k8sclinet := k8sfake.NewSimpleClientset()
-options := ippool.Options{}
-p := ippool.NewProvider(ksclient, options)
+p := ippool.NewProvider(nil, ksclient, k8sclinet, v1alpha1.IPPoolTypeLocal, nil)
 ipamClient := ipam.NewIPAMClient(ksclient, v1alpha1.VLAN)
 ksInformer := ksinformers.NewSharedInformerFactory(ksclient, 0)
 ippoolInformer := ksInformer.Network().V1alpha1().IPPools()
 ipamblockInformer := ksInformer.Network().V1alpha1().IPAMBlocks()
-c := NewIPPoolController(ippoolInformer, ipamblockInformer, k8sclinet, ksclient, options, p)
+c := NewIPPoolController(ippoolInformer, ipamblockInformer, k8sclinet, ksclient, p)
 stopCh := make(chan struct{})
 go ksInformer.Start(stopCh)
 go c.Start(stopCh)
 It("test create ippool", func() {
-_, err := ksclient.NetworkV1alpha1().IPPools().Create(pool)
+clone := pool.DeepCopy()
+clone.Spec.CIDR = "testxxx"
+Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
+clone = pool.DeepCopy()
+clone.Spec.CIDR = "192.168.0.0/24"
+clone.Spec.RangeStart = "192.168.0.100"
+clone.Spec.RangeEnd = "192.168.0.99"
+Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
+clone = pool.DeepCopy()
+clone.Spec.CIDR = "192.168.0.0/24"
+clone.Spec.RangeStart = "192.168.3.100"
+clone.Spec.RangeEnd = "192.168.3.111"
+Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
+clone = pool.DeepCopy()
+clone.Spec.CIDR = "192.168.0.0/24"
+clone.Spec.BlockSize = 23
+Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
+clone = pool.DeepCopy()
+_, err := ksclient.NetworkV1alpha1().IPPools().Create(clone)
 Expect(err).ShouldNot(HaveOccurred())
 Eventually(func() bool {
 result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
 if len(result.Labels) != 3 {
@@ -85,7 +108,17 @@ var _ = Describe("test ippool", func() {
 }
 return true
-}).Should(Equal(true))
+}, 3*time.Second).Should(Equal(true))
+clone = pool.DeepCopy()
+Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
+})
+It("test update ippool", func() {
+old, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+new := old.DeepCopy()
+new.Spec.CIDR = "192.168.1.0/24"
+Expect(c.ValidateUpdate(old, new)).Should(HaveOccurred())
 })
 It("test ippool stats", func() {
@@ -102,10 +135,13 @@ var _ = Describe("test ippool", func() {
 }
 return true
-}).Should(Equal(true))
+}, 3*time.Second).Should(Equal(true))
 })
 It("test delete pool", func() {
+result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+Expect(c.ValidateDelete(result)).Should(HaveOccurred())
 ipamClient.ReleaseByHandle("testhandle")
 Eventually(func() bool {
 result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
@@ -114,7 +150,7 @@ var _ = Describe("test ippool", func() {
 }
 return true
-}).Should(Equal(true))
+}, 3*time.Second).Should(Equal(true))
 err := ksclient.NetworkV1alpha1().IPPools().Delete(pool.Name, &v1.DeleteOptions{})
 Expect(err).ShouldNot(HaveOccurred())
...
@@ -18,7 +18,6 @@ package nsnetworkpolicy
 import (
 "fmt"
-"kubesphere.io/kubesphere/pkg/controller/network/types"
 "net"
 "sort"
 "strings"
@@ -62,7 +61,7 @@ const (
 NodeNSNPAnnotationKey = "kubesphere.io/snat-node-ips"
-AnnotationNPNAME = types.NSNPPrefix + "network-isolate"
+AnnotationNPNAME = v1alpha1.NSNPPrefix + "network-isolate"
 //TODO: configure it
 DNSLocalIP = "169.254.25.10"
@@ -222,7 +221,7 @@ func (c *NSNetworkPolicyController) convertPeer(peer v1alpha1.NetworkPolicyPeer,
 func (c *NSNetworkPolicyController) convertToK8sNP(n *v1alpha1.NamespaceNetworkPolicy) (*netv1.NetworkPolicy, error) {
 np := &netv1.NetworkPolicy{
 ObjectMeta: metav1.ObjectMeta{
-Name: types.NSNPPrefix + n.Name,
+Name: v1alpha1.NSNPPrefix + n.Name,
 Namespace: n.Namespace,
 },
 Spec: netv1.NetworkPolicySpec{
@@ -564,7 +563,7 @@ func (c *NSNetworkPolicyController) syncNSNP(key string) error {
 if err != nil {
 if errors.IsNotFound(err) {
 klog.V(4).Infof("NSNP %v has been deleted", key)
-c.provider.Delete(c.provider.GetKey(types.NSNPPrefix+name, namespace))
+c.provider.Delete(c.provider.GetKey(v1alpha1.NSNPPrefix+name, namespace))
 return nil
 }
...
@@ -19,7 +19,6 @@ package provider
 import (
 "context"
 "fmt"
-"kubesphere.io/kubesphere/pkg/controller/network/types"
 "reflect"
 "strings"
 "sync"
@@ -36,6 +35,7 @@ import (
 "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/cache"
 "k8s.io/klog"
+"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
 )
 const (
@@ -246,7 +246,7 @@ func NewNsNetworkPolicyProvider(client kubernetes.Interface, npInformer informer
 // Filter in only objects that are written by policy controller.
 m := make(map[string]interface{})
 for _, policy := range policies {
-if strings.HasPrefix(policy.Name, types.NSNPPrefix) {
+if strings.HasPrefix(policy.Name, v1alpha1.NSNPPrefix) {
 policy.ObjectMeta = metav1.ObjectMeta{Name: policy.Name, Namespace: policy.Namespace}
 k := c.GetKey(policy.Name, policy.Namespace)
 m[k] = *policy
...
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nsnetworkpolicy
import (
"context"
corev1 "k8s.io/api/core/v1"
k8snet "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
"net"
"net/http"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
type NSNPValidator struct {
Client client.Client
decoder *admission.Decoder
}
func (v *NSNPValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
nsnp := &networkv1alpha1.NamespaceNetworkPolicy{}
err := v.decoder.Decode(req, nsnp)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
allErrs := field.ErrorList{}
allErrs = append(allErrs, v.ValidateNSNPSpec(&nsnp.Spec, field.NewPath("spec"))...)
if len(allErrs) != 0 {
return admission.Denied(allErrs.ToAggregate().Error())
}
return admission.Allowed("")
}
func (v *NSNPValidator) InjectDecoder(d *admission.Decoder) error {
v.decoder = d
return nil
}
// ValidateNetworkPolicyPort validates a NetworkPolicyPort
func (v *NSNPValidator) ValidateNetworkPolicyPort(port *k8snet.NetworkPolicyPort, portPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if port.Protocol != nil && *port.Protocol != corev1.ProtocolTCP && *port.Protocol != corev1.ProtocolUDP && *port.Protocol != corev1.ProtocolSCTP {
allErrs = append(allErrs, field.NotSupported(portPath.Child("protocol"), *port.Protocol, []string{string(corev1.ProtocolTCP), string(corev1.ProtocolUDP), string(corev1.ProtocolSCTP)}))
}
if port.Port != nil {
if port.Port.Type == intstr.Int {
for _, msg := range validation.IsValidPortNum(int(port.Port.IntVal)) {
allErrs = append(allErrs, field.Invalid(portPath.Child("port"), port.Port.IntVal, msg))
}
} else {
for _, msg := range validation.IsValidPortName(port.Port.StrVal) {
allErrs = append(allErrs, field.Invalid(portPath.Child("port"), port.Port.StrVal, msg))
}
}
}
return allErrs
}
func (v *NSNPValidator) ValidateServiceSelector(serviceSelector *networkv1alpha1.ServiceSelector, fldPath *field.Path) field.ErrorList {
service := &corev1.Service{}
allErrs := field.ErrorList{}
err := v.Client.Get(context.TODO(), client.ObjectKey{Namespace: serviceSelector.Namespace, Name: serviceSelector.Name}, service)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, serviceSelector, "cannot get service"))
return allErrs
}
if len(service.Spec.Selector) <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath, serviceSelector, "service should have selector"))
}
return allErrs
}
// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr)
if err != nil {
return nil, err
}
return net, nil
}
// ValidateIPBlock validates a cidr and the except fields of an IpBlock NetworkPolicyPeer
func (v *NSNPValidator) ValidateIPBlock(ipb *k8snet.IPBlock, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(ipb.CIDR) == 0 || ipb.CIDR == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("cidr"), ""))
return allErrs
}
cidrIPNet, err := ValidateCIDR(ipb.CIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), ipb.CIDR, "not a valid CIDR"))
return allErrs
}
exceptCIDR := ipb.Except
for i, exceptIP := range exceptCIDR {
exceptPath := fldPath.Child("except").Index(i)
exceptCIDR, err := ValidateCIDR(exceptIP)
if err != nil {
allErrs = append(allErrs, field.Invalid(exceptPath, exceptIP, "not a valid CIDR"))
return allErrs
}
if !cidrIPNet.Contains(exceptCIDR.IP) {
allErrs = append(allErrs, field.Invalid(exceptPath, exceptCIDR.IP, "not within CIDR range"))
}
}
return allErrs
}
// ValidateNSNPPeer validates a NetworkPolicyPeer
func (v *NSNPValidator) ValidateNSNPPeer(peer *networkv1alpha1.NetworkPolicyPeer, peerPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
numPeers := 0
if peer.ServiceSelector != nil {
numPeers++
allErrs = append(allErrs, v.ValidateServiceSelector(peer.ServiceSelector, peerPath.Child("service"))...)
}
if peer.NamespaceSelector != nil {
numPeers++
}
if peer.IPBlock != nil {
numPeers++
allErrs = append(allErrs, v.ValidateIPBlock(peer.IPBlock, peerPath.Child("ipBlock"))...)
}
if numPeers == 0 {
allErrs = append(allErrs, field.Required(peerPath, "must specify a peer"))
} else if numPeers > 1 && peer.IPBlock != nil {
allErrs = append(allErrs, field.Forbidden(peerPath, "may not specify both ipBlock and another peer"))
}
return allErrs
}
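// ValidateNSNPSpec validates every ingress and egress rule of a NamespaceNetworkPolicy spec and aggregates the field errors.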
func (v *NSNPValidator) ValidateNSNPSpec(spec *networkv1alpha1.NamespaceNetworkPolicySpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// Validate ingress rules.
for i, ingress := range spec.Ingress {
ingressPath := fldPath.Child("ingress").Index(i)
for i, port := range ingress.Ports {
portPath := ingressPath.Child("ports").Index(i)
allErrs = append(allErrs, v.ValidateNetworkPolicyPort(&port, portPath)...)
}
for i, from := range ingress.From {
fromPath := ingressPath.Child("from").Index(i)
allErrs = append(allErrs, v.ValidateNSNPPeer(&from, fromPath)...)
}
}
// Validate egress rules
for i, egress := range spec.Egress {
egressPath := fldPath.Child("egress").Index(i)
for i, port := range egress.Ports {
portPath := egressPath.Child("ports").Index(i)
allErrs = append(allErrs, v.ValidateNetworkPolicyPort(&port, portPath)...)
}
for i, to := range egress.To {
toPath := egressPath.Child("to").Index(i)
allErrs = append(allErrs, v.ValidateNSNPPeer(&to, toPath)...)
}
}
return allErrs
}
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
const (
NSNPPrefix = "nsnp-"
)
package webhooks
import (
"context"
"encoding/json"
"net/http"
"sync"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// Defaulter defines functions for setting defaults on resources
type Defaulter interface {
Default(obj runtime.Object) error
}
type DefaulterWrap struct {
Obj runtime.Object
Helper Defaulter
}
type MutatingHandler struct {
C client.Client
decoder *admission.Decoder
}
var _ admission.DecoderInjector = &MutatingHandler{}
// InjectDecoder injects the decoder into a MutatingHandler.
func (h *MutatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
}
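// defaulters is a registry of DefaulterWrap entries keyed by GroupVersionKind string, guarded by a mutex.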
type defaulters struct {
ds map[string]*DefaulterWrap
lock sync.RWMutex
}
var (
ds defaulters
)
func init() {
ds = defaulters{
ds: make(map[string]*DefaulterWrap),
lock: sync.RWMutex{},
}
}
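// RegisterDefaulter registers a DefaulterWrap under the given GroupVersionKind string.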
func RegisterDefaulter(name string, d *DefaulterWrap) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.ds[name] = d
}
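// UnRegisterDefaulter removes the DefaulterWrap registered under the given GroupVersionKind string.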
func UnRegisterDefaulter(name string) {
ds.lock.Lock()
defer ds.lock.Unlock()
delete(ds.ds, name)
}
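// GetDefaulter returns the DefaulterWrap registered under the given GroupVersionKind string, or nil if none was registered.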
func GetDefaulter(name string) *DefaulterWrap {
ds.lock.Lock()
defer ds.lock.Unlock()
return ds.ds[name]
}
// Handle handles admission requests.
func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
defaulter := GetDefaulter(req.Kind.String())
if defaulter == nil {
return admission.Denied("crd has webhook configured, but the controller does not register the corresponding processing logic and refuses the operation by default.")
}
// Get the object in the request
obj := defaulter.Obj.DeepCopyObject()
err := h.decoder.Decode(req, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
// Default the object
defaulter.Helper.Default(obj)
marshalled, err := json.Marshal(obj)
if err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
// Create the patch
return admission.PatchResponseFromRaw(req.Object.Raw, marshalled)
}
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhooks
import (
"context"
"net/http"
"sync"
"k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// Validator defines functions for validating an operation
type Validator interface {
ValidateCreate(obj runtime.Object) error
ValidateUpdate(old runtime.Object, new runtime.Object) error
ValidateDelete(obj runtime.Object) error
}
type ValidatorWrap struct {
Obj runtime.Object
Helper Validator
}
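// validators is a registry of ValidatorWrap entries keyed by GroupVersionKind string, guarded by a mutex.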
type validators struct {
vs map[string]*ValidatorWrap
lock sync.RWMutex
}
var (
vs validators
)
func init() {
vs = validators{
vs: make(map[string]*ValidatorWrap),
lock: sync.RWMutex{},
}
}
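// RegisterValidator registers a ValidatorWrap under the given GroupVersionKind string.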
func RegisterValidator(name string, v *ValidatorWrap) {
vs.lock.Lock()
defer vs.lock.Unlock()
vs.vs[name] = v
}
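// UnRegisterValidator removes the ValidatorWrap registered under the given GroupVersionKind string.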
func UnRegisterValidator(name string) {
vs.lock.Lock()
defer vs.lock.Unlock()
delete(vs.vs, name)
}
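// GetValidator returns the ValidatorWrap registered under the given GroupVersionKind string, or nil if none was registered.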
func GetValidator(name string) *ValidatorWrap {
vs.lock.Lock()
defer vs.lock.Unlock()
return vs.vs[name]
}
type ValidatingHandler struct {
C client.Client
decoder *admission.Decoder
}
var _ admission.DecoderInjector = &ValidatingHandler{}
// InjectDecoder injects the decoder into a ValidatingHandler.
func (h *ValidatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
}
// Handle handles admission requests.
func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
validator := GetValidator(req.Kind.String())
if validator == nil {
return admission.Denied("crd has webhook configured, but the controller does not register the corresponding processing logic and refuses the operation by default.")
}
// Get the object in the request
obj := validator.Obj.DeepCopyObject()
if req.Operation == v1beta1.Create {
err := h.decoder.Decode(req, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateCreate(obj)
if err != nil {
return admission.Denied(err.Error())
}
}
if req.Operation == v1beta1.Update {
oldObj := obj.DeepCopyObject()
err := h.decoder.DecodeRaw(req.Object, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = h.decoder.DecodeRaw(req.OldObject, oldObj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateUpdate(oldObj, obj)
if err != nil {
return admission.Denied(err.Error())
}
}
if req.Operation == v1beta1.Delete {
// In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346
// OldObject contains the object being deleted
err := h.decoder.DecodeRaw(req.OldObject, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateDelete(obj)
if err != nil {
return admission.Denied(err.Error())
}
}
return admission.Allowed("")
}
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ippool
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
k8sinformers "k8s.io/client-go/informers"
"kubesphere.io/kubesphere/pkg/api"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/query"
informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
)
type ippoolGetter struct {
informers informers.SharedInformerFactory
k8sInformers k8sinformers.SharedInformerFactory
}
func New(informers informers.SharedInformerFactory, k8sInformers k8sinformers.SharedInformerFactory) v1alpha3.Interface {
return &ippoolGetter{
informers: informers,
k8sInformers: k8sInformers,
}
}
func (n ippoolGetter) Get(namespace, name string) (runtime.Object, error) {
return n.informers.Network().V1alpha1().IPPools().Lister().Get(name)
}
func (n ippoolGetter) List(namespace string, query *query.Query) (*api.ListResult, error) {
var result []runtime.Object
if namespace != "" {
workspace := ""
ns, err := n.k8sInformers.Core().V1().Namespaces().Lister().Get(namespace)
if err != nil {
return nil, err
}
if ns.Labels != nil {
workspace = ns.Labels[constants.WorkspaceLabelKey]
}
ps, err := n.informers.Network().V1alpha1().IPPools().Lister().List(labels.SelectorFromSet(
map[string]string{
networkv1alpha1.IPPoolDefaultLabel: "",
}))
if err != nil {
return nil, err
}
for _, p := range ps {
result = append(result, p)
}
if workspace != "" {
query.LabelSelector = labels.SelectorFromSet(
map[string]string{
constants.WorkspaceLabelKey: workspace,
}).String()
ps, err := n.informers.Network().V1alpha1().IPPools().Lister().List(query.Selector())
if err != nil {
return nil, err
}
for _, p := range ps {
result = append(result, p)
}
}
} else {
ps, err := n.informers.Network().V1alpha1().IPPools().Lister().List(labels.Everything())
if err != nil {
return nil, err
}
for _, p := range ps {
result = append(result, p)
}
}
return v1alpha3.DefaultList(result, query, n.compare, n.filter), nil
}
func (n ippoolGetter) filter(item runtime.Object, filter query.Filter) bool {
p, ok := item.(*networkv1alpha1.IPPool)
if !ok {
return false
}
return v1alpha3.DefaultObjectMetaFilter(p.ObjectMeta, filter)
}
func (n ippoolGetter) compare(left runtime.Object, right runtime.Object, field query.Field) bool {
leftP, ok := left.(*networkv1alpha1.IPPool)
if !ok {
return false
}
rightP, ok := right.(*networkv1alpha1.IPPool)
if !ok {
return true
}
return v1alpha3.DefaultObjectMetaCompare(leftP.ObjectMeta, rightP.ObjectMeta, field)
}
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ippool
import (
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
"kubesphere.io/kubesphere/pkg/api"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/query"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
"testing"
)
func TestListIPPools(t *testing.T) {
tests := []struct {
description string
namespace string
query *query.Query
expected *api.ListResult
expectedErr error
}{
{
"test name filter",
"",
&query.Query{
Pagination: &query.Pagination{
Limit: 10,
Offset: 0,
},
SortBy: query.FieldName,
Ascending: false,
Filters: map[query.Field]query.Value{
query.FieldName: query.Value("foo2"),
},
},
&api.ListResult{
Items: []interface{}{foo2},
TotalItems: 1,
},
nil,
},
{
"test namespace filter",
"ns1",
&query.Query{
Pagination: &query.Pagination{
Limit: 10,
Offset: 0,
},
SortBy: query.FieldName,
Ascending: false,
},
&api.ListResult{
Items: []interface{}{foo1},
TotalItems: 1,
},
nil,
},
}
getter := prepare()
for _, test := range tests {
got, err := getter.List(test.namespace, test.query)
if test.expectedErr != nil && err != test.expectedErr {
t.Errorf("expected error, got nothing")
} else if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, test.expected); diff != "" {
t.Errorf("%T differ (-got, +want): %s", test.expected, diff)
}
}
}
var (
foo1 = &v1alpha1.IPPool{
ObjectMeta: metav1.ObjectMeta{
Name: "foo1",
Labels: map[string]string{
constants.WorkspaceLabelKey: "wk1",
},
},
}
foo2 = &v1alpha1.IPPool{
ObjectMeta: metav1.ObjectMeta{
Name: "foo2",
},
}
foo3 = &v1alpha1.IPPool{
ObjectMeta: metav1.ObjectMeta{
Name: "foo3",
},
}
ns = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns1",
Labels: map[string]string{
constants.WorkspaceLabelKey: "wk1",
},
},
}
wk = &tenantv1alpha1.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: "wk1",
},
}
ps = []interface{}{foo1, foo2, foo3}
)
func prepare() v1alpha3.Interface {
client := fake.NewSimpleClientset()
k8sClient := k8sfake.NewSimpleClientset()
informer := informers.NewSharedInformerFactory(client, 0)
k8sInformer := k8sinformers.NewSharedInformerFactory(k8sClient, 0)
for _, p := range ps {
informer.Network().V1alpha1().IPPools().Informer().GetIndexer().Add(p)
}
informer.Tenant().V1alpha1().Workspaces().Informer().GetIndexer().Add(wk)
k8sInformer.Core().V1().Namespaces().Informer().GetIndexer().Add(ns)
return New(informer, k8sInformer)
}
...@@ -26,6 +26,7 @@ import ( ...@@ -26,6 +26,7 @@ import (
"kubesphere.io/kubesphere/pkg/api" "kubesphere.io/kubesphere/pkg/api"
devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3" devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1" typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1"
...@@ -55,6 +56,7 @@ import ( ...@@ -55,6 +56,7 @@ import (
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/group" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/group"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/groupbinding" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/groupbinding"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/ingress" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/ingress"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/ippool"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/job" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/job"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/loginrecord" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/loginrecord"
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/namespace" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/namespace"
...@@ -99,6 +101,7 @@ func NewResourceGetter(factory informers.InformerFactory) *ResourceGetter { ...@@ -99,6 +101,7 @@ func NewResourceGetter(factory informers.InformerFactory) *ResourceGetter {
// kubesphere resources // kubesphere resources
getters[devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject)] = devops.New(factory.KubeSphereSharedInformerFactory()) getters[devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject)] = devops.New(factory.KubeSphereSharedInformerFactory())
getters[tenantv1alpha1.SchemeGroupVersion.WithResource(tenantv1alpha1.ResourcePluralWorkspace)] = workspace.New(factory.KubeSphereSharedInformerFactory()) getters[tenantv1alpha1.SchemeGroupVersion.WithResource(tenantv1alpha1.ResourcePluralWorkspace)] = workspace.New(factory.KubeSphereSharedInformerFactory())
getters[networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralIPPool)] = ippool.New(factory.KubeSphereSharedInformerFactory(), factory.KubernetesSharedInformerFactory())
getters[tenantv1alpha1.SchemeGroupVersion.WithResource(tenantv1alpha2.ResourcePluralWorkspaceTemplate)] = workspacetemplate.New(factory.KubeSphereSharedInformerFactory()) getters[tenantv1alpha1.SchemeGroupVersion.WithResource(tenantv1alpha2.ResourcePluralWorkspaceTemplate)] = workspacetemplate.New(factory.KubeSphereSharedInformerFactory())
getters[iamv1alpha2.SchemeGroupVersion.WithResource(iamv1alpha2.ResourcesPluralGlobalRole)] = globalrole.New(factory.KubeSphereSharedInformerFactory()) getters[iamv1alpha2.SchemeGroupVersion.WithResource(iamv1alpha2.ResourcesPluralGlobalRole)] = globalrole.New(factory.KubeSphereSharedInformerFactory())
getters[iamv1alpha2.SchemeGroupVersion.WithResource(iamv1alpha2.ResourcesPluralWorkspaceRole)] = workspacerole.New(factory.KubeSphereSharedInformerFactory()) getters[iamv1alpha2.SchemeGroupVersion.WithResource(iamv1alpha2.ResourcesPluralWorkspaceRole)] = workspacerole.New(factory.KubeSphereSharedInformerFactory())
......
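With the ippool getter registered above, the new resource becomes reachable through the generic resource API. A hypothetical request, with the URL pattern assumed from the other v1alpha3 cluster-scoped resources rather than taken from this commit:
GET /kapis/resources.kubesphere.io/v1alpha3/ippools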
...@@ -17,31 +17,64 @@ limitations under the License. ...@@ -17,31 +17,64 @@ limitations under the License.
package calico package calico
import ( import (
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net"
"time"
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3" v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend/model" "github.com/projectcalico/libcalico-go/lib/backend/model"
cnet "github.com/projectcalico/libcalico-go/lib/net" cnet "github.com/projectcalico/libcalico-go/lib/net"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informercorev1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
"k8s.io/klog" "k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/network/calicov3" "kubesphere.io/kubesphere/pkg/apis/network/calicov3"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned" kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/k8s"
calicoset "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/clientset/versioned" calicoset "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/clientset/versioned"
calicoInformer "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/informers/externalversions"
blockInformer "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/informers/externalversions/network/calicov3"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
) )
const ( const (
CalicoNamespaceAnnotationIPPoolV4 = "cni.projectcalico.org/ipv4pools" CalicoAnnotationIPPoolV4 = "cni.projectcalico.org/ipv4pools"
CalicoNamespaceAnnotationIPPoolV6 = "cni.projectcalico.org/ipv6pools" CalicoAnnotationIPPoolV6 = "cni.projectcalico.org/ipv6pools"
CalicoPodAnnotationIPAddr = "cni.projectcalico.org/ipAddrs" CalicoPodAnnotationIPAddr = "cni.projectcalico.org/ipAddrs"
CalicoPodAnnotationPodIP = "cni.projectcalico.org/podIP"
// Common attributes which may be set on allocations by clients.
IPAMBlockAttributePod = "pod"
IPAMBlockAttributeNamespace = "namespace"
IPAMBlockAttributeNode = "node"
IPAMBlockAttributeType = "type"
IPAMBlockAttributeTypeIPIP = "ipipTunnelAddress"
IPAMBlockAttributeTypeVXLAN = "vxlanTunnelAddress"
CALICO_IPV4POOL_IPIP = "CALICO_IPV4POOL_IPIP"
CALICO_IPV4POOL_VXLAN = "CALICO_IPV4POOL_VXLAN"
CALICO_IPV4POOL_NAT_OUTGOING = "CALICO_IPV4POOL_NAT_OUTGOING"
CalicoNodeDaemonset = "calico-node"
CalicoNodeNamespace = "kube-system"
DefaultBlockSize = 25
// default re-sync period for all informer factories
defaultResync = 600 * time.Second
) )
var ( var (
...@@ -49,9 +82,15 @@ var ( ...@@ -49,9 +82,15 @@ var (
) )
type provider struct { type provider struct {
client calicoset.Interface client calicoset.Interface
ksclient kubesphereclient.Interface ksclient kubesphereclient.Interface
options Options k8sclient clientset.Interface
pods informercorev1.PodInformer
block blockInformer.IPAMBlockInformer
queue workqueue.RateLimitingInterface
poolQueue workqueue.RateLimitingInterface
options Options
} }
func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error { func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
...@@ -70,6 +109,12 @@ func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error { ...@@ -70,6 +109,12 @@ func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
}, },
} }
_, cidr, _ := net.ParseCIDR(pool.Spec.CIDR)
size, _ := cidr.Mask.Size()
if size > DefaultBlockSize {
calicoPool.Spec.BlockSize = size
}
err := controllerutil.SetControllerReference(pool, calicoPool, scheme.Scheme) err := controllerutil.SetControllerReference(pool, calicoPool, scheme.Scheme)
if err != nil { if err != nil {
klog.Warningf("cannot set reference for calico ippool %s, err=%v", pool.Name, err) klog.Warningf("cannot set reference for calico ippool %s, err=%v", pool.Name, err)
...@@ -88,7 +133,7 @@ func (c provider) UpdateIPPool(pool *v1alpha1.IPPool) error { ...@@ -88,7 +133,7 @@ func (c provider) UpdateIPPool(pool *v1alpha1.IPPool) error {
} }
func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error) { func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error) {
stats := &v1alpha1.IPPool{} stats := pool.DeepCopy()
calicoPool, err := c.client.CrdCalicov3().IPPools().Get(pool.Name, v1.GetOptions{}) calicoPool, err := c.client.CrdCalicov3().IPPools().Get(pool.Name, v1.GetOptions{})
if err != nil { if err != nil {
...@@ -100,24 +145,46 @@ func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error ...@@ -100,24 +145,46 @@ func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error
return nil, err return nil, err
} }
stats.Status.Capacity = pool.NumAddresses() if stats.Status.Capacity == 0 {
stats.Status.Reserved = 0 stats.Status.Capacity = pool.NumAddresses()
stats.Status.Unallocated = 0 }
stats.Status.Synced = true stats.Status.Synced = true
stats.Status.Allocations = 0 stats.Status.Allocations = 0
stats.Status.Reserved = 0
if stats.Status.Workspaces == nil {
stats.Status.Workspaces = make(map[string]v1alpha1.WorkspaceStatus)
}
if len(blocks) <= 0 { if len(blocks) <= 0 {
stats.Status.Unallocated = pool.NumAddresses() stats.Status.Unallocated = pool.NumAddresses()
stats.Status.Allocations = 0 stats.Status.Allocations = 0
return stats, nil } else {
for _, block := range blocks {
stats.Status.Allocations += block.NumAddresses() - block.NumFreeAddresses() - block.NumReservedAddresses()
stats.Status.Reserved += block.NumReservedAddresses()
}
stats.Status.Unallocated = stats.Status.Capacity - stats.Status.Allocations - stats.Status.Reserved
} }
for _, block := range blocks { wks, err := c.getAssociatedWorkspaces(pool)
stats.Status.Allocations += block.NumAddresses() - block.NumFreeAddresses() - block.NumReservedAddresses() if err != nil {
stats.Status.Reserved += block.NumReservedAddresses() return nil, err
}
for _, wk := range wks {
status, err := c.getWorkspaceStatus(wk, pool.GetName())
if err != nil {
return nil, err
}
stats.Status.Workspaces[wk] = *status
} }
stats.Status.Unallocated = stats.Status.Capacity - stats.Status.Allocations - stats.Status.Reserved for name, wk := range stats.Status.Workspaces {
if wk.Allocations == 0 {
delete(stats.Status.Workspaces, name)
}
}
return stats, nil return stats, nil
} }
...@@ -240,6 +307,9 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) { ...@@ -240,6 +307,9 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
// Get the pool so that we can find the CIDR associated with it. // Get the pool so that we can find the CIDR associated with it.
calicoPool, err := c.client.CrdCalicov3().IPPools().Get(pool.Name, v1.GetOptions{}) calicoPool, err := c.client.CrdCalicov3().IPPools().Get(pool.Name, v1.GetOptions{})
if err != nil { if err != nil {
if k8serrors.IsNotFound(err) {
return true, nil
}
return false, err return false, err
} }
...@@ -318,6 +388,9 @@ func (c provider) syncIPPools() error { ...@@ -318,6 +388,9 @@ func (c provider) syncIPPools() error {
TypeMeta: v1.TypeMeta{}, TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: calicoPool.Name, Name: calicoPool.Name,
Labels: map[string]string{
v1alpha1.IPPoolDefaultLabel: "",
},
}, },
Spec: v1alpha1.IPPoolSpec{ Spec: v1alpha1.IPPoolSpec{
Type: v1alpha1.Calico, Type: v1alpha1.Calico,
...@@ -339,57 +412,233 @@ func (c provider) syncIPPools() error { ...@@ -339,57 +412,233 @@ func (c provider) syncIPPools() error {
return nil return nil
} }
func (c provider) SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error { func (p provider) getAssociatedWorkspaces(pool *v1alpha1.IPPool) ([]string, error) {
blockWatch, err := c.client.CrdCalicov3().IPAMBlocks().Watch(v1.ListOptions{}) var result []string
poolLabel := constants.WorkspaceLabelKey
if pool.GetLabels() == nil || pool.GetLabels()[poolLabel] == "" {
wks, err := p.ksclient.TenantV1alpha1().Workspaces().List(v1.ListOptions{})
if err != nil {
return nil, err
}
for _, wk := range wks.Items {
result = append(result, wk.GetName())
}
return result, nil
}
return append(result, pool.GetLabels()[poolLabel]), nil
}
func (p provider) getWorkspaceStatus(name string, poolName string) (*v1alpha1.WorkspaceStatus, error) {
var result v1alpha1.WorkspaceStatus
namespaces, err := p.k8sclient.CoreV1().Namespaces().List(v1.ListOptions{
LabelSelector: labels.SelectorFromSet(
map[string]string{
constants.WorkspaceLabelKey: name,
},
).String(),
})
if err != nil { if err != nil {
return err return nil, err
} }
ch := blockWatch.ResultChan() for _, ns := range namespaces.Items {
defer blockWatch.Stop() pods, err := p.k8sclient.CoreV1().Pods(ns.GetName()).List(v1.ListOptions{})
if err != nil {
return nil, err
}
for _, pod := range pods.Items {
if pod.GetLabels() != nil && pod.GetLabels()[v1alpha1.IPPoolNameLabel] == poolName {
result.Allocations++
}
}
}
return &result, nil
}
func (p provider) Type() string {
return v1alpha1.IPPoolTypeCalico
}
func (p provider) SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error {
defer utilruntime.HandleCrash()
defer p.queue.ShutDown()
klog.Info("starting calico block controller")
defer klog.Info("shutting down calico block controller")
p.poolQueue = q
go p.block.Informer().Run(stopCh)
if !cache.WaitForCacheSync(stopCh, p.pods.Informer().HasSynced, p.block.Informer().HasSynced) {
klog.Fatal("failed to wait for caches to sync")
}
for i := 0; i < 5; i++ {
go wait.Until(p.runWorker, time.Second, stopCh)
}
for { <-stopCh
select { return nil
case <-stopCh: }
func (p provider) processBlock(name string) error {
block, err := p.block.Lister().Get(name)
if err != nil {
if k8serrors.IsNotFound(err) {
return nil return nil
case event, ok := <-ch: }
if !ok { return err
// End of results. }
return fmt.Errorf("calico ipamblock watch closed") _, blockCIDR, _ := cnet.ParseCIDR(block.Spec.CIDR)
}
if event.Type == watch.Added || event.Type == watch.Deleted || event.Type == watch.Modified { poolName := block.Labels[v1alpha1.IPPoolNameLabel]
block := event.Object.(*calicov3.IPAMBlock) if poolName == "" {
_, blockCIDR, _ := cnet.ParseCIDR(block.Spec.CIDR) pools, err := p.ksclient.NetworkV1alpha1().IPPools().List(v1.ListOptions{})
if err != nil {
return err
}
if block.Labels[v1alpha1.IPPoolNameLabel] != "" { for _, pool := range pools.Items {
q.Add(block.Labels[v1alpha1.IPPoolNameLabel]) _, poolCIDR, _ := cnet.ParseCIDR(pool.Spec.CIDR)
continue if poolCIDR.IsNetOverlap(blockCIDR.IPNet) {
} poolName = pool.Name
pools, err := c.ksclient.NetworkV1alpha1().IPPools().List(v1.ListOptions{}) block.Labels = map[string]string{
if err != nil { v1alpha1.IPPoolNameLabel: pool.Name,
continue
} }
p.client.CrdCalicov3().IPAMBlocks().Update(block)
break
}
}
}
for _, pool := range pools.Items { for _, podAttr := range block.Spec.Attributes {
_, poolCIDR, _ := cnet.ParseCIDR(pool.Spec.CIDR) name := podAttr.AttrSecondary[IPAMBlockAttributePod]
if poolCIDR.IsNetOverlap(blockCIDR.IPNet) { namespace := podAttr.AttrSecondary[IPAMBlockAttributeNamespace]
q.Add(pool.Name)
if name == "" || namespace == "" {
block.Labels = map[string]string{ continue
v1alpha1.IPPoolNameLabel: pool.Name, }
}
c.client.CrdCalicov3().IPAMBlocks().Update(block) pod, err := p.pods.Lister().Pods(namespace).Get(name)
break if err != nil {
} continue
}
labels := pod.GetLabels()
if labels != nil {
poolLabel := labels[v1alpha1.IPPoolNameLabel]
if poolLabel != "" {
continue
}
}
retry.RetryOnConflict(retry.DefaultBackoff, func() error {
pod, err = p.k8sclient.CoreV1().Pods(namespace).Get(name, v1.GetOptions{})
if err != nil {
return err
}
labels := pod.GetLabels()
if labels != nil {
poolLabel := labels[v1alpha1.IPPoolNameLabel]
if poolLabel != "" {
return nil
} }
} else {
pod.Labels = make(map[string]string)
}
if pod.GetAnnotations() == nil {
pod.Annotations = make(map[string]string)
} }
annostrs, _ := json.Marshal([]string{poolName})
pod.GetAnnotations()[CalicoAnnotationIPPoolV4] = string(annostrs)
pod.Labels[v1alpha1.IPPoolNameLabel] = poolName
_, err = p.k8sclient.CoreV1().Pods(namespace).Update(pod)
return err
})
}
p.poolQueue.Add(poolName)
return nil
}
func (p provider) processBlockItem() bool {
key, quit := p.queue.Get()
if quit {
return false
}
defer p.queue.Done(key)
err := p.processBlock(key.(string))
if err == nil {
p.queue.Forget(key)
return true
}
utilruntime.HandleError(fmt.Errorf("error processing calico block %v (will retry): %v", key, err))
p.queue.AddRateLimited(key)
return true
}
func (p provider) runWorker() {
for p.processBlockItem() {
}
}
func (p provider) addBlock(obj interface{}) {
block, ok := obj.(*calicov3.IPAMBlock)
if !ok {
return
}
p.queue.Add(block.Name)
}
func (p provider) Default(obj runtime.Object) error {
pod, ok := obj.(*corev1.Pod)
if !ok {
return nil
}
annos := pod.GetAnnotations()
if annos == nil {
pod.Annotations = make(map[string]string)
}
if annos[CalicoAnnotationIPPoolV4] == "" {
pools, err := p.ksclient.NetworkV1alpha1().IPPools().List(v1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
v1alpha1.IPPoolDefaultLabel: "",
}).String(),
})
if err != nil {
return err
}
var poolNames []string
for _, pool := range pools.Items {
poolNames = append(poolNames, pool.Name)
}
if len(poolNames) > 0 {
annostrs, _ := json.Marshal(poolNames)
pod.Annotations[CalicoAnnotationIPPoolV4] = string(annostrs)
} }
} }
return nil
} }
func NewProvider(ksclient kubesphereclient.Interface, options Options, k8sOptions *k8s.KubernetesOptions) provider { func NewProvider(podInformer informercorev1.PodInformer, ksclient kubesphereclient.Interface, k8sClient clientset.Interface, k8sOptions *k8s.KubernetesOptions) provider {
config, err := clientcmd.BuildConfigFromFlags("", k8sOptions.KubeConfig) config, err := clientcmd.BuildConfigFromFlags("", k8sOptions.KubeConfig)
if err != nil { if err != nil {
klog.Fatalf("failed to build k8s config , err=%v", err) klog.Fatalf("failed to build k8s config , err=%v", err)
...@@ -401,11 +650,49 @@ func NewProvider(ksclient kubesphereclient.Interface, options Options, k8sOption ...@@ -401,11 +650,49 @@ func NewProvider(ksclient kubesphereclient.Interface, options Options, k8sOption
klog.Fatalf("failed to new calico client , err=%v", err) klog.Fatalf("failed to new calico client , err=%v", err)
} }
p := provider{ ds, err := k8sClient.AppsV1().DaemonSets(CalicoNodeNamespace).Get(CalicoNodeDaemonset, v1.GetOptions{})
client: client, if err != nil {
ksclient: ksclient, klog.Fatalf("failed to get calico-node daemonset in kube-system, err=%v", err)
options: options,
} }
opts := Options{
IPIPMode: "Always",
VXLANMode: "Never",
NATOutgoing: true,
}
envs := ds.Spec.Template.Spec.Containers[0].Env
for _, env := range envs {
if env.Name == CALICO_IPV4POOL_IPIP {
opts.IPIPMode = env.Value
}
if env.Name == CALICO_IPV4POOL_VXLAN {
opts.VXLANMode = env.Value
}
if env.Name == CALICO_IPV4POOL_NAT_OUTGOING {
if env.Value != "true" {
opts.NATOutgoing = false
}
}
}
p := provider{
client: client,
ksclient: ksclient,
k8sclient: k8sClient,
pods: podInformer,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "calicoBlock"),
options: opts,
}
blockI := calicoInformer.NewSharedInformerFactory(client, defaultResync).Crd().Calicov3().IPAMBlocks()
blockI.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: p.addBlock,
UpdateFunc: func(old, new interface{}) {
p.addBlock(new)
},
})
p.block = blockI
if err := p.syncIPPools(); err != nil { if err := p.syncIPPools(); err != nil {
klog.Fatalf("failed to sync calico ippool to kubesphere ippool, err=%v", err) klog.Fatalf("failed to sync calico ippool to kubesphere ippool, err=%v", err)
......
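The CIDR-to-block-size handling added in CreateIPPool above can be reproduced with the standard library alone; a self-contained sketch, where defaultBlockSize mirrors the DefaultBlockSize constant introduced in this file:
package main

import (
	"fmt"
	"net"
)

const defaultBlockSize = 25 // mirrors DefaultBlockSize above

func blockSizeFor(cidr string) (int, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return 0, err
	}
	// A pool narrower than a default Calico block must use its own prefix
	// length as the block size, otherwise a block would not fit inside the pool.
	ones, _ := ipnet.Mask.Size()
	if ones > defaultBlockSize {
		return ones, nil
	}
	return defaultBlockSize, nil
}

func main() {
	size, _ := blockSizeFor("192.168.0.0/28")
	fmt.Println(size) // 28: a /28 pool is smaller than a /25 block
}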
...@@ -18,14 +18,17 @@ package calico ...@@ -18,14 +18,17 @@ package calico
import ( import (
"flag" "flag"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
"testing"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog" "k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake" ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
"kubesphere.io/kubesphere/pkg/constants"
calicofake "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/clientset/versioned/fake" calicofake "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico/client/clientset/versioned/fake"
"testing"
) )
func TestCalicoIPPoolSuit(t *testing.T) { func TestCalicoIPPoolSuit(t *testing.T) {
...@@ -43,6 +46,9 @@ var _ = Describe("test calico ippool", func() { ...@@ -43,6 +46,9 @@ var _ = Describe("test calico ippool", func() {
TypeMeta: v1.TypeMeta{}, TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "testippool", Name: "testippool",
Labels: map[string]string{
constants.WorkspaceLabelKey: "wk1",
},
}, },
Spec: v1alpha1.IPPoolSpec{ Spec: v1alpha1.IPPoolSpec{
Type: v1alpha1.Calico, Type: v1alpha1.Calico,
...@@ -52,7 +58,23 @@ var _ = Describe("test calico ippool", func() { ...@@ -52,7 +58,23 @@ var _ = Describe("test calico ippool", func() {
Status: v1alpha1.IPPoolStatus{}, Status: v1alpha1.IPPoolStatus{},
} }
ksclient := ksfake.NewSimpleClientset(pool) wk1 := &tenantv1alpha1.Workspace{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "wk1",
},
Spec: tenantv1alpha1.WorkspaceSpec{},
Status: tenantv1alpha1.WorkspaceStatus{},
}
wk2 := &tenantv1alpha1.Workspace{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "wk2",
},
Spec: tenantv1alpha1.WorkspaceSpec{},
Status: tenantv1alpha1.WorkspaceStatus{},
}
ksclient := ksfake.NewSimpleClientset(pool, wk1, wk2)
client := calicofake.NewSimpleClientset() client := calicofake.NewSimpleClientset()
p := provider{ p := provider{
...@@ -69,4 +91,15 @@ var _ = Describe("test calico ippool", func() { ...@@ -69,4 +91,15 @@ var _ = Describe("test calico ippool", func() {
err := p.CreateIPPool(pool) err := p.CreateIPPool(pool)
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
}) })
It("test get workspace", func() {
result, err := p.getAssociatedWorkspaces(pool)
Expect(err).ShouldNot(HaveOccurred())
Expect(len(result)).Should(Equal(1))
pool.Labels = nil
result, err = p.getAssociatedWorkspaces(pool)
Expect(err).ShouldNot(HaveOccurred())
Expect(len(result)).Should(Equal(2))
})
}) })
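For reference, the shape of the annotation that the Default() hook above writes onto a pod can be reproduced in isolation (a sketch; the annotation key is the CalicoAnnotationIPPoolV4 constant, the pool name is illustrative):
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The webhook stores the candidate pool names as a JSON array in the
	// cni.projectcalico.org/ipv4pools annotation, which Calico IPAM consumes.
	pools := []string{"testippool"}
	value, _ := json.Marshal(pools)
	fmt.Printf("cni.projectcalico.org/ipv4pools: %s\n", value) // ["testippool"]
}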
...@@ -17,9 +17,14 @@ limitations under the License. ...@@ -17,9 +17,14 @@ limitations under the License.
package ippool package ippool
import ( import (
"k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned" kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
calicoclient "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/calico"
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool/ipam" "kubesphere.io/kubesphere/pkg/simple/client/network/ippool/ipam"
) )
...@@ -30,6 +35,8 @@ type Provider interface { ...@@ -30,6 +35,8 @@ type Provider interface {
UpdateIPPool(pool *networkv1alpha1.IPPool) error UpdateIPPool(pool *networkv1alpha1.IPPool) error
GetIPPoolStats(pool *networkv1alpha1.IPPool) (*networkv1alpha1.IPPool, error) GetIPPoolStats(pool *networkv1alpha1.IPPool) (*networkv1alpha1.IPPool, error)
SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error
Type() string
Default(obj runtime.Object) error
} }
type provider struct { type provider struct {
...@@ -37,6 +44,14 @@ type provider struct { ...@@ -37,6 +44,14 @@ type provider struct {
ipamclient ipam.IPAMClient ipamclient ipam.IPAMClient
} }
func (p provider) Type() string {
return networkv1alpha1.IPPoolTypeLocal
}
func (p provider) Default(obj runtime.Object) error {
return nil
}
func (p provider) DeleteIPPool(pool *networkv1alpha1.IPPool) (bool, error) { func (p provider) DeleteIPPool(pool *networkv1alpha1.IPPool) (bool, error) {
blocks, err := p.ipamclient.ListBlocks(pool.Name) blocks, err := p.ipamclient.ListBlocks(pool.Name)
if err != nil { if err != nil {
...@@ -77,22 +92,36 @@ func (p provider) GetIPPoolStats(pool *networkv1alpha1.IPPool) (*networkv1alpha1 ...@@ -77,22 +92,36 @@ func (p provider) GetIPPoolStats(pool *networkv1alpha1.IPPool) (*networkv1alpha1
} }
stat := stats[0] stat := stats[0]
return &networkv1alpha1.IPPool{ clone := pool.DeepCopy()
Status: networkv1alpha1.IPPoolStatus{ clone.Status = networkv1alpha1.IPPoolStatus{
Allocations: stat.Allocate, Allocations: stat.Allocate,
Unallocated: stat.Unallocated, Unallocated: stat.Unallocated,
Reserved: stat.Reserved, Reserved: stat.Reserved,
Capacity: stat.Capacity, Capacity: stat.Capacity,
Synced: true, Synced: true,
}, }
}, nil return clone, nil
} }
func NewProvider(clientset kubesphereclient.Interface, options Options) provider { func newProvider(clientset kubesphereclient.Interface) provider {
vlanProvider := provider{ return provider{
kubesphereClient: clientset, kubesphereClient: clientset,
ipamclient: ipam.NewIPAMClient(clientset, networkv1alpha1.VLAN), ipamclient: ipam.NewIPAMClient(clientset, networkv1alpha1.VLAN),
} }
}
func NewProvider(podInformer v1.PodInformer, clientset kubesphereclient.Interface, client clientset.Interface, pt string, k8sOptions *k8s.KubernetesOptions) Provider {
var p Provider
switch pt {
case networkv1alpha1.IPPoolTypeLocal:
p = provider{
kubesphereClient: clientset,
ipamclient: ipam.NewIPAMClient(clientset, networkv1alpha1.VLAN),
}
case networkv1alpha1.IPPoolTypeCalico:
p = calicoclient.NewProvider(podInformer, clientset, client, k8sOptions)
}
return vlanProvider return p
} }
...@@ -26,7 +26,7 @@ import ( ...@@ -26,7 +26,7 @@ import (
) )
func testNewProvider() provider { func testNewProvider() provider {
return NewProvider(fakeks.NewSimpleClientset(), Options{}) return newProvider(fakeks.NewSimpleClientset())
} }
func TestProvider_GetIPPoolStats(t *testing.T) { func TestProvider_GetIPPoolStats(t *testing.T) {
......
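A hedged wiring sketch of the new constructor (the selection by type string comes from the switch above; the informer, client, and option variables are illustrative placeholders):
// Pick the IP pool backend from the configured type; an unknown type yields
// a nil Provider, which a caller can treat as "feature disabled".
p := NewProvider(podInformer, ksClient, k8sClient, networkv1alpha1.IPPoolTypeLocal, k8sOptions)
if p == nil {
	klog.Info("ippool is disabled")
} else {
	klog.Infof("using %s ippool provider", p.Type())
}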
...@@ -18,8 +18,6 @@ package network ...@@ -18,8 +18,6 @@ package network
import ( import (
"github.com/spf13/pflag" "github.com/spf13/pflag"
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
) )
type NSNPOptions struct { type NSNPOptions struct {
...@@ -27,24 +25,20 @@ type NSNPOptions struct { ...@@ -27,24 +25,20 @@ type NSNPOptions struct {
} }
type Options struct { type Options struct {
EnableNetworkPolicy bool `json:"enableNetworkPolicy,omitempty" yaml:"enableNetworkPolicy"` EnableNetworkPolicy bool `json:"enableNetworkPolicy,omitempty" yaml:"enableNetworkPolicy"`
NSNPOptions NSNPOptions `json:"nsnpOptions,omitempty" yaml:"nsnpOptions,omitempty"` NSNPOptions NSNPOptions `json:"nsnpOptions,omitempty" yaml:"nsnpOptions,omitempty"`
EnableIPPool bool `json:"enableIPPool,omitempty" yaml:"enableIPPool"` WeaveScopeHost string `json:"weaveScopeHost,omitempty" yaml:"weaveScopeHost,omitempty"`
IPPoolOptions ippool.Options `json:"ippoolOptions,omitempty" yaml:"ippoolOptions,omitempty"` IPPoolType string `json:"ippoolType,omitempty" yaml:"ippoolType,omitempty"`
WeaveScopeHost string `json:"weaveScopeHost,omitempty" yaml:"weaveScopeHost,omitempty"`
} }
// NewNetworkOptions returns a `zero` instance // NewNetworkOptions returns a `zero` instance
func NewNetworkOptions() *Options { func NewNetworkOptions() *Options {
return &Options{ return &Options{
EnableNetworkPolicy: false, EnableNetworkPolicy: false,
EnableIPPool: false, IPPoolType: "none",
NSNPOptions: NSNPOptions{ NSNPOptions: NSNPOptions{
AllowedIngressNamespaces: []string{}, AllowedIngressNamespaces: []string{},
}, },
IPPoolOptions: ippool.Options{
Calico: nil,
},
WeaveScopeHost: "", WeaveScopeHost: "",
} }
} }
...@@ -56,16 +50,15 @@ func (s *Options) Validate() []error { ...@@ -56,16 +50,15 @@ func (s *Options) Validate() []error {
func (s *Options) ApplyTo(options *Options) { func (s *Options) ApplyTo(options *Options) {
options.EnableNetworkPolicy = s.EnableNetworkPolicy options.EnableNetworkPolicy = s.EnableNetworkPolicy
options.EnableIPPool = s.EnableIPPool options.IPPoolType = s.IPPoolType
options.NSNPOptions = s.NSNPOptions options.NSNPOptions = s.NSNPOptions
options.IPPoolOptions = s.IPPoolOptions
options.WeaveScopeHost = s.WeaveScopeHost options.WeaveScopeHost = s.WeaveScopeHost
} }
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) { func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
fs.BoolVar(&s.EnableNetworkPolicy, "enable-network-policy", c.EnableNetworkPolicy, fs.BoolVar(&s.EnableNetworkPolicy, "enable-network-policy", c.EnableNetworkPolicy,
"This field instructs KubeSphere to enable network policy or not.") "This field instructs KubeSphere to enable network policy or not.")
fs.BoolVar(&s.EnableIPPool, "enable-ippool", c.EnableIPPool, fs.StringVar(&s.IPPoolType, "ippool-type", c.IPPoolType,
"This field instructs KubeSphere to enable ippool or not.") "This field instructs KubeSphere to enable ippool or not.")
fs.StringVar(&s.WeaveScopeHost, "weave-scope-host", c.WeaveScopeHost, fs.StringVar(&s.WeaveScopeHost, "weave-scope-host", c.WeaveScopeHost,
"Weave Scope service endpoint which build a topology API of the applications and the containers running on the hosts") "Weave Scope service endpoint which build a topology API of the applications and the containers running on the hosts")
......
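For operators, the boolean switch is replaced by a type selector; a hypothetical invocation (the binary name is assumed, the flag name comes from AddFlags above, and the default "none" keeps the feature off):
ks-controller-manager --ippool-type=calico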
...@@ -96,6 +96,10 @@ func main() { ...@@ -96,6 +96,10 @@ func main() {
mapper.AddSpecific(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindNamespaceNetworkPolicy), mapper.AddSpecific(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindNamespaceNetworkPolicy),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralNamespaceNetworkPolicy), networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralNamespaceNetworkPolicy),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourceSingularNamespaceNetworkPolicy), meta.RESTScopeRoot) networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourceSingularNamespaceNetworkPolicy), meta.RESTScopeRoot)
mapper.AddSpecific(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindIPPool),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralIPPool),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourceSingularIPPool), meta.RESTScopeRoot)
mapper.AddSpecific(devopsv1alpha3.SchemeGroupVersion.WithKind(devopsv1alpha3.ResourceKindDevOpsProject), mapper.AddSpecific(devopsv1alpha3.SchemeGroupVersion.WithKind(devopsv1alpha3.ResourceKindDevOpsProject),
devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject), devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject),
devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourceSingularDevOpsProject), meta.RESTScopeRoot) devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourceSingularDevOpsProject), meta.RESTScopeRoot)
...@@ -147,6 +151,7 @@ func main() { ...@@ -147,6 +151,7 @@ func main() {
devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilderTemplate), devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilderTemplate),
devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilder), devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilder),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralNamespaceNetworkPolicy), networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralNamespaceNetworkPolicy),
networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralIPPool),
devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject), devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralDevOpsProject),
devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralPipeline), devopsv1alpha3.SchemeGroupVersion.WithResource(devopsv1alpha3.ResourcePluralPipeline),
clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralCluster), clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralCluster),
......