未验证 提交 693a4143 编写于 作者: Z zryfish 提交者: GitHub

reconcile host cluster (#2354)

print friendly error when component failed

update dockerfile
上级 99aff3c7
......@@ -4,13 +4,12 @@ on:
push:
branches:
- 'master'
- 'dev'
- 'release*'
tags:
- 'release-*'
- 'v*'
pull_request:
branches:
- 'master'
- 'dev'
jobs:
build:
......@@ -39,7 +38,7 @@ jobs:
run: bash hack/install_kubebuilder.sh
- name: Build
run: make all
run: make test
- name: Make OpenAPI Spec
run: make openapi
......
......@@ -34,7 +34,7 @@ define ALL_HELP_INFO
# debugging tools like delve.
endef
.PHONY: all
all: test hypersphere ks-apiserver controller-manager
all: test ks-apiserver controller-manager
# Build ks-apiserver binary
ks-apiserver: fmt vet
......@@ -44,10 +44,6 @@ ks-apiserver: fmt vet
controller-manager: fmt vet
hack/gobuild.sh cmd/controller-manager
# Build hypersphere binary
hypersphere: fmt vet
hack/gobuild.sh cmd/hypersphere
# Run go fmt against code
fmt: generate
gofmt -w ./pkg ./cmd ./tools ./api
......
# Copyright 2018 The KubeSphere Authors. All rights reserved.
# Use of this source code is governed by an Apache license
# that can be found in the LICENSE file.
FROM golang:1.12 as ks-apiserver-builder
COPY / /go/src/kubesphere.io/kubesphere
WORKDIR /go/src/kubesphere.io/kubesphere
RUN GIT_VERSION=$(git describe --always --dirty) && \
GIT_HASH=$(git rev-parse HEAD) && \
BUILDDATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') && \
CGO_ENABLED=0 GO111MODULE=on GOOS=linux GOARCH=amd64 GOFLAGS=-mod=vendor \
go build -i -ldflags \
'-w -s -X kubesphere.io/kubesphere/pkg/version.version=$(GIT_VERSION) \
-X kubesphere.io/kubesphere/pkg/version.gitCommit=$(GIT_HASH) \
-X kubesphere.io/kubesphere/pkg/version.buildDate=$(BUILDDATE)' \
-o ks-apiserver cmd/ks-apiserver/apiserver.go
FROM alpine:3.9
RUN apk add --update ca-certificates && update-ca-certificates
COPY --from=ks-apiserver-builder /go/src/kubesphere.io/kubesphere/ks-apiserver /usr/local/bin/
COPY /bin/cmd/ks-apiserver /usr/local/bin/
CMD ["sh"]
# Copyright 2018 The KubeSphere Authors. All rights reserved.
# Use of this source code is governed by an Apache license
# that can be found in the LICENSE file.
FROM golang:1.12 as controller-manager-builder
COPY / /go/src/kubesphere.io/kubesphere
WORKDIR /go/src/kubesphere.io/kubesphere
RUN CGO_ENABLED=0 GO111MODULE=on GOOS=linux GOARCH=amd64 GOFLAGS=-mod=vendor go build --ldflags "-extldflags -static" -o controller-manager ./cmd/controller-manager/
FROM alpine:3.7
RUN apk add --update ca-certificates && update-ca-certificates
COPY --from=controller-manager-builder /go/src/kubesphere.io/kubesphere/controller-manager /usr/local/bin/
COPY /bin/cmd/controller-manager /usr/local/bin/
CMD controller-manager
......@@ -82,6 +82,7 @@ func NewControllerManagerCommand() *cobra.Command {
os.Exit(1)
}
},
SilenceUsage: true,
}
fs := cmd.Flags()
......@@ -111,24 +112,21 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
if s.DevopsOptions != nil && len(s.DevopsOptions.Host) != 0 {
devopsClient, err = jenkins.NewDevopsClient(s.DevopsOptions)
if err != nil {
klog.Errorf("Failed to create devops client %v", err)
return err
return fmt.Errorf("failed to connect jenkins, please check jenkins status, error: %v", err)
}
}
var ldapClient ldapclient.Interface
if s.LdapOptions == nil || len(s.LdapOptions.Host) == 0 {
klog.Errorf("Failed to create devops client invalid ldap options")
return err
}
if s.LdapOptions.Host == ldapclient.FAKE_HOST {
ldapClient = ldapclient.NewSimpleLdap()
return fmt.Errorf("ldap service address MUST not be empty")
} else {
ldapClient, err = ldapclient.NewLdapClient(s.LdapOptions, stopCh)
if err != nil {
klog.Errorf("Failed to create ldap client %v", err)
return err
if s.LdapOptions.Host == ldapclient.FAKE_HOST { // for debug only
ldapClient = ldapclient.NewSimpleLdap()
} else {
ldapClient, err = ldapclient.NewLdapClient(s.LdapOptions, stopCh)
if err != nil {
return fmt.Errorf("failed to connect to ldap service, please check ldap status, error: %v", err)
}
}
}
......@@ -136,8 +134,7 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
if s.OpenPitrixOptions != nil && !s.OpenPitrixOptions.IsEmpty() {
openpitrixClient, err = openpitrix.NewClient(s.OpenPitrixOptions)
if err != nil {
klog.Errorf("Failed to create openpitrix client %v", err)
return err
return fmt.Errorf("failed to connect to openpitrix, please check openpitrix status, error: %v", err)
}
}
......@@ -145,13 +142,17 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
if s.S3Options != nil && len(s.S3Options.Endpoint) != 0 {
s3Client, err = s3.NewS3Client(s.S3Options)
if err != nil {
klog.Errorf("Failed to create s3 client %v", err)
return err
return fmt.Errorf("failed to connect to s3, please check s3 service status, error: %v", err)
}
}
informerFactory := informers.NewInformerFactories(kubernetesClient.Kubernetes(), kubernetesClient.KubeSphere(),
kubernetesClient.Istio(), kubernetesClient.Application(), kubernetesClient.Snapshot(), kubernetesClient.ApiExtensions())
informerFactory := informers.NewInformerFactories(
kubernetesClient.Kubernetes(),
kubernetesClient.KubeSphere(),
kubernetesClient.Istio(),
kubernetesClient.Application(),
kubernetesClient.Snapshot(),
kubernetesClient.ApiExtensions())
run := func(ctx context.Context) {
klog.V(0).Info("setting up manager")
......
......@@ -92,10 +92,12 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
kubernetesClient.Istio(), kubernetesClient.Application(), kubernetesClient.Snapshot(), kubernetesClient.ApiExtensions())
apiServer.InformerFactory = informerFactory
if s.MonitoringOptions.Endpoint != "" {
if s.MonitoringOptions == nil || len(s.MonitoringOptions.Endpoint) == 0 {
return nil, fmt.Errorf("moinitoring service address in configuration MUST not be empty, please check configmap/kubesphere-config in kubesphere-system namespace")
} else {
monitoringClient, err := prometheus.NewPrometheus(s.MonitoringOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to prometheus, please check prometheus status, error: %v", err)
}
apiServer.MonitoringClient = monitoringClient
}
......@@ -103,7 +105,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.LoggingOptions.Host != "" {
loggingClient, err := esclient.NewElasticsearch(s.LoggingOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to elasticsearch, please check elasticsearch status, error: %v", err)
}
apiServer.LoggingClient = loggingClient
}
......@@ -114,7 +116,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
} else {
s3Client, err := s3.NewS3Client(s.S3Options)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to s3, please check s3 service status, error: %v", err)
}
apiServer.S3Client = s3Client
}
......@@ -123,7 +125,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.DevopsOptions.Host != "" {
devopsClient, err := jenkins.NewDevopsClient(s.DevopsOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to jenkins, please check jenkins status, error: %v", err)
}
apiServer.DevopsClient = devopsClient
}
......@@ -131,19 +133,21 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.SonarQubeOptions.Host != "" {
sonarClient, err := sonarqube.NewSonarQubeClient(s.SonarQubeOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connecto to sonarqube, please check sonarqube status, error: %v", err)
}
apiServer.SonarClient = sonarqube.NewSonar(sonarClient.SonarQube())
}
var cacheClient cache.Interface
if s.RedisOptions.Host != "" {
if s.RedisOptions == nil || len(s.RedisOptions.Host) == 0 {
return nil, fmt.Errorf("redis service address MUST not be empty, please check configmap/kubesphere-config in kubesphere-system namespace")
} else {
if s.RedisOptions.Host == fakeInterface && s.DebugMode {
apiServer.CacheClient = cache.NewSimpleCache()
} else {
cacheClient, err = cache.NewRedisClient(s.RedisOptions, stopCh)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to redis service, please check redis status, error: %v", err)
}
apiServer.CacheClient = cacheClient
}
......@@ -152,7 +156,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.EventsOptions.Host != "" {
eventsClient, err := eventsclient.NewClient(s.EventsOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to elasticsearch, please check elasticsearch status, error: %v", err)
}
apiServer.EventsClient = eventsClient
}
......@@ -160,7 +164,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.AuditingOptions.Host != "" {
auditingClient, err := auditingclient.NewClient(s.AuditingOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to elasticsearch, please check elasticsearch status, error: %v", err)
}
apiServer.AuditingClient = auditingClient
}
......@@ -168,7 +172,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.OpenPitrixOptions != nil && !s.OpenPitrixOptions.IsEmpty() {
opClient, err := openpitrix.NewClient(s.OpenPitrixOptions)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to connect to openpitrix, please check openpitrix status, error: %v", err)
}
apiServer.OpenpitrixClient = opClient
}
......
......@@ -54,6 +54,7 @@ cluster's shared state through which all other components interact.`,
return Run(s, signals.SetupSignalHandler())
},
SilenceUsage: true,
}
fs := cmd.Flags()
......
......@@ -3,17 +3,17 @@
set -ex
set -o pipefail
# tag_for_branch maps a git branch name to a docker image tag.
# The master branch publishes as "latest"; any other branch name is
# used verbatim as the tag.
tag_for_branch() {
  local branch="$1"
  case "${branch}" in
    master) echo "latest" ;;
    *)      echo "${branch}" ;;
  esac
}
# push to kubespheredev with default latest tag
REPO=${REPO:-kubespheredev}
TAG=${TRAVIS_BRANCH:-latest}
# check if build was triggered by a travis cronjob
if [[ -z "$TRAVIS_EVENT_TYPE" ]]; then
echo "TRAVIS_EVENT_TYPE is empty, also normaly build"
elif [[ $TRAVIS_EVENT_TYPE == "cron" ]]; then
TAG=dev-$(date +%Y%m%d)
fi
TAG=$(tag_for_branch $1)
docker build -f build/ks-apiserver/Dockerfile -t $REPO/ks-apiserver:$TAG .
docker build -f build/ks-controller-manager/Dockerfile -t $REPO/ks-controller-manager:$TAG .
......
......@@ -20,6 +20,9 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
VERBOSE=${VERBOSE:-"0"}
V=""
if [[ "${VERBOSE}" == "1" ]];then
......@@ -33,13 +36,13 @@ OUTPUT_DIR=bin
BUILDPATH=./${1:?"path to build"}
OUT=${OUTPUT_DIR}/${1:?"output path"}
set -e
BUILD_GOOS=${GOOS:-linux}
BUILD_GOARCH=${GOARCH:-amd64}
GOBINARY=${GOBINARY:-go}
LDFLAGS=$(kube::version::ldflags)
# forgoing -i (incremental build) because it will be deprecated by tool chain.
time GOOS=${BUILD_GOOS} CGO_ENABLED=0 GOARCH=${BUILD_GOARCH} ${GOBINARY} build \
-ldflags="${LDFLAGS}" \
-o ${OUT} \
${BUILDPATH}
......@@ -10,7 +10,7 @@
# Check if the program is installed, otherwise exit
function command_exists () {
if ! [ -x "$(command -v $1)" ]; then
if ! [[ -x "$(command -v $1)" ]]; then
echo "Error: $1 program is not installed." >&2
exit 1
fi
......@@ -45,14 +45,14 @@ esac
command_exists curl
command_exists tar
KUBEBUILDER_VERSION=v1.0.8
KUBEBUILDER_VERSION=v2.3.1
KUBEBUILDER_VERSION=${KUBEBUILDER_VERSION#"v"}
KUBEBUILDER_VERSION_NAME="kubebuilder_${KUBEBUILDER_VERSION}"
KUBEBUILDER_DIR=/usr/local/kubebuilder
# Check if folder containing kubebuilder executable exists and is not empty
if [ -d "$KUBEBUILDER_DIR" ]; then
if [ "$(ls -A $KUBEBUILDER_DIR)" ]; then
if [[ -d "$KUBEBUILDER_DIR" ]]; then
if [[ "$(ls -A ${KUBEBUILDER_DIR})" ]]; then
echo "\n/usr/local/kubebuilder folder is not empty. Please delete or backup it before to install ${KUBEBUILDER_VERSION_NAME}"
exit 1
fi
......@@ -64,7 +64,7 @@ pushd $TMP_DIR
# Downloading Kubebuilder compressed file using curl program
URL="https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH}.tar.gz"
echo "Downloading ${KUBEBUILDER_VERSION_NAME}\nfrom $URL\n"
curl -L "$URL"| tar xz -C $TMP_DIR
curl -L "$URL"| tar xz -C ${TMP_DIR}
echo "Downloaded executable files"
ls "${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH}/bin"
......@@ -75,6 +75,6 @@ mv ${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH} kubebuilder && sudo mv -f kubebu
echo "Add kubebuilder to your path; e.g copy paste in your shell and/or edit your ~/.profile file"
echo "export PATH=\$PATH:/usr/local/kubebuilder/bin"
popd
rm -rf $TMP_DIR
rm -rf ${TMP_DIR}
export PATH=$PATH:/usr/local/kubebuilder/bin
#!/usr/bin/env bash
# This is a modified version of Kubernetes
KUBE_GO_PACKAGE=kubesphere.io/kubesphere
# Ensure the go tool exists and is a viable version.
kube::golang::verify_go_version() {
......@@ -26,3 +26,39 @@ EOF
return 2
fi
}
# Prints the value that needs to be passed to the -ldflags parameter of
# go build in order to stamp the binary's version metadata (the variables
# in pkg/version) from the current git tree status. Requires the KUBE_GIT_*
# variables populated by kube::version::get_version_vars, which it calls.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and hack/print-workspace-status.sh.
kube::version::ldflags() {
kube::version::get_version_vars
local -a ldflags
# add_ldflag appends one "-X '<pkg>/pkg/version.<key>=<val>'" entry to the
# ldflags array; KUBE_GO_PACKAGE is defined in hack/lib/init.sh.
function add_ldflag() {
local key=${1}
local val=${2}
# If you update these, also update the list component-base/version/def.bzl.
ldflags+=(
"-X '${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}'"
)
}
# Honor SOURCE_DATE_EPOCH (reproducible-builds convention) when set;
# otherwise stamp with the current UTC time.
add_ldflag "buildDate" "$(date ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} -u +'%Y-%m-%dT%H:%M:%SZ')"
# The remaining flags are only emitted when the corresponding KUBE_GIT_*
# variable was resolved (e.g. builds from a tarball have no git info).
if [[ -n ${KUBE_GIT_COMMIT-} ]]; then
add_ldflag "gitCommit" "${KUBE_GIT_COMMIT}"
add_ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}"
fi
if [[ -n ${KUBE_GIT_VERSION-} ]]; then
add_ldflag "gitVersion" "${KUBE_GIT_VERSION}"
fi
if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then
add_ldflag "gitMajor" "${KUBE_GIT_MAJOR}"
add_ldflag "gitMinor" "${KUBE_GIT_MINOR}"
fi
# The -ldflags parameter takes a single string, so join the output.
echo "${ldflags[*]-}"
}
\ No newline at end of file
......@@ -19,6 +19,8 @@ export THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin"
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/hack/lib/logging.sh"
source "${KUBE_ROOT}/hack/lib/version.sh"
kube::log::install_errexit
......
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# Version management helpers. These functions help to set, save and load the
# following variables:
#
# KUBE_GIT_COMMIT - The git commit id corresponding to this
# source code.
# KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
# "dirty" indicates source code changes after the git commit id
# "archive" indicates the tree was produced by 'git archive'
# KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version.
# KUBE_GIT_MAJOR - The major part of the version
# KUBE_GIT_MINOR - The minor component of the version
# Grovels through git to set a set of env variables.
#
# If KUBE_GIT_VERSION_FILE, this function will load from that file instead of
# querying git.
# kube::version::get_version_vars populates KUBE_GIT_COMMIT,
# KUBE_GIT_TREE_STATE, KUBE_GIT_VERSION, KUBE_GIT_MAJOR and KUBE_GIT_MINOR
# by inspecting the git tree rooted at KUBE_ROOT. Pre-set variables are
# respected and not overwritten.
kube::version::get_version_vars() {
# If the kubernetes source was exported through git archive, then
# we likely don't have a git tree, but these magic values may be filled in.
# shellcheck disable=SC2016,SC2050
# Disabled as we're not expanding these at runtime, but rather expecting
# that another tool may have expanded these and rewritten the source (!)
if [[ '$Format:%%$' == "%" ]]; then
KUBE_GIT_COMMIT='$Format:%H$'
KUBE_GIT_TREE_STATE="archive"
# When a 'git archive' is exported, the '$Format:%D$' below will look
# something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: '
# can be extracted from it.
if [[ '$Format:%D$' =~ tag:\ (v[^ ,]+) ]]; then
KUBE_GIT_VERSION="${BASH_REMATCH[1]}"
fi
fi
# Run every git command against the source tree, regardless of cwd.
local git=(git --work-tree "${KUBE_ROOT}")
if [[ -n ${KUBE_GIT_COMMIT-} ]] || KUBE_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then
if [[ -z ${KUBE_GIT_TREE_STATE-} ]]; then
# Check if the tree is dirty. default to dirty
if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
KUBE_GIT_TREE_STATE="clean"
else
KUBE_GIT_TREE_STATE="dirty"
fi
fi
# Use git describe to find the version based on tags.
if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --match='v*' --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then
# This translates the "git describe" to an actual semver.org
# compatible semantic version that looks something like this:
# v1.1.0-alpha.0.6+84c76d1142ea4d
#
# TODO: We continue calling this "git version" because so many
# downstream consumers are expecting it there.
#
# These regexes are painful enough in sed...
# We don't want to do them in pure shell, so disable SC2001
# shellcheck disable=SC2001
DASHES_IN_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/[^-]//g")
if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
# shellcheck disable=SC2001
# We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/")
elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
# shellcheck disable=SC2001
# We have distance to base tag (v1.1.0-1-gCommitHash)
KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
fi
if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then
# git describe --dirty only considers changes to existing files, but
# that is problematic since new untracked .go files affect the build,
# so use our idea of "dirty" from git status instead.
KUBE_GIT_VERSION+="-dirty"
fi
# Try to match the "git describe" output to a regex to try to extract
# the "major" and "minor" versions and whether this is the exact tagged
# version or whether the tree is between two tagged versions.
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
if [[ -n "${BASH_REMATCH[4]}" ]]; then
KUBE_GIT_MINOR+="+"
fi
fi
# If KUBE_GIT_VERSION is not a valid Semantic Version, then refuse to build.
if ! [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
echo "KUBE_GIT_VERSION should be a valid Semantic Version. Current value: ${KUBE_GIT_VERSION}"
echo "Please see more details here: https://semver.org"
exit 1
fi
fi
fi
}
\ No newline at end of file
......@@ -10,7 +10,7 @@ const (
ResourcesSingularCluster = "cluster"
ResourcesPluralCluster = "clusters"
IsHostCluster = "cluster.kubesphere.io/is-host-cluster"
HostCluster = "cluster-role.kubesphere.io/host"
// Description of which region the cluster has been placed in
ClusterRegion = "cluster.kubesphere.io/region"
// Name of the cluster group
......
......@@ -260,10 +260,8 @@ func isClusterReady(cluster *clusterv1alpha1.Cluster) bool {
}
func isClusterHostCluster(cluster *clusterv1alpha1.Cluster) bool {
for key, value := range cluster.Annotations {
if key == clusterv1alpha1.IsHostCluster && value == "true" {
return true
}
if _, ok := cluster.Labels[clusterv1alpha1.HostCluster]; ok {
return true
}
return false
......
......@@ -70,8 +70,35 @@ const (
// proxy format
proxyFormat = "%s/api/v1/namespaces/kubesphere-system/services/:ks-apiserver:80/proxy/%s"
// multicluster configuration name
configzMultiCluster = "multicluster"
)
// hostCluster is the Cluster template used to register the host cluster
// itself when no cluster carrying the HostCluster label exists yet (see
// reconcileHostCluster). The connection kubeconfig is filled in at
// reconcile time from the controller's rest.Config.
var hostCluster = &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "host",
Annotations: map[string]string{
"kubesphere.io/description": "Automatically created by kubesphere, " +
"we encourage you use host cluster for cluster management only, " +
"deploy workloads to member clusters.",
},
// The empty-valued HostCluster label marks this as the host cluster;
// reconcileHostCluster selects on exactly this label.
Labels: map[string]string{
clusterv1alpha1.HostCluster: "",
clusterv1alpha1.ClusterGroup: "production",
},
},
Spec: clusterv1alpha1.ClusterSpec{
JoinFederation: true,
Enable: true,
Provider: "kubesphere",
// Direct connection: the controller reaches this cluster via the
// kubeconfig set on Spec.Connection.KubeConfig.
Connection: clusterv1alpha1.Connection{
Type: clusterv1alpha1.ConnectionTypeDirect,
},
},
}
// ClusterData stores cluster client
type clusterData struct {
......@@ -176,11 +203,17 @@ func (c *clusterController) Run(workers int, stopCh <-chan struct{}) error {
go wait.Until(c.worker, c.workerLoopPeriod, stopCh)
}
// refresh cluster configz every 2 minutes
go wait.Until(func() {
if err := c.syncStatus(); err != nil {
klog.Errorf("Error periodically sync cluster status, %v", err)
}
}, 5*time.Minute, stopCh)
if err := c.reconcileHostCluster(); err != nil {
klog.Errorf("Error create host cluster, error %v", err)
}
}, 2*time.Minute, stopCh)
<-stopCh
return nil
......@@ -256,6 +289,26 @@ func (c *clusterController) syncStatus() error {
return nil
}
// reconcileHostCluster registers the host cluster itself: when no cluster
// carries the 'cluster-role.kubesphere.io/host' label, it builds a
// kubeconfig from the controller's own rest.Config and creates the
// hostCluster template object.
func (c *clusterController) reconcileHostCluster() error {
	selector := labels.SelectorFromSet(labels.Set{clusterv1alpha1.HostCluster: ""})
	existing, err := c.clusterLister.List(selector)
	if err != nil {
		return err
	}
	if len(existing) > 0 {
		// A host cluster is already registered; nothing to do.
		return nil
	}
	kubeconfig, err := buildKubeconfigFromRestConfig(c.hostConfig)
	if err != nil {
		return err
	}
	hostCluster.Spec.Connection.KubeConfig = kubeconfig
	_, err = c.clusterClient.Create(hostCluster)
	return err
}
func (c *clusterController) syncCluster(key string) error {
startTime := time.Now()
......@@ -524,6 +577,14 @@ func (c *clusterController) syncCluster(key string) error {
cluster.Status.Configz = configz
}
// label the cluster as host cluster if configz["multicluster"] == true and its nodes match the host's
if mc, ok := configz[configzMultiCluster]; ok && mc && c.checkIfClusterIsHostCluster(nodes) {
if cluster.Labels == nil {
cluster.Labels = make(map[string]string)
}
cluster.Labels[clusterv1alpha1.HostCluster] = ""
}
clusterReadyCondition := clusterv1alpha1.ClusterCondition{
Type: clusterv1alpha1.ClusterReady,
Status: v1.ConditionTrue,
......@@ -577,6 +638,27 @@ func (c *clusterController) syncCluster(key string) error {
return nil
}
// checkIfClusterIsHostCluster reports whether the given member-cluster node
// list appears to describe the host cluster itself: both node lists must be
// non-nil and the same length, and — when nodes exist — the first node's
// machine ID must match.
// NOTE(review): this is a heuristic; only the first node is compared.
func (c *clusterController) checkIfClusterIsHostCluster(memberClusterNodes *v1.NodeList) bool {
	hostNodes, err := c.client.CoreV1().Nodes().List(metav1.ListOptions{})
	switch {
	case err != nil:
		return false
	case hostNodes == nil || memberClusterNodes == nil:
		return false
	case len(hostNodes.Items) != len(memberClusterNodes.Items):
		return false
	case len(hostNodes.Items) > 0 &&
		hostNodes.Items[0].Status.NodeInfo.MachineID != memberClusterNodes.Items[0].Status.NodeInfo.MachineID:
		return false
	default:
		return true
	}
}
// tryToFetchKubeSphereComponents will send requests to member cluster configz api using kube-apiserver proxy way
func (c *clusterController) tryToFetchKubeSphereComponents(host string, transport http.RoundTripper) (map[string]bool, error) {
client := http.Client{
......@@ -671,16 +753,6 @@ func (c *clusterController) updateClusterCondition(cluster *clusterv1alpha1.Clus
}
}
// isHostCluster reports whether the cluster carries the IsHostCluster
// annotation with the value "true".
func isHostCluster(cluster *clusterv1alpha1.Cluster) bool {
	// Indexing a nil Annotations map is safe and yields the zero value.
	value, ok := cluster.Annotations[clusterv1alpha1.IsHostCluster]
	return ok && value == "true"
}
// joinFederation joins a cluster into federation clusters.
// return nil error if kubefed cluster already exists.
func (c *clusterController) joinFederation(clusterConfig *rest.Config, joiningClusterName string, labels map[string]string) (*fedv1b1.KubeFedCluster, error) {
......
package cluster
import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
// buildKubeconfigFromRestConfig serializes a rest.Config into kubeconfig
// bytes containing a single cluster, auth-info and context, with the
// context set as current. Both file-path and in-memory credential fields
// are carried over from the rest.Config.
func buildKubeconfigFromRestConfig(config *rest.Config) ([]byte, error) {
	const (
		clusterName = "kubernetes"
		userName    = "kubernetes-admin"
		contextName = "kubernetes-admin@kubernetes"
	)

	kubeconfig := api.NewConfig()
	kubeconfig.Clusters[clusterName] = &api.Cluster{
		Server:                   config.Host,
		CertificateAuthorityData: config.CAData,
		CertificateAuthority:     config.CAFile,
	}
	kubeconfig.AuthInfos[userName] = &api.AuthInfo{
		ClientCertificate:     config.CertFile,
		ClientCertificateData: config.CertData,
		ClientKey:             config.KeyFile,
		ClientKeyData:         config.KeyData,
		TokenFile:             config.BearerTokenFile,
		Token:                 config.BearerToken,
		Username:              config.Username,
		Password:              config.Password,
	}
	kubeconfig.Contexts[contextName] = &api.Context{
		Cluster:  clusterName,
		AuthInfo: userName,
	}
	kubeconfig.CurrentContext = contextName

	return clientcmd.Write(*kubeconfig)
}
......@@ -24,14 +24,18 @@ import (
)
var (
version = "v0.0.0"
gitVersion = "v0.0.0"
gitCommit = "unknown"
gitTreeState = "unknown"
buildDate = "unknown"
gitMajor = "unknown"
gitMinor = "unknown"
)
type Info struct {
Version string `json:"gitVersion"`
GitVersion string `json:"gitVersion"`
GitMajor string `json:"gitMajor"`
GitMinor string `json:"gitMinor"`
GitCommit string `json:"gitCommit"`
GitTreeState string `json:"gitTreeState"`
BuildDate string `json:"buildDate"`
......@@ -47,7 +51,8 @@ func Get() Info {
// These variables typically come from -ldflags settings and
// in their absence fallback to the default settings
return Info{
Version: version,
GitVersion: gitVersion,
GitCommit: gitCommit,
GitTreeState: gitTreeState,
BuildDate: buildDate,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册