Unverified commit 1a513a9a, authored by runzexia

fix comment

Signed-off-by: runzexia <runzexia@yunify.com>
Parent f00917b0
......@@ -27,9 +27,9 @@ const (
)
const (
StatusUploading = "Uploading"
StatusReady = "Ready"
StatusUnableToDownload = "UnableToDownload"
StatusUploading = "Uploading"
StatusReady = "Ready"
StatusUploadFailed = "UploadFailed"
)
const (
......
package v1alpha1
import (
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring"
corev1 "k8s.io/api/core/v1"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring"
)
// A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy
......
......@@ -4,7 +4,7 @@ import (
"code.cloudfoundry.org/bytefmt"
"fmt"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/errors"
"kubesphere.io/kubesphere/pkg/models/devops"
"kubesphere.io/kubesphere/pkg/utils/hashutil"
......@@ -17,38 +17,38 @@ func UploadS2iBinary(req *restful.Request, resp *restful.Response) {
err := req.Request.ParseMultipartForm(bytefmt.MEGABYTE * 20)
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(restful.NewError(http.StatusBadRequest, err.Error()), resp)
return
}
if len(req.Request.MultipartForm.File) == 0 {
err := restful.NewError(http.StatusBadRequest, "could not get file from form")
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(restful.NewError(http.StatusBadRequest, err.Error()), resp)
return
}
if len(req.Request.MultipartForm.File["s2ibinary"]) == 0 {
err := restful.NewError(http.StatusBadRequest, "could not get file from form")
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(err, resp)
return
}
if len(req.Request.MultipartForm.File["s2ibinary"]) > 1 {
err := restful.NewError(http.StatusBadRequest, "s2ibinary should only have one file")
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(err, resp)
return
}
defer req.Request.MultipartForm.RemoveAll()
file, err := req.Request.MultipartForm.File["s2ibinary"][0].Open()
if err != nil {
glog.Error(err)
klog.Error(err)
errors.ParseSvcErr(err, resp)
return
}
filemd5, err := hashutil.GetMD5(file)
if err != nil {
glog.Error(err)
klog.Error(err)
errors.ParseSvcErr(err, resp)
return
}
......@@ -56,7 +56,7 @@ func UploadS2iBinary(req *restful.Request, resp *restful.Response) {
if ok && len(req.Request.MultipartForm.Value["md5"]) > 0 {
if md5[0] != filemd5 {
err := restful.NewError(http.StatusBadRequest, fmt.Sprintf("md5 not match, origin: %+v, calculate: %+v", md5[0], filemd5))
glog.Error(err)
klog.Error(err)
errors.ParseSvcErr(err, resp)
return
}
......@@ -64,7 +64,7 @@ func UploadS2iBinary(req *restful.Request, resp *restful.Response) {
s2ibin, err := devops.UploadS2iBinary(ns, name, filemd5, req.Request.MultipartForm.File["s2ibinary"][0])
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(err, resp)
return
}
......@@ -78,7 +78,7 @@ func DownloadS2iBinary(req *restful.Request, resp *restful.Response) {
fileName := req.PathParameter("file")
url, err := devops.DownloadS2iBinary(ns, name, fileName)
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
errors.ParseSvcErr(err, resp)
return
}
......
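The upload handler above enforces three checks before handing the file to devops.UploadS2iBinary: the multipart form must parse within a 20 MB limit, exactly one file must be posted under the s2ibinary key, and an optional md5 form value must match the computed digest. Below is a standalone sketch of the same validation flow using only the standard library instead of go-restful; maxFormSize and the handler name are illustrative, not part of this commit.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

// maxFormSize mirrors the bytefmt.MEGABYTE*20 limit used by the real handler.
const maxFormSize = 20 << 20

// uploadHandler is an illustrative stand-in for UploadS2iBinary's validation.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseMultipartForm(maxFormSize); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer r.MultipartForm.RemoveAll()

	files := r.MultipartForm.File["s2ibinary"]
	if len(files) != 1 {
		http.Error(w, "s2ibinary should only have one file", http.StatusBadRequest)
		return
	}
	f, err := files[0].Open()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer f.Close()

	// Compute the digest of the upload, as the handler does via hashutil.GetMD5,
	// and compare it against the optional "md5" form value.
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sum := hex.EncodeToString(h.Sum(nil))
	if want := r.MultipartForm.Value["md5"]; len(want) > 0 && want[0] != sum {
		http.Error(w, fmt.Sprintf("md5 not match, origin: %s, calculate: %s", want[0], sum), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "accepted %s (%s)\n", files[0].Filename, sum)
}

func main() {
	http.HandleFunc("/upload", uploadHandler)
	http.ListenAndServe(":8080", nil)
}
```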
......@@ -19,7 +19,6 @@ import (
"k8s.io/kubernetes/pkg/util/metrics"
"kubesphere.io/kubesphere/pkg/simple/client/s2is3"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"time"
devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
......@@ -28,8 +27,6 @@ import (
devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha1"
)
var log = logf.Log.WithName("s2ibinary-controller")
type S2iBinaryController struct {
client clientset.Interface
devopsClient devopsclient.Interface
......@@ -51,7 +48,7 @@ func NewController(devopsclientset devopsclient.Interface,
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(func(format string, args ...interface{}) {
log.Info(fmt.Sprintf(format, args))
klog.Info(fmt.Sprintf(format, args...))
})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "s2ibinary-controller"})
......@@ -73,24 +70,24 @@ func NewController(devopsclientset devopsclient.Interface,
v.eventRecorder = recorder
s2ibinInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.enqueueFoo,
AddFunc: v.enqueueS2iBinary,
UpdateFunc: func(oldObj, newObj interface{}) {
old := oldObj.(*devopsv1alpha1.S2iBinary)
new := newObj.(*devopsv1alpha1.S2iBinary)
if old.ResourceVersion == new.ResourceVersion {
return
}
v.enqueueFoo(newObj)
v.enqueueS2iBinary(newObj)
},
DeleteFunc: v.enqueueFoo,
DeleteFunc: v.enqueueS2iBinary,
})
return v
}
// enqueueFoo takes a Foo resource and converts it into a namespace/name
// enqueueS2iBinary takes an S2iBinary resource and converts it into a namespace/name
// string which is then put onto the work workqueue. This method should *not* be
// passed resources of any type other than Foo.
func (c *S2iBinaryController) enqueueFoo(obj interface{}) {
// passed resources of any type other than S2iBinary.
func (c *S2iBinaryController) enqueueS2iBinary(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
......@@ -107,46 +104,27 @@ func (c *S2iBinaryController) processNextWorkItem() bool {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.syncHandler(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
log.Error(err, "could not reconcile s2ibinary")
klog.Error(err, "could not reconcile s2ibinary")
utilruntime.HandleError(err)
return true
}
......@@ -168,8 +146,8 @@ func (c *S2iBinaryController) Run(workers int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
log.Info("starting s2ibinary controller")
defer log.Info("shutting down s2ibinary controller")
klog.Info("starting s2ibinary controller")
defer klog.Info("shutting down s2ibinary controller")
if !cache.WaitForCacheSync(stopCh, c.s2iBinarySynced) {
return fmt.Errorf("failed to wait for caches to sync")
......@@ -189,16 +167,16 @@ func (c *S2iBinaryController) Run(workers int, stopCh <-chan struct{}) error {
func (c *S2iBinaryController) syncHandler(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
log.Error(err, fmt.Sprintf("could not split s2ibin meta %s ", key))
klog.Error(err, fmt.Sprintf("could not split s2ibin meta %s ", key))
return nil
}
s2ibin, err := c.s2iBinaryLister.S2iBinaries(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
log.Info(fmt.Sprintf("s2ibin '%s' in work queue no longer exists ", key))
klog.Info(fmt.Sprintf("s2ibin '%s' in work queue no longer exists ", key))
return nil
}
log.Error(err, fmt.Sprintf("could not get s2ibin %s ", key))
klog.Error(err, fmt.Sprintf("could not get s2ibin %s ", key))
return err
}
if s2ibin.ObjectMeta.DeletionTimestamp.IsZero() {
......@@ -206,7 +184,7 @@ func (c *S2iBinaryController) syncHandler(key string) error {
s2ibin.ObjectMeta.Finalizers = append(s2ibin.ObjectMeta.Finalizers, devopsv1alpha1.S2iBinaryFinalizerName)
_, err := c.devopsClient.DevopsV1alpha1().S2iBinaries(namespace).Update(s2ibin)
if err != nil {
log.Error(err, fmt.Sprintf("failed to update s2ibin %s ", key))
klog.Error(err, fmt.Sprintf("failed to update s2ibin %s ", key))
return err
}
}
......@@ -214,7 +192,7 @@ func (c *S2iBinaryController) syncHandler(key string) error {
} else {
if sliceutil.HasString(s2ibin.ObjectMeta.Finalizers, devopsv1alpha1.S2iBinaryFinalizerName) {
if err := c.DeleteBinaryInS3(s2ibin); err != nil {
log.Error(err, fmt.Sprintf("failed to delete resource %s in s3", key))
klog.Error(err, fmt.Sprintf("failed to delete resource %s in s3", key))
return err
}
s2ibin.ObjectMeta.Finalizers = sliceutil.RemoveString(s2ibin.ObjectMeta.Finalizers, func(item string) bool {
......@@ -222,7 +200,7 @@ func (c *S2iBinaryController) syncHandler(key string) error {
})
_, err := c.devopsClient.DevopsV1alpha1().S2iBinaries(namespace).Update(s2ibin)
if err != nil {
log.Error(err, fmt.Sprintf("failed to update s2ibin %s ", key))
klog.Error(err, fmt.Sprintf("failed to update s2ibin %s ", key))
return err
}
}
......@@ -244,11 +222,11 @@ func (c *S2iBinaryController) DeleteBinaryInS3(s2ibin *devopsv1alpha1.S2iBinary)
case s3.ErrCodeNoSuchKey:
return nil
default:
log.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s in s3", s2ibin.Namespace, s2ibin.Name))
klog.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s in s3", s2ibin.Namespace, s2ibin.Name))
return err
}
} else {
log.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s in s3", s2ibin.Namespace, s2ibin.Name))
klog.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s in s3", s2ibin.Namespace, s2ibin.Name))
return err
}
}
......
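The comments removed above documented the standard sample-controller workqueue contract, which processNextWorkItem still follows: Done marks the item finished, Forget drops its rate-limiting history on success or on a malformed item, and AddRateLimited requeues it with back-off on transient errors. A minimal sketch of that loop, assuming syncHandler is passed in as a placeholder rather than being the controller's real handler:

```go
package controller

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
)

// processNextWorkItem drains one key from the queue, following the
// Done / Forget / AddRateLimited contract described above.
func processNextWorkItem(queue workqueue.RateLimitingInterface, syncHandler func(string) error) bool {
	obj, shutdown := queue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Done tells the workqueue this item has been processed, whatever the outcome.
		defer queue.Done(obj)

		key, ok := obj.(string)
		if !ok {
			// Malformed item: Forget it so it is not retried forever.
			queue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		if err := syncHandler(key); err != nil {
			// Transient failure: requeue with rate-limited back-off.
			queue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Success: clear the rate-limiting history for this key.
		queue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}
```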
......@@ -15,7 +15,6 @@ import (
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/metrics"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"time"
s2iv1alpha1 "github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1"
......@@ -28,8 +27,6 @@ import (
devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha1"
)
var log = logf.Log.WithName("s2irun-controller")
type S2iRunController struct {
client clientset.Interface
s2iClient s2iclient.Interface
......@@ -56,7 +53,7 @@ func NewController(devopsclientset devopsclient.Interface, s2iclientset s2iclien
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(func(format string, args ...interface{}) {
log.Info(fmt.Sprintf(format, args))
klog.Info(fmt.Sprintf(format, args...))
})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "s2irun-controller"})
......@@ -81,16 +78,16 @@ func NewController(devopsclientset devopsclient.Interface, s2iclientset s2iclien
v.eventRecorder = recorder
s2iRunInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.enqueueFoo,
AddFunc: v.enqueueS2iRun,
UpdateFunc: func(oldObj, newObj interface{}) {
old := oldObj.(*s2iv1alpha1.S2iRun)
new := newObj.(*s2iv1alpha1.S2iRun)
if old.ResourceVersion == new.ResourceVersion {
return
}
v.enqueueFoo(newObj)
v.enqueueS2iRun(newObj)
},
DeleteFunc: v.enqueueFoo,
DeleteFunc: v.enqueueS2iRun,
})
return v
}
......@@ -98,7 +95,7 @@ func NewController(devopsclientset devopsclient.Interface, s2iclientset s2iclien
// enqueueFoo takes a Foo resource and converts it into a namespace/name
// string which is then put onto the work workqueue. This method should *not* be
// passed resources of any type other than Foo.
func (c *S2iRunController) enqueueFoo(obj interface{}) {
func (c *S2iRunController) enqueueS2iRun(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
......@@ -115,46 +112,26 @@ func (c *S2iRunController) processNextWorkItem() bool {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.syncHandler(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
log.Error(err, "could not reconcile s2irun")
klog.Error(err, "could not reconcile s2irun")
utilruntime.HandleError(err)
return true
}
......@@ -176,8 +153,8 @@ func (c *S2iRunController) Run(workers int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
log.Info("starting s2irun controller")
defer log.Info("shutting down s2irun controller")
klog.Info("starting s2irun controller")
defer klog.Info("shutting down s2irun controller")
if !cache.WaitForCacheSync(stopCh, c.s2iBinarySynced) {
return fmt.Errorf("failed to wait for caches to sync")
......@@ -197,16 +174,16 @@ func (c *S2iRunController) Run(workers int, stopCh <-chan struct{}) error {
func (c *S2iRunController) syncHandler(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
log.Error(err, fmt.Sprintf("could not split s2irun meta %s ", key))
klog.Error(err, fmt.Sprintf("could not split s2irun meta %s ", key))
return nil
}
s2irun, err := c.s2iRunLister.S2iRuns(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
log.Info(fmt.Sprintf("s2irun '%s' in work queue no longer exists ", key))
klog.Info(fmt.Sprintf("s2irun '%s' in work queue no longer exists ", key))
return nil
}
log.Error(err, fmt.Sprintf("could not get s2irun %s ", key))
klog.Error(err, fmt.Sprintf("could not get s2irun %s ", key))
return err
}
if s2irun.Labels != nil {
......@@ -217,7 +194,7 @@ func (c *S2iRunController) syncHandler(key string) error {
s2irun.ObjectMeta.Finalizers = append(s2irun.ObjectMeta.Finalizers, devopsv1alpha1.S2iBinaryFinalizerName)
_, err := c.s2iClient.DevopsV1alpha1().S2iRuns(namespace).Update(s2irun)
if err != nil {
log.Error(err, fmt.Sprintf("failed to update s2irun %s", key))
klog.Error(err, fmt.Sprintf("failed to update s2irun %s", key))
return err
}
}
......@@ -225,7 +202,7 @@ func (c *S2iRunController) syncHandler(key string) error {
} else {
if sliceutil.HasString(s2irun.ObjectMeta.Finalizers, devopsv1alpha1.S2iBinaryFinalizerName) {
if err := c.DeleteS2iBinary(s2irun); err != nil {
log.Error(err, fmt.Sprintf("failed to delete s2ibin %s in", key))
klog.Error(err, fmt.Sprintf("failed to delete s2ibin %s in", key))
return err
}
s2irun.ObjectMeta.Finalizers = sliceutil.RemoveString(s2irun.ObjectMeta.Finalizers, func(item string) bool {
......@@ -233,7 +210,7 @@ func (c *S2iRunController) syncHandler(key string) error {
})
_, err := c.s2iClient.DevopsV1alpha1().S2iRuns(namespace).Update(s2irun)
if err != nil {
log.Error(err, fmt.Sprintf("failed to update s2irun %s ", key))
klog.Error(err, fmt.Sprintf("failed to update s2irun %s ", key))
return err
}
}
......@@ -249,19 +226,19 @@ func (c *S2iRunController) DeleteS2iBinary(s2irun *s2iv1alpha1.S2iRun) error {
s2iBin, err := c.s2iBinaryLister.S2iBinaries(s2irun.Namespace).Get(s2iBinName)
if err != nil {
if errors.IsNotFound(err) {
log.Info(fmt.Sprintf("s2ibin '%s/%s' has been delted ", s2irun.Namespace, s2iBinName))
klog.Info(fmt.Sprintf("s2ibin '%s/%s' has been delted ", s2irun.Namespace, s2iBinName))
return nil
}
log.Error(err, fmt.Sprintf("failed to get s2ibin %s/%s ", s2irun.Namespace, s2iBinName))
klog.Error(err, fmt.Sprintf("failed to get s2ibin %s/%s ", s2irun.Namespace, s2iBinName))
return err
}
err = c.devopsClient.DevopsV1alpha1().S2iBinaries(s2iBin.Namespace).Delete(s2iBinName, nil)
if err != nil {
if errors.IsNotFound(err) {
log.Info(fmt.Sprintf("s2ibin '%s/%s' has been delted ", s2irun.Namespace, s2iBinName))
klog.Info(fmt.Sprintf("s2ibin '%s/%s' has been delted ", s2irun.Namespace, s2iBinName))
return nil
}
log.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s ", s2irun.Namespace, s2iBinName))
klog.Error(err, fmt.Sprintf("failed to delete s2ibin %s/%s ", s2irun.Namespace, s2iBinName))
return err
}
......
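Both controllers manage deletion through a finalizer: while ObjectMeta.DeletionTimestamp is zero they append devopsv1alpha1.S2iBinaryFinalizerName and update the object, and once deletion begins they clean up the dependent resource (the S3 object, or the owning S2iBinary) before removing the finalizer so the API server can finish the delete. A hedged sketch of that pattern over a bare ObjectMeta; the finalizer value, the hasString/removeString helpers, and the cleanup/update callbacks are stand-ins for the sliceutil helpers and typed client calls in the diff:

```go
package controller

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// finalizerName is illustrative; the diff uses devopsv1alpha1.S2iBinaryFinalizerName.
const finalizerName = "s2ibinary.finalizers.kubesphere.io"

func hasString(items []string, s string) bool {
	for _, item := range items {
		if item == s {
			return true
		}
	}
	return false
}

func removeString(items []string, s string) []string {
	var out []string
	for _, item := range items {
		if item != s {
			out = append(out, item)
		}
	}
	return out
}

// reconcileFinalizer mirrors the branch in both syncHandlers: add the finalizer
// to live objects, and on deleting objects run cleanup before removing it so
// the API server can complete the delete. cleanup and update stand in for
// DeleteBinaryInS3 / DeleteS2iBinary and the typed Update calls.
func reconcileFinalizer(meta *metav1.ObjectMeta, cleanup, update func() error) error {
	if meta.DeletionTimestamp.IsZero() {
		if !hasString(meta.Finalizers, finalizerName) {
			meta.Finalizers = append(meta.Finalizers, finalizerName)
			return update()
		}
		return nil
	}
	if hasString(meta.Finalizers, finalizerName) {
		if err := cleanup(); err != nil {
			return err // keep the finalizer so the deletion is retried
		}
		meta.Finalizers = removeString(meta.Finalizers, finalizerName)
		return update()
	}
	return nil
}
```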
......@@ -8,9 +8,9 @@ import (
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
"kubesphere.io/kubesphere/pkg/informers"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
......@@ -28,20 +28,20 @@ const (
func UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHeader) (*v1alpha1.S2iBinary, error) {
binFile, err := fileHeader.Open()
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
return nil, err
}
defer binFile.Close()
origin, err := informers.KsSharedInformerFactory().Devops().V1alpha1().S2iBinaries().Lister().S2iBinaries(namespace).Get(name)
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
return nil, err
}
//Check file is uploading
if origin.Status.Phase == v1alpha1.StatusUploading {
err := restful.NewError(http.StatusConflict, "file is uploading, please try later")
glog.Error(err)
klog.Error(err)
return nil, err
}
copy := origin.DeepCopy()
......@@ -54,13 +54,14 @@ func UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHead
}
//Set status Uploading to lock resource
origin, err = SetS2iBinaryStatus(origin, v1alpha1.StatusUploading)
uploading, err := SetS2iBinaryStatus(copy, v1alpha1.StatusUploading)
if err != nil {
err := restful.NewError(http.StatusConflict, fmt.Sprintf("could not set status: %+v", err))
glog.Error(err)
klog.Error(err)
return nil, err
}
copy = origin.DeepCopy()
copy = uploading.DeepCopy()
copy.Spec.MD5 = md5
copy.Spec.Size = bytefmt.ByteSize(uint64(fileHeader.Size))
copy.Spec.FileName = fileHeader.Filename
......@@ -68,7 +69,12 @@ func UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHead
s3session := s2is3.Session()
if s3session == nil {
err := fmt.Errorf("could not connect to s2i s3")
glog.Error(err)
klog.Error(err)
_, serr := SetS2iBinaryStatusWithRetry(copy, origin.Status.Phase)
if serr != nil {
klog.Error(serr)
return nil, err
}
return nil, err
}
uploader := s3manager.NewUploader(s3session, func(uploader *s3manager.Uploader) {
......@@ -87,22 +93,22 @@ func UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHead
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchBucket:
glog.Error(err)
_, serr := SetS2iBinaryStatusWithRetry(origin, origin.Status.Phase)
klog.Error(err)
_, serr := SetS2iBinaryStatusWithRetry(copy, origin.Status.Phase)
if serr != nil {
glog.Error(serr)
klog.Error(serr)
}
return nil, err
default:
glog.Error(err)
_, serr := SetS2iBinaryStatusWithRetry(origin, v1alpha1.StatusUnableToDownload)
klog.Error(err)
_, serr := SetS2iBinaryStatusWithRetry(copy, v1alpha1.StatusUploadFailed)
if serr != nil {
glog.Error(serr)
klog.Error(serr)
}
return nil, err
}
}
glog.Error(err)
klog.Error(err)
return nil, err
}
......@@ -110,40 +116,40 @@ func UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHead
copy.Spec.UploadTimeStamp = new(metav1.Time)
}
*copy.Spec.UploadTimeStamp = metav1.Now()
resp, err := k8s.KsClient().DevopsV1alpha1().S2iBinaries(namespace).Update(copy)
copy, err = k8s.KsClient().DevopsV1alpha1().S2iBinaries(namespace).Update(copy)
if err != nil {
glog.Error(err)
klog.Error(err)
return nil, err
}
resp, err = SetS2iBinaryStatusWithRetry(resp, v1alpha1.StatusReady)
copy, err = SetS2iBinaryStatusWithRetry(copy, v1alpha1.StatusReady)
if err != nil {
glog.Error(err)
klog.Error(err)
return nil, err
}
return resp, nil
return copy, nil
}
func DownloadS2iBinary(namespace, name, fileName string) (string, error) {
origin, err := informers.KsSharedInformerFactory().Devops().V1alpha1().S2iBinaries().Lister().S2iBinaries(namespace).Get(name)
if err != nil {
glog.Errorf("%+v", err)
klog.Errorf("%+v", err)
return "", err
}
if origin.Spec.FileName != fileName {
err := fmt.Errorf("could not fould file %s", fileName)
glog.Error(err)
klog.Error(err)
return "", err
}
if origin.Status.Phase != v1alpha1.StatusReady {
err := restful.NewError(http.StatusBadRequest, "file is not ready, please try later")
glog.Error(err)
klog.Error(err)
return "", err
}
s3Client := s2is3.Client()
if s3Client == nil {
err := fmt.Errorf("could not get s3 client")
glog.Error(err)
klog.Error(err)
return "", err
}
req, _ := s3Client.GetObjectRequest(&s3.GetObjectInput{
......@@ -153,7 +159,7 @@ func DownloadS2iBinary(namespace, name, fileName string) (string, error) {
})
url, err := req.Presign(5 * time.Minute)
if err != nil {
glog.Error(err)
klog.Error(err)
return "", err
}
return url, nil
......@@ -165,7 +171,7 @@ func SetS2iBinaryStatus(s2ibin *v1alpha1.S2iBinary, status string) (*v1alpha1.S2
copy.Status.Phase = status
copy, err := k8s.KsClient().DevopsV1alpha1().S2iBinaries(s2ibin.Namespace).Update(copy)
if err != nil {
glog.Error(err)
klog.Error(err)
return nil, err
}
return copy, nil
......@@ -178,19 +184,19 @@ func SetS2iBinaryStatusWithRetry(s2ibin *v1alpha1.S2iBinary, status string) (*v1
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
bin, err = informers.KsSharedInformerFactory().Devops().V1alpha1().S2iBinaries().Lister().S2iBinaries(s2ibin.Namespace).Get(s2ibin.Name)
if err != nil {
glog.Error(err)
klog.Error(err)
return err
}
bin.Status.Phase = status
bin, err = k8s.KsClient().DevopsV1alpha1().S2iBinaries(s2ibin.Namespace).Update(bin)
if err != nil {
glog.Error(err)
klog.Error(err)
return err
}
return nil
})
if err != nil {
glog.Error(err)
klog.Error(err)
return nil, err
}
......
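SetS2iBinaryStatusWithRetry relies on retry.RetryOnConflict: each attempt re-reads the latest object, mutates its status, and retries with back-off whenever the Update fails on a stale ResourceVersion. A minimal sketch of that pattern; the get/update callbacks stand in for the informer lister and typed clientset used in the real function:

```go
package devops

import (
	"k8s.io/client-go/util/retry"
	"k8s.io/klog"

	"kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
)

// setStatusWithRetry sketches the pattern behind SetS2iBinaryStatusWithRetry:
// every attempt re-reads the latest object, mutates a copy of its status, and
// retry.DefaultRetry backs off whenever the Update hits a 409 conflict.
func setStatusWithRetry(
	get func() (*v1alpha1.S2iBinary, error),
	update func(*v1alpha1.S2iBinary) (*v1alpha1.S2iBinary, error),
	status string,
) (*v1alpha1.S2iBinary, error) {
	var result *v1alpha1.S2iBinary
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		bin, err := get()
		if err != nil {
			klog.Error(err)
			return err
		}
		bin = bin.DeepCopy() // never mutate the shared informer cache
		bin.Status.Phase = status
		result, err = update(bin)
		if err != nil {
			klog.Error(err)
			return err
		}
		return nil
	})
	if err != nil {
		klog.Error(err)
		return nil, err
	}
	return result, nil
}
```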
......@@ -6,7 +6,8 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/golang/glog"
"k8s.io/klog"
"sync"
)
var (
......@@ -19,8 +20,12 @@ var (
s3SessionToken string
s3Bucket string
)
var s2iS3 *s3.S3
var s2iS3Session *session.Session
var (
s2iS3 *s3.S3
s2iS3Session *session.Session
sessionInitMutex sync.Mutex
clientInitMutex sync.Mutex
)
func init() {
flag.StringVar(&s3Region, "s2i-s3-region", "us-east-1", "region of s2i s3")
......@@ -37,6 +42,23 @@ func Client() *s3.S3 {
if s2iS3 != nil {
return s2iS3
}
clientInitMutex.Lock()
defer clientInitMutex.Unlock()
if s2iS3Session == nil {
if sess := Session(); sess == nil {
klog.Error("failed to connect to s2i s3")
return nil
}
}
s2iS3 = s3.New(s2iS3Session)
return s2iS3
}
func Session() *session.Session {
if s2iS3Session != nil {
return s2iS3Session
}
sessionInitMutex.Lock()
defer sessionInitMutex.Unlock()
creds := credentials.NewStaticCredentials(
s3AccessKeyID, s3SecretAccessKey, s3SessionToken,
)
......@@ -49,18 +71,10 @@ func Client() *s3.S3 {
}
sess, err := session.NewSession(config)
if err != nil {
glog.Errorf("failed to connect to s2i s3: %+v", err)
klog.Errorf("failed to connect to s2i s3: %+v", err)
return nil
}
s2iS3Session = sess
s2iS3 = s3.New(sess)
return s2iS3
}
func Session() *session.Session {
if s2iS3Session != nil {
return s2iS3Session
}
Client()
return s2iS3Session
}
......
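The reworked s2is3 package lazily builds the session and the client behind dedicated mutexes, so concurrent callers cannot race to initialize them, while the fast path (globals already set) stays lock-free. A compact sketch of that pattern with names mirroring the diff; the aws.NewConfig() placeholder stands in for the region and static-credential wiring from the package flags, and the re-check after taking the lock is added here for safety rather than copied from the diff:

```go
package s2is3

import (
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"k8s.io/klog"
)

var (
	s2iS3            *s3.S3
	s2iS3Session     *session.Session
	sessionInitMutex sync.Mutex
	clientInitMutex  sync.Mutex
)

// Session lazily builds the shared *session.Session. The nil check before the
// lock keeps the common path cheap; re-checking inside the lock prevents two
// goroutines from both building a session.
func Session() *session.Session {
	if s2iS3Session != nil {
		return s2iS3Session
	}
	sessionInitMutex.Lock()
	defer sessionInitMutex.Unlock()
	if s2iS3Session != nil {
		return s2iS3Session
	}
	// The real code wires the region and static credentials from the package
	// flags; aws.NewConfig() is a simplified placeholder.
	sess, err := session.NewSession(aws.NewConfig())
	if err != nil {
		klog.Errorf("failed to connect to s2i s3: %+v", err)
		return nil
	}
	s2iS3Session = sess
	return s2iS3Session
}

// Client builds the *s3.S3 service client on top of the shared session.
func Client() *s3.S3 {
	if s2iS3 != nil {
		return s2iS3
	}
	clientInitMutex.Lock()
	defer clientInitMutex.Unlock()
	if s2iS3 == nil {
		if Session() == nil {
			klog.Error("failed to connect to s2i s3")
			return nil
		}
		s2iS3 = s3.New(s2iS3Session)
	}
	return s2iS3
}
```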