Unverified commit ff2523e5 authored by KubeSphere CI Bot, committed by GitHub

Merge pull request #1984 from huanggze/dev-monitoring

monitor: add tests
......@@ -103,8 +103,13 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
informerFactory := informers.NewInformerFactories(kubernetesClient.Kubernetes(), kubernetesClient.KubeSphere(), kubernetesClient.Istio(), kubernetesClient.Application())
apiServer.InformerFactory = informerFactory
monitoringClient := prometheus.NewPrometheus(s.MonitoringOptions)
apiServer.MonitoringClient = monitoringClient
if s.MonitoringOptions.Endpoint != "" {
monitoringClient, err := prometheus.NewPrometheus(s.MonitoringOptions)
if err != nil {
return nil, err
}
apiServer.MonitoringClient = monitoringClient
}
if s.LoggingOptions.Host != "" {
loggingClient, err := esclient.NewElasticsearch(s.LoggingOptions)
......
......@@ -68,6 +68,7 @@ require (
github.com/openshift/api v0.0.0-20180801171038-322a19404e37 // indirect
github.com/pkg/errors v0.8.1
github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce
github.com/prometheus/client_golang v0.9.3
github.com/prometheus/common v0.4.0
github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009
github.com/speps/go-hashids v2.0.0+incompatible
......@@ -77,7 +78,7 @@ require (
github.com/stretchr/testify v1.4.0
github.com/xanzy/ssh-agent v0.2.1 // indirect
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
google.golang.org/grpc v1.23.1
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/go-playground/validator.v9 v9.29.1 // indirect
......@@ -86,12 +87,12 @@ require (
gopkg.in/yaml.v2 v2.2.4
istio.io/api v0.0.0-20191111210003-35e06ef8d838
istio.io/client-go v0.0.0-20191113122552-9bd0ba57c3d2
k8s.io/api v0.18.0
k8s.io/api v0.0.0-20191114100352-16d7abae0d2a
k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833
k8s.io/apimachinery v0.18.0
k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb
k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682
k8s.io/client-go v0.0.0-20191114101535-6c5935290e33
k8s.io/code-generator v0.18.0
k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894
k8s.io/component-base v0.0.0-20191114102325-35a9586014f7
k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e // indirect
k8s.io/klog v1.0.0
......
......@@ -28,7 +28,7 @@ import (
configv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/config/v1alpha2"
iamv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2"
monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha2"
monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3"
"kubesphere.io/kubesphere/pkg/kapis/oauth"
openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1"
operationsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/operations/v1alpha2"
......@@ -138,7 +138,7 @@ func (s *APIServer) installKubeSphereAPIs() {
// Need to refactor devops api registration, too much dependencies
//urlruntime.Must(devopsv1alpha2.AddToContainer(s.container, s.DevopsClient, s.DBClient.Database(), nil, s.KubernetesClient.KubeSphere(), s.InformerFactory.KubeSphereSharedInformerFactory(), s.S3Client))
urlruntime.Must(loggingv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.LoggingClient))
urlruntime.Must(monitoringv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.MonitoringClient))
urlruntime.Must(monitoringv1alpha3.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.MonitoringClient))
urlruntime.Must(openpitrixv1.AddToContainer(s.container, s.InformerFactory, s.OpenpitrixClient))
urlruntime.Must(operationsv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes()))
urlruntime.Must(resourcesv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.InformerFactory))
......
package v1alpha2
import (
"fmt"
"github.com/emicklei/go-restful"
"github.com/pkg/errors"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"strconv"
"time"
)
const (
DefaultStep = 10 * time.Minute
DefaultFilter = ".*"
DefaultOrder = model.OrderDescending
DefaultPage = 1
DefaultLimit = 5
)
type params struct {
time time.Time
start, end time.Time
step time.Duration
target string
identifier string
order string
page int
limit int
option monitoring.QueryOption
}
func (p params) isRangeQuery() bool {
return p.time.IsZero()
}
func (p params) shouldSort() bool {
return p.target != ""
}
func (h handler) parseRequestParams(req *restful.Request, lvl monitoring.MonitoringLevel) (params, error) {
timestamp := req.QueryParameter("time")
start := req.QueryParameter("start")
end := req.QueryParameter("end")
step := req.QueryParameter("step")
target := req.QueryParameter("sort_metric")
order := req.QueryParameter("sort_type")
page := req.QueryParameter("page")
limit := req.QueryParameter("limit")
metricFilter := req.QueryParameter("metrics_filter")
resourceFilter := req.QueryParameter("resources_filter")
nodeName := req.PathParameter("node")
workspaceName := req.PathParameter("workspace")
namespaceName := req.PathParameter("namespace")
workloadKind := req.PathParameter("kind")
workloadName := req.PathParameter("workload")
podName := req.PathParameter("pod")
containerName := req.PathParameter("container")
pvcName := req.PathParameter("pvc")
storageClassName := req.PathParameter("storageclass")
componentType := req.PathParameter("component")
var p params
var err error
if start != "" && end != "" {
p.start, err = time.Parse(time.RFC3339, start)
if err != nil {
return p, err
}
p.end, err = time.Parse(time.RFC3339, end)
if err != nil {
return p, err
}
if step == "" {
p.step = DefaultStep
} else {
p.step, err = time.ParseDuration(step)
if err != nil {
return p, err
}
}
} else if start == "" && end == "" {
if timestamp == "" {
p.time = time.Now()
} else {
p.time, err = time.Parse(time.RFC3339, timestamp)
if err != nil {
return p, err
}
}
} else {
return p, errors.Errorf("'time' and the combination of 'start' and 'end' are mutually exclusive.")
}
// hide metrics from a deleted namespace having the same name
namespace := req.QueryParameter("namespace")
if req.QueryParameter("namespace") != "" {
ns, err := h.k.Kubernetes().CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
return p, err
}
cts := ns.CreationTimestamp.Time
if p.start.Before(cts) {
p.start = cts
}
if p.end.Before(cts) {
return p, errors.Errorf("End timestamp must not be before namespace creation time.")
}
}
if resourceFilter == "" {
resourceFilter = DefaultFilter
}
if metricFilter == "" {
metricFilter = DefaultFilter
}
if componentType != "" {
metricFilter = fmt.Sprintf("/^(?=.*%s)(?=.*%s)/s", componentType, metricFilter)
}
// should sort
if target != "" {
p.target = target
p.page = DefaultPage
p.limit = DefaultLimit
p.order = order
if order != model.OrderAscending {
p.order = DefaultOrder
}
if page != "" {
p.page, err = strconv.Atoi(page)
if err != nil || p.page <= 0 {
return p, errors.Errorf("Invalid parameter 'page'.")
}
}
if limit != "" {
p.limit, err = strconv.Atoi(limit)
if err != nil || p.limit <= 0 {
return p, errors.Errorf("Invalid parameter 'limit'.")
}
}
}
switch lvl {
case monitoring.LevelCluster:
p.option = monitoring.ClusterOption{MetricFilter: metricFilter}
case monitoring.LevelNode:
p.identifier = model.IdentifierNode
p.option = monitoring.NodeOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
NodeName: nodeName,
}
case monitoring.LevelWorkspace:
p.identifier = model.IdentifierWorkspace
p.option = monitoring.WorkspaceOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
WorkspaceName: workspaceName,
}
case monitoring.LevelNamespace:
p.identifier = model.IdentifierNamespace
p.option = monitoring.NamespaceOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
WorkspaceName: workspaceName,
NamespaceName: namespaceName,
}
case monitoring.LevelWorkload:
p.identifier = model.IdentifierWorkload
p.option = monitoring.WorkloadOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
NamespaceName: namespaceName,
WorkloadKind: workloadKind,
WorkloadName: workloadName,
}
case monitoring.LevelPod:
p.identifier = model.IdentifierPod
p.option = monitoring.PodOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
NodeName: nodeName,
NamespaceName: namespaceName,
WorkloadKind: workloadKind,
WorkloadName: workloadName,
PodName: podName,
}
case monitoring.LevelContainer:
p.identifier = model.IdentifierContainer
p.option = monitoring.ContainerOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
NamespaceName: namespaceName,
PodName: podName,
ContainerName: containerName,
}
case monitoring.LevelPVC:
p.identifier = model.IdentifierPVC
p.option = monitoring.PVCOption{
MetricFilter: metricFilter,
ResourceFilter: resourceFilter,
NamespaceName: namespaceName,
StorageClassName: storageClassName,
PersistentVolumeClaimName: pvcName,
}
case monitoring.LevelComponent:
p.option = monitoring.ComponentOption{
MetricFilter: metricFilter,
}
}
return p, nil
}
......@@ -16,130 +16,179 @@
*/
package v1alpha2
package v1alpha3
import (
"github.com/emicklei/go-restful"
"k8s.io/client-go/kubernetes"
"kubesphere.io/kubesphere/pkg/api"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"regexp"
)
type handler struct {
k k8s.Client
k kubernetes.Interface
mo model.MonitoringOperator
}
func newHandler(k k8s.Client, m monitoring.Interface) *handler {
func newHandler(k kubernetes.Interface, m monitoring.Interface) *handler {
return &handler{k, model.NewMonitoringOperator(m)}
}
func (h handler) handleClusterMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelCluster)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelCluster)
if err != nil {
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleNodeMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelNode)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelNode)
if err != nil {
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleWorkspaceMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelWorkspace)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelWorkspace)
if err != nil {
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleNamespaceMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelNamespace)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelNamespace)
if err != nil {
if err.Error() == ErrNoHit {
res := handleNoHit(opt.namedMetrics)
resp.WriteAsJson(res)
return
}
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleWorkloadMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelWorkload)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelWorkload)
if err != nil {
if err.Error() == ErrNoHit {
res := handleNoHit(opt.namedMetrics)
resp.WriteAsJson(res)
return
}
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handlePodMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelPod)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelPod)
if err != nil {
if err.Error() == ErrNoHit {
res := handleNoHit(opt.namedMetrics)
resp.WriteAsJson(res)
return
}
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleContainerMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelContainer)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelContainer)
if err != nil {
if err.Error() == ErrNoHit {
res := handleNoHit(opt.namedMetrics)
resp.WriteAsJson(res)
return
}
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handlePVCMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelPVC)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelPVC)
if err != nil {
if err.Error() == ErrNoHit {
res := handleNoHit(opt.namedMetrics)
resp.WriteAsJson(res)
return
}
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleComponentMetricsQuery(req *restful.Request, resp *restful.Response) {
p, err := h.parseRequestParams(req, monitoring.LevelComponent)
params := parseRequestParams(req)
opt, err := h.makeQueryOptions(params, monitoring.LevelComponent)
if err != nil {
api.HandleBadRequest(resp, nil, err)
return
}
h.handleNamedMetricsQuery(resp, p)
h.handleNamedMetricsQuery(resp, opt)
}
func (h handler) handleNamedMetricsQuery(resp *restful.Response, p params) {
var res v1alpha2.APIResponse
var err error
func handleNoHit(namedMetrics []string) model.Metrics {
var res model.Metrics
for _, metric := range namedMetrics {
res.Results = append(res.Results, monitoring.Metric{
MetricName: metric,
MetricData: monitoring.MetricData{},
})
}
return res
}
if p.isRangeQuery() {
res, err = h.mo.GetNamedMetricsOverTime(p.start, p.end, p.step, p.option)
if err != nil {
api.HandleInternalError(resp, nil, err)
return
}
} else {
res, err = h.mo.GetNamedMetrics(p.time, p.option)
if err != nil {
api.HandleInternalError(resp, nil, err)
return
}
func (h handler) handleNamedMetricsQuery(resp *restful.Response, q queryOptions) {
var res model.Metrics
if p.shouldSort() {
var rows int
res, rows = h.mo.SortMetrics(res, p.target, p.order, p.identifier)
res = h.mo.PageMetrics(res, p.page, p.limit, rows)
var metrics []string
for _, metric := range q.namedMetrics {
ok, _ := regexp.MatchString(q.metricFilter, metric)
if ok {
metrics = append(metrics, metric)
}
}
if len(metrics) == 0 {
resp.WriteAsJson(res)
return
}
if q.isRangeQuery() {
res = h.mo.GetNamedMetricsOverTime(metrics, q.start, q.end, q.step, q.option)
} else {
res = h.mo.GetNamedMetrics(metrics, q.time, q.option)
if q.shouldSort() {
res = *res.Sort(q.target, q.order, q.identifier).Page(q.page, q.limit)
}
}
resp.WriteAsJson(res)
}
package v1alpha3
import (
"github.com/emicklei/go-restful"
"github.com/pkg/errors"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"strconv"
"time"
)
const (
DefaultStep = 10 * time.Minute
DefaultFilter = ".*"
DefaultOrder = model.OrderDescending
DefaultPage = 1
DefaultLimit = 5
ComponentEtcd = "etcd"
ComponentAPIServer = "apiserver"
ComponentScheduler = "scheduler"
ErrNoHit = "'end' must be after the namespace creation time."
ErrParamConflict = "'time' and the combination of 'start' and 'end' are mutually exclusive."
ErrInvalidStartEnd = "'start' must be before 'end'."
ErrInvalidPage = "Invalid parameter 'page'."
ErrInvalidLimit = "Invalid parameter 'limit'."
)
type reqParams struct {
time string
start string
end string
step string
target string
order string
page string
limit string
metricFilter string
resourceFilter string
nodeName string
workspaceName string
namespaceName string
workloadKind string
workloadName string
podName string
containerName string
pvcName string
storageClassName string
componentType string
}
type queryOptions struct {
metricFilter string
namedMetrics []string
start time.Time
end time.Time
time time.Time
step time.Duration
target string
identifier string
order string
page int
limit int
option monitoring.QueryOption
}
func (q queryOptions) isRangeQuery() bool {
return q.time.IsZero()
}
func (q queryOptions) shouldSort() bool {
return q.target != "" && q.identifier != ""
}
func parseRequestParams(req *restful.Request) reqParams {
var r reqParams
r.time = req.QueryParameter("time")
r.start = req.QueryParameter("start")
r.end = req.QueryParameter("end")
r.step = req.QueryParameter("step")
r.target = req.QueryParameter("sort_metric")
r.order = req.QueryParameter("sort_type")
r.page = req.QueryParameter("page")
r.limit = req.QueryParameter("limit")
r.metricFilter = req.QueryParameter("metrics_filter")
r.resourceFilter = req.QueryParameter("resources_filter")
r.nodeName = req.PathParameter("node")
r.workspaceName = req.PathParameter("workspace")
r.namespaceName = req.PathParameter("namespace")
r.workloadKind = req.PathParameter("kind")
r.workloadName = req.PathParameter("workload")
r.podName = req.PathParameter("pod")
r.containerName = req.PathParameter("container")
r.pvcName = req.PathParameter("pvc")
r.storageClassName = req.PathParameter("storageclass")
r.componentType = req.PathParameter("component")
return r
}
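For orientation, here is a minimal sketch of how a request maps onto reqParams; the query string is illustrative, and note that v1alpha3 takes Unix timestamps where v1alpha2 accepted RFC3339 strings:

    // Illustrative request:
    //   GET /nodes?start=1585830000&end=1585839999&step=1m&sort_metric=node_cpu_utilisation
    // parseRequestParams copies the raw query and path parameters verbatim;
    // all validation and type conversion is deferred to makeQueryOptions.
    r := reqParams{
        start:  "1585830000",
        end:    "1585839999",
        step:   "1m",
        target: "node_cpu_utilisation",
    }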
func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOptions, err error) {
if r.resourceFilter == "" {
r.resourceFilter = DefaultFilter
}
q.metricFilter = r.metricFilter
if r.metricFilter == "" {
q.metricFilter = DefaultFilter
}
switch lvl {
case monitoring.LevelCluster:
q.option = monitoring.ClusterOption{}
q.namedMetrics = model.ClusterMetrics
case monitoring.LevelNode:
q.identifier = model.IdentifierNode
q.namedMetrics = model.NodeMetrics
q.option = monitoring.NodeOption{
ResourceFilter: r.resourceFilter,
NodeName: r.nodeName,
}
case monitoring.LevelWorkspace:
q.identifier = model.IdentifierWorkspace
q.namedMetrics = model.WorkspaceMetrics
q.option = monitoring.WorkspaceOption{
ResourceFilter: r.resourceFilter,
WorkspaceName: r.workspaceName,
}
case monitoring.LevelNamespace:
q.identifier = model.IdentifierNamespace
q.namedMetrics = model.NamespaceMetrics
q.option = monitoring.NamespaceOption{
ResourceFilter: r.resourceFilter,
WorkspaceName: r.workspaceName,
NamespaceName: r.namespaceName,
}
case monitoring.LevelWorkload:
q.identifier = model.IdentifierWorkload
q.namedMetrics = model.WorkloadMetrics
q.option = monitoring.WorkloadOption{
ResourceFilter: r.resourceFilter,
NamespaceName: r.namespaceName,
WorkloadKind: r.workloadKind,
}
case monitoring.LevelPod:
q.identifier = model.IdentifierPod
q.namedMetrics = model.PodMetrics
q.option = monitoring.PodOption{
ResourceFilter: r.resourceFilter,
NodeName: r.nodeName,
NamespaceName: r.namespaceName,
WorkloadKind: r.workloadKind,
WorkloadName: r.workloadName,
PodName: r.podName,
}
case monitoring.LevelContainer:
q.identifier = model.IdentifierContainer
q.namedMetrics = model.ContainerMetrics
q.option = monitoring.ContainerOption{
ResourceFilter: r.resourceFilter,
NamespaceName: r.namespaceName,
PodName: r.podName,
ContainerName: r.containerName,
}
case monitoring.LevelPVC:
q.identifier = model.IdentifierPVC
q.namedMetrics = model.PVCMetrics
q.option = monitoring.PVCOption{
ResourceFilter: r.resourceFilter,
NamespaceName: r.namespaceName,
StorageClassName: r.storageClassName,
PersistentVolumeClaimName: r.pvcName,
}
case monitoring.LevelComponent:
q.option = monitoring.ComponentOption{}
switch r.componentType {
case ComponentEtcd:
q.namedMetrics = model.EtcdMetrics
case ComponentAPIServer:
q.namedMetrics = model.APIServerMetrics
case ComponentScheduler:
q.namedMetrics = model.SchedulerMetrics
}
}
// Parse time params
if r.start != "" && r.end != "" {
startInt, err := strconv.ParseInt(r.start, 10, 64)
if err != nil {
return q, err
}
q.start = time.Unix(startInt, 0)
endInt, err := strconv.ParseInt(r.end, 10, 64)
if err != nil {
return q, err
}
q.end = time.Unix(endInt, 0)
if r.step == "" {
q.step = DefaultStep
} else {
q.step, err = time.ParseDuration(r.step)
if err != nil {
return q, err
}
}
if q.start.After(q.end) {
return q, errors.New(ErrInvalidStartEnd)
}
} else if r.start == "" && r.end == "" {
if r.time == "" {
q.time = time.Now()
} else {
timeInt, err := strconv.ParseInt(r.time, 10, 64)
if err != nil {
return q, err
}
q.time = time.Unix(timeInt, 0)
}
} else {
return q, errors.New(ErrParamConflict)
}
// Ensure query start time to be after the namespace creation time
if r.namespaceName != "" {
ns, err := h.k.CoreV1().Namespaces().Get(r.namespaceName, metav1.GetOptions{})
if err != nil {
return q, err
}
cts := ns.CreationTimestamp.Time
if q.start.Before(cts) {
q.start = cts
}
if q.end.Before(cts) {
return q, errors.New(ErrNoHit)
}
}
// Parse sorting and paging params
if r.target != "" {
q.target = r.target
q.page = DefaultPage
q.limit = DefaultLimit
q.order = r.order
if r.order != model.OrderAscending {
q.order = DefaultOrder
}
if r.page != "" {
q.page, err = strconv.Atoi(r.page)
if err != nil || q.page <= 0 {
return q, errors.New(ErrInvalidPage)
}
}
if r.limit != "" {
q.limit, err = strconv.Atoi(r.limit)
if err != nil || q.limit <= 0 {
return q, errors.New(ErrInvalidLimit)
}
}
}
return q, nil
}
package v1alpha3
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"reflect"
"testing"
"time"
)
func TestParseRequestParams(t *testing.T) {
tests := []struct {
params reqParams
lvl monitoring.Level
namespace corev1.Namespace
expected queryOptions
expectedErr bool
}{
{
params: reqParams{
time: "abcdef",
},
lvl: monitoring.LevelCluster,
expectedErr: true,
},
{
params: reqParams{
time: "1585831995",
},
lvl: monitoring.LevelCluster,
expected: queryOptions{
time: time.Unix(1585831995, 0),
metricFilter: ".*",
namedMetrics: model.ClusterMetrics,
option: monitoring.ClusterOption{},
},
expectedErr: false,
},
{
params: reqParams{
start: "1585830000",
end: "1585839999",
step: "1m",
namespaceName: "default",
},
lvl: monitoring.LevelNamespace,
namespace: corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
CreationTimestamp: metav1.Time{
Time: time.Unix(1585836666, 0),
},
},
},
expected: queryOptions{
start: time.Unix(1585836666, 0),
end: time.Unix(1585839999, 0),
step: time.Minute,
identifier: model.IdentifierNamespace,
metricFilter: ".*",
namedMetrics: model.NamespaceMetrics,
option: monitoring.NamespaceOption{
ResourceFilter: ".*",
NamespaceName: "default",
},
},
expectedErr: false,
},
{
params: reqParams{
start: "1585830000",
end: "1585839999",
step: "1m",
namespaceName: "default",
},
lvl: monitoring.LevelNamespace,
namespace: corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
CreationTimestamp: metav1.Time{
Time: time.Unix(1589999999, 0),
},
},
},
expectedErr: true,
},
{
params: reqParams{
start: "1585830000",
end: "1585839999",
step: "1m",
namespaceName: "non-exist",
},
lvl: monitoring.LevelNamespace,
namespace: corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
CreationTimestamp: metav1.Time{
Time: time.Unix(1589999999, 0),
},
},
},
expectedErr: true,
},
{
params: reqParams{
time: "1585830000",
componentType: "etcd",
metricFilter: "etcd_server_list",
},
lvl: monitoring.LevelComponent,
expected: queryOptions{
time: time.Unix(1585830000, 0),
metricFilter: "etcd_server_list",
namedMetrics: model.EtcdMetrics,
option: monitoring.ComponentOption{},
},
expectedErr: false,
},
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
client := fake.NewSimpleClientset(&tt.namespace)
handler := newHandler(client, nil)
result, err := handler.makeQueryOptions(tt.params, tt.lvl)
if err != nil {
if !tt.expectedErr {
t.Fatalf("unexpected err: %s.", err.Error())
}
return
}
if tt.expectedErr {
t.Fatalf("failed to catch error.")
}
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected return: %v.", result)
}
})
}
}
......@@ -19,19 +19,15 @@
package monitoring
import (
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"time"
)
type MonitoringOperator interface {
GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error)
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error)
GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error)
GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error)
SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int)
PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse
GetMetrics(stmts []string, time time.Time) Metrics
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics
GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics
GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics
}
type monitoringOperator struct {
......@@ -43,27 +39,21 @@ func NewMonitoringOperator(client monitoring.Interface) MonitoringOperator {
}
// TODO(huanggze): reserve for custom monitoring
func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error) {
func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) Metrics {
panic("implement me")
}
// TODO(huanggze): reserve for custom monitoring
func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error) {
func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics {
panic("implement me")
}
func (mo monitoringOperator) GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) {
metrics, err := mo.c.GetNamedMetrics(time, opt)
if err != nil {
klog.Error(err)
}
return v1alpha2.APIResponse{Results: metrics}, err
func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetrics(metrics, time, opt)
return Metrics{Results: ress}
}
func (mo monitoringOperator) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) {
metrics, err := mo.c.GetNamedMetricsOverTime(start, end, step, opt)
if err != nil {
klog.Error(err)
}
return v1alpha2.APIResponse{Results: metrics}, err
func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetricsOverTime(metrics, start, end, step, opt)
return Metrics{Results: ress}
}
package monitoring
type MonitoringLevel int
const (
LevelCluster = MonitoringLevel(1) << iota
LevelNode
LevelWorkspace
LevelNamespace
LevelWorkload
LevelPod
LevelContainer
LevelPVC
LevelComponent
)
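Shifting 1 by iota gives every level a distinct power-of-two value, so levels can be combined or tested as bit flags; spelled out:

    // LevelCluster   == 1 << 0 == 1
    // LevelNode      == 1 << 1 == 2
    // LevelWorkspace == 1 << 2 == 4
    // ...
    // LevelComponent == 1 << 8 == 256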
var ClusterMetrics = []string{
"cluster_cpu_utilisation",
"cluster_cpu_usage",
......@@ -161,7 +147,6 @@ var WorkloadMetrics = []string{
"workload_memory_usage_wo_cache",
"workload_net_bytes_transmitted",
"workload_net_bytes_received",
"workload_deployment_replica",
"workload_deployment_replica_available",
"workload_statefulset_replica",
......@@ -198,7 +183,7 @@ var PVCMetrics = []string{
"pvc_bytes_utilisation",
}
var ComponentMetrics = []string{
var EtcdMetrics = []string{
"etcd_server_list",
"etcd_server_total",
"etcd_server_up_total",
......@@ -219,34 +204,20 @@ var ComponentMetrics = []string{
"etcd_disk_wal_fsync_duration_quantile",
"etcd_disk_backend_commit_duration",
"etcd_disk_backend_commit_duration_quantile",
}
var APIServerMetrics = []string{
"apiserver_up_sum",
"apiserver_request_rate",
"apiserver_request_by_verb_rate",
"apiserver_request_latencies",
"apiserver_request_by_verb_latencies",
}
var SchedulerMetrics = []string{
"scheduler_up_sum",
"scheduler_schedule_attempts",
"scheduler_schedule_attempt_rate",
"scheduler_e2e_scheduling_latency",
"scheduler_e2e_scheduling_latency_quantile",
"controller_manager_up_sum",
"coredns_up_sum",
"coredns_cache_hits",
"coredns_cache_misses",
"coredns_dns_request_rate",
"coredns_dns_request_duration",
"coredns_dns_request_duration_quantile",
"coredns_dns_request_by_type_rate",
"coredns_dns_request_by_rcode_rate",
"coredns_panic_rate",
"coredns_proxy_request_rate",
"coredns_proxy_request_duration",
"coredns_proxy_request_duration_quantile",
"prometheus_up_sum",
"prometheus_tsdb_head_samples_appended_rate",
}
......@@ -19,7 +19,6 @@
package monitoring
import (
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"math"
"sort"
......@@ -41,7 +40,7 @@ const (
type wrapper struct {
monitoring.MetricData
by func(p, q *monitoring.MetricValue) bool
identifier, order string
}
func (w wrapper) Len() int {
......@@ -49,156 +48,142 @@ func (w wrapper) Len() int {
}
func (w wrapper) Less(i, j int) bool {
return w.by(&w.MetricValues[i], &w.MetricValues[j])
p := w.MetricValues[i]
q := w.MetricValues[j]
if p.Sample.Value() == q.Sample.Value() {
return p.Metadata[w.identifier] < q.Metadata[w.identifier]
}
switch w.order {
case OrderAscending:
return p.Sample.Value() < q.Sample.Value()
default:
return p.Sample.Value() > q.Sample.Value()
}
}
func (w wrapper) Swap(i, j int) {
w.MetricValues[i], w.MetricValues[j] = w.MetricValues[j], w.MetricValues[i]
func (id wrapper) Swap(i, j int) {
id.MetricValues[i], id.MetricValues[j] = id.MetricValues[j], id.MetricValues[i]
}
// The sortMetrics sorts a group of resources by a given metric
// SortMetrics sorts a group of resources by a given metric. Range query doesn't support ranking.
// Example:
//
// before sorting
// |------| Metric 1 | Metric 2 | Metric 3 |
// | ID a | 1 | XL | |
// | ID b | 1 | S | |
// | ID c | 3 | M | |
// Before sorting:
// | ID | Metric 1 | Metric 2 | Metric 3 |
// | a | 1 | XL | |
// | b | 1 | S | |
// | c | 3 | M | |
//
// sort by metrics_2
// |------| Metric 1 | Metric 2 (asc) | Metric 3 |
// | ID a | 1 | XL | |
// | ID c | 3 | M | |
// | ID b | 1 | S | |
//
// ranking can only be applied to instant query results, not range query
func (mo monitoringOperator) SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int) {
if target == "" || len(raw.Results) == 0 {
return raw, -1
}
if order == "" {
order = OrderDescending
// After sorting: target=metric_2, order=asc, identifier=id
// | ID | Metric 1 | Metric 2 (asc) | Metric 3 |
// | a | 1 | XL | |
// | c | 3 | M | |
// | b | 1 | S | |
func (raw *Metrics) Sort(target, order, identifier string) *Metrics {
if target == "" || identifier == "" || len(raw.Results) == 0 {
return raw
}
var currentResourceMap = make(map[string]int)
// resource-ordinal map
var indexMap = make(map[string]int)
i := 0
resourceSet := make(map[string]bool) // resource set records possible values of the identifier
resourceOrdinal := make(map[string]int) // resource-ordinal map
ordinal := 0
for _, item := range raw.Results {
if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess {
if item.MetricName == target {
if order == OrderAscending {
sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool {
if p.Sample[1] == q.Sample[1] {
return p.Metadata[identifier] < q.Metadata[identifier]
}
return p.Sample[1] < q.Sample[1]
}})
} else {
sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool {
if p.Sample[1] == q.Sample[1] {
return p.Metadata[identifier] > q.Metadata[identifier]
}
return p.Sample[1] > q.Sample[1]
}})
}
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
for _, r := range item.MetricValues {
// record the ordinal of resource to indexMap
resourceName, exist := r.Metadata[identifier]
if exist {
if _, exist := indexMap[resourceName]; !exist {
indexMap[resourceName] = i
i = i + 1
}
}
if item.MetricName == target {
sort.Sort(wrapper{
MetricData: item.MetricData,
identifier: identifier,
order: order,
})
for _, mv := range item.MetricValues {
// Record each resource's ordinal as it appears in the sorted target metric
v, ok := mv.Metadata[identifier]
if ok && v != "" {
resourceOrdinal[v] = ordinal
ordinal++
}
}
}
// get total number of rows
for _, r := range item.MetricValues {
k, ok := r.Metadata[identifier]
if ok {
currentResourceMap[k] = 1
}
// Add every unique identifier value to the set
for _, mv := range item.MetricValues {
v, ok := mv.Metadata[identifier]
if ok && v != "" {
resourceSet[v] = true
}
}
}
var keys []string
for k := range currentResourceMap {
keys = append(keys, k)
var resourceList []string
for k := range resourceSet {
resourceList = append(resourceList, k)
}
sort.Strings(keys)
sort.Strings(resourceList)
for _, resource := range keys {
if _, exist := indexMap[resource]; !exist {
indexMap[resource] = i
i = i + 1
// Give ordinals to resources that never appear in the target metric, so every resource still gets a stable position.
for _, r := range resourceList {
if _, ok := resourceOrdinal[r]; !ok {
resourceOrdinal[r] = ordinal
ordinal++
}
}
// sort other metrics
for i := 0; i < len(raw.Results); i++ {
item := raw.Results[i]
if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess {
sortedMetric := make([]monitoring.MetricValue, len(indexMap))
for j := 0; j < len(item.MetricValues); j++ {
r := item.MetricValues[j]
k, exist := r.Metadata[identifier]
if exist {
index, exist := indexMap[k]
if exist {
sortedMetric[index] = r
}
}
}
// Sort metrics
for i, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
raw.Results[i].MetricValues = sortedMetric
sorted := make([]monitoring.MetricValue, len(resourceList))
for _, mv := range item.MetricValues {
v, ok := mv.Metadata[identifier]
if ok && v != "" {
sorted[resourceOrdinal[v]] = mv
}
}
raw.Results[i].MetricValues = sorted
}
return raw, len(indexMap)
raw.CurrentPage = 1
raw.TotalPages = 1
raw.TotalItems = len(resourceList)
return raw
}
func (mo monitoringOperator) PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse {
if page <= 0 || limit <= 0 || rows <= 0 || len(raw.Results) == 0 {
func (raw *Metrics) Page(page, limit int) *Metrics {
if page < 1 || limit < 1 || len(raw.Results) == 0 {
return raw
}
// matrix type can not be sorted
for _, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector {
return raw
}
}
// the i page: [(page-1) * limit, (page) * limit - 1]
start := (page - 1) * limit
end := (page)*limit - 1
end := page * limit
for i := 0; i < len(raw.Results); i++ {
if raw.Results[i].MetricType != monitoring.MetricTypeVector || raw.Results[i].Status != monitoring.StatusSuccess {
for i, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
resultLen := len(raw.Results[i].MetricValues)
if start >= resultLen {
total := len(item.MetricValues)
if start >= total {
raw.Results[i].MetricValues = nil
continue
}
if end >= resultLen {
end = resultLen - 1
if end >= total {
end = total
}
slice := raw.Results[i].MetricValues[start : end+1]
raw.Results[i].MetricValues = slice
raw.Results[i].MetricValues = item.MetricValues[start:end]
}
raw.CurrentPage = page
raw.TotalPage = int(math.Ceil(float64(rows) / float64(limit)))
raw.TotalItem = rows
raw.TotalPages = int(math.Ceil(float64(raw.TotalItems) / float64(limit)))
return raw
}
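Taken together, the v1alpha3 handler chains the two helpers on an instant-query result; a minimal sketch, with the metric name and identifier chosen from this package's testdata:

    // rankAndPage ranks nodes by CPU utilisation in ascending order, then
    // takes page 2 with five entries per page. Sort fills TotalItems with the
    // number of distinct nodes; Page slices each vector result to
    // [(page-1)*limit : page*limit] and computes TotalPages.
    func rankAndPage(res *Metrics) *Metrics {
        return res.Sort("node_cpu_utilisation", OrderAscending, "node").Page(2, 5)
    }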
package monitoring
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/json-iterator/go"
"io/ioutil"
"testing"
)
func TestSort(t *testing.T) {
tests := []struct {
name string
target string
order string
identifier string
source string
expected string
}{
{"sort in ascending order", "node_cpu_utilisation", "asc", "node", "source-node-metrics.json", "sorted-node-metrics-asc.json"},
{"sort in descending order", "node_memory_utilisation", "desc", "node", "source-node-metrics.json", "sorted-node-metrics-desc.json"},
{"sort faulty metrics", "node_memory_utilisation", "desc", "node", "faulty-node-metrics.json", "faulty-node-metrics-sorted.json"},
{"sort metrics with an blank node", "node_memory_utilisation", "desc", "node", "blank-node-metrics.json", "blank-node-metrics-sorted.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
source, expected, err := jsonFromFile(tt.source, tt.expected)
if err != nil {
t.Fatal(err)
}
result := source.Sort(tt.target, tt.order, tt.identifier)
if diff := cmp.Diff(*result, *expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func TestPage(t *testing.T) {
tests := []struct {
name string
page int
limit int
source string
expected string
}{
{"page 0 limit 5", 0, 5, "sorted-node-metrics-asc.json", "sorted-node-metrics-asc.json"},
{"page 1 limit 5", 1, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-1.json"},
{"page 2 limit 5", 2, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-2.json"},
{"page 3 limit 5", 3, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-3.json"},
{"page faulty metrics", 1, 2, "faulty-node-metrics-sorted.json", "faulty-node-metrics-paged.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
source, expected, err := jsonFromFile(tt.source, tt.expected)
if err != nil {
t.Fatal(err)
}
result := source.Page(tt.page, tt.limit)
if diff := cmp.Diff(*result, *expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func jsonFromFile(sourceFile, expectedFile string) (*Metrics, *Metrics, error) {
sourceJson := &Metrics{}
expectedJson := &Metrics{}
json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", sourceFile))
if err != nil {
return nil, nil, err
}
err = jsoniter.Unmarshal(json, sourceJson)
if err != nil {
return nil, nil, err
}
json, err = ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
if err != nil {
return nil, nil, err
}
err = jsoniter.Unmarshal(json, expectedJson)
if err != nil {
return nil, nil, err
}
return sourceJson, expectedJson, nil
}
{
"results":[
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":3
}
\ No newline at end of file
{
"results": [
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": ""
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": ""
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
}
]
}
}
]
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"error":"error"
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
}
]
}
}
],
"page":1,
"total_page":2,
"total_item":4
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"error":"error"
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":4
}
\ No newline at end of file
{
"results": [
{
"metric_name": "node_cpu_utilisation",
"error": "error"
},
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
}
]
}
}
]
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
}
]
}
}
],
"page":1,
"total_page":2,
"total_item":8
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
}
]
}
}
],
"page":2,
"total_page":2,
"total_item":8
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector"
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector"
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector"
}
}
],
"page":3,
"total_page":2,
"total_item":8
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":8
}
\ No newline at end of file
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":8
}
\ No newline at end of file
{
"results": [
{
"metric_name": "node_cpu_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.18095833333306172
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.03250000000007276
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.07443750000044626
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.05066666666655995
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.193,
0.05210416666595847
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.193,
0.07756249999996119
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.193,
0.021645833333483702
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.193,
0.06745833333334303
]
}
]
}
},
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.193,
0.21351118996831508
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.193,
0.4329682466178235
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.193,
0.3335848564534758
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.193,
0.35981263055856705
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.195,
0.21291125105270192
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.195,
0.823247832787681
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.195,
0.12824588180084573
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.195,
0.40309723127991315
]
}
]
}
}
]
}
\ No newline at end of file
package v1alpha2
package monitoring
import "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
type APIResponse struct {
type Metrics struct {
Results []monitoring.Metric `json:"results" description:"actual array of results"`
CurrentPage int `json:"page,omitempty" description:"current page returned"`
TotalPage int `json:"total_page,omitempty" description:"total number of pages"`
TotalItem int `json:"total_item,omitempty" description:"page size"`
TotalPages int `json:"total_page,omitempty" description:"total number of pages"`
TotalItems int `json:"total_item,omitempty" description:"total number of items"`
}
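With these JSON tags, a populated Metrics value (results being a []monitoring.Metric) marshals to the envelope seen in the paged testdata files; for example:

    // Marshals to {"results":[...],"page":1,"total_page":2,"total_item":8}.
    m := Metrics{Results: results, CurrentPage: 1, TotalPages: 2, TotalItems: 8}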
......@@ -170,7 +170,9 @@ func (d *FakeDevops) GetCredentialInProject(projectId, id string, content bool)
func (d *FakeDevops) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) {
return nil, nil
}
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) { return nil, nil }
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) {
return nil, nil
}
// BuildGetter
func (d *FakeDevops) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) {
......
......@@ -2,40 +2,9 @@ package monitoring
import "time"
const (
StatusSuccess = "success"
StatusError = "error"
MetricTypeMatrix = "matrix"
MetricTypeVector = "vector"
)
type Metric struct {
MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
Status string `json:"status" description:"result status, one of error, success"`
MetricData `json:"data" description:"actual metric result"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}
type MetricData struct {
MetricType string `json:"resultType" description:"result type, one of matrix, vector"`
MetricValues []MetricValue `json:"result" description:"metric data including labels, time series and values"`
}
type Point [2]float64
type MetricValue struct {
Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
Sample Point `json:"value,omitempty" description:"time series, values of vector type"`
Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
}
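As a concrete reference, one instant-vector sample from the testdata above maps onto these types as follows:

    // JSON: {"metric":{"node":"i-2dazc1d6"},"value":[1585658599.193,0.42012898861983516]}
    mv := MetricValue{
        Metadata: map[string]string{"node": "i-2dazc1d6"},
        Sample:   Point{1585658599.193, 0.42012898861983516},
    }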
type Interface interface {
// The `stmts` defines statements, expressions or rules (eg. promql in Prometheus) for querying specific metrics.
GetMetrics(stmts []string, time time.Time) ([]Metric, error)
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]Metric, error)
// Get named metrics (eg. node_cpu_usage)
GetNamedMetrics(time time.Time, opt QueryOption) ([]Metric, error)
GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt QueryOption) ([]Metric, error)
GetMetrics(exprs []string, time time.Time) []Metric
GetMetricsOverTime(exprs []string, start, end time.Time, step time.Duration) []Metric
GetNamedMetrics(metrics []string, time time.Time, opt QueryOption) []Metric
GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt QueryOption) []Metric
}
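Because the reworked Interface methods no longer return errors, a test double is trivial to write; a hypothetical sketch (not part of this commit):

    // fakeMonitoring returns canned results for every query.
    type fakeMonitoring struct{ result []Metric }

    func (f fakeMonitoring) GetMetrics(exprs []string, t time.Time) []Metric { return f.result }
    func (f fakeMonitoring) GetMetricsOverTime(exprs []string, start, end time.Time, step time.Duration) []Metric {
        return f.result
    }
    func (f fakeMonitoring) GetNamedMetrics(metrics []string, t time.Time, opt QueryOption) []Metric {
        return f.result
    }
    func (f fakeMonitoring) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt QueryOption) []Metric {
        return f.result
    }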
package prometheus
import (
-"fmt"
-"github.com/json-iterator/go"
-"io/ioutil"
+"context"
+"github.com/prometheus/client_golang/api"
+apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
+"github.com/prometheus/common/model"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
-"net/http"
-"net/url"
-"regexp"
"sync"
"time"
)
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
// prometheus implements monitoring interface backed by Prometheus
type prometheus struct {
-options *Options
-client *http.Client
+client apiv1.API
}
-func NewPrometheus(options *Options) monitoring.Interface {
-return &prometheus{
-options: options,
-client: &http.Client{Timeout: 10 * time.Second},
+func NewPrometheus(options *Options) (monitoring.Interface, error) {
+cfg := api.Config{
+Address: options.Endpoint,
}
+client, err := api.NewClient(cfg)
+return prometheus{client: apiv1.NewAPI(client)}, err
}
// TODO(huanggze): reserve for custom monitoring
-func (p *prometheus) GetMetrics(stmts []string, time time.Time) ([]monitoring.Metric, error) {
+func (p prometheus) GetMetrics(stmts []string, time time.Time) []monitoring.Metric {
panic("implement me")
}
// TODO(huanggze): reserve for custom monitoring
-func (p *prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]monitoring.Metric, error) {
+func (p prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) []monitoring.Metric {
panic("implement me")
}
-func (p *prometheus) GetNamedMetrics(ts time.Time, o monitoring.QueryOption) ([]monitoring.Metric, error) {
-metrics := make([]monitoring.Metric, 0)
-var mtx sync.Mutex // guard metrics
+func (p prometheus) GetNamedMetrics(metrics []string, ts time.Time, o monitoring.QueryOption) []monitoring.Metric {
+var res []monitoring.Metric
+var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
-errCh := make(chan error)
-for _, metric := range opts.NamedMetrics {
-matched, _ := regexp.MatchString(opts.MetricFilter, metric)
-if matched {
-exp := makeExpression(metric, *opts)
-wg.Add(1)
-go func(metric, exp string) {
-res, err := p.query(exp, ts)
-if err != nil {
-select {
-case errCh <- err: // Record error once
-default:
-}
-} else {
-res.MetricName = metric // Add metric name
-mtx.Lock()
-metrics = append(metrics, res)
-mtx.Unlock()
-}
-wg.Done()
-}(metric, exp)
-}
-}
+for _, metric := range metrics {
+wg.Add(1)
+go func(metric string) {
+parsedResp := monitoring.Metric{MetricName: metric}
+value, err := p.client.Query(context.Background(), makeExpr(metric, *opts), ts)
+if err != nil {
+parsedResp.Error = err.(*apiv1.Error).Msg
+} else {
+parsedResp.MetricData = parseQueryResp(value)
+}
+mtx.Lock()
+res = append(res, parsedResp)
+mtx.Unlock()
+wg.Done()
+}(metric)
}
wg.Wait()
-select {
-case err := <-errCh:
-return nil, err
-default:
-return metrics, nil
-}
+return res
}
-func (p *prometheus) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, o monitoring.QueryOption) ([]monitoring.Metric, error) {
-metrics := make([]monitoring.Metric, 0)
-var mtx sync.Mutex // guard metrics
+func (p prometheus) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, o monitoring.QueryOption) []monitoring.Metric {
+var res []monitoring.Metric
+var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
-errCh := make(chan error)
-for _, metric := range opts.NamedMetrics {
-matched, _ := regexp.MatchString(opts.MetricFilter, metric)
-if matched {
-exp := makeExpression(metric, *opts)
-wg.Add(1)
-go func(metric, exp string) {
-res, err := p.rangeQuery(exp, start, end, step)
-if err != nil {
-select {
-case errCh <- err: // Record error once
-default:
-}
-} else {
-res.MetricName = metric // Add metric name
-mtx.Lock()
-metrics = append(metrics, res)
-mtx.Unlock()
-}
-wg.Done()
-}(metric, exp)
-}
-}
-wg.Wait()
-select {
-case err := <-errCh:
-return nil, err
-default:
-return metrics, nil
-}
+timeRange := apiv1.Range{
+Start: start,
+End: end,
+Step: step,
+}
+for _, metric := range metrics {
+wg.Add(1)
+go func(metric string) {
+parsedResp := monitoring.Metric{MetricName: metric}
+value, err := p.client.QueryRange(context.Background(), makeExpr(metric, *opts), timeRange)
+if err != nil {
+parsedResp.Error = err.(*apiv1.Error).Msg
+} else {
+parsedResp.MetricData = parseQueryRangeResp(value)
+}
+mtx.Lock()
+res = append(res, parsedResp)
+mtx.Unlock()
+wg.Done()
+}(metric)
+}
+wg.Wait()
+return res
}
-func (p prometheus) query(exp string, ts time.Time) (monitoring.Metric, error) {
-params := &url.Values{}
-params.Set("time", ts.Format(time.RFC3339))
-params.Set("query", exp)
-u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
-var m monitoring.Metric
-response, err := p.client.Get(u)
-if err != nil {
-return monitoring.Metric{}, err
-}
-body, err := ioutil.ReadAll(response.Body)
-if err != nil {
-return monitoring.Metric{}, err
-}
-defer response.Body.Close()
-err = json.Unmarshal(body, m)
-if err != nil {
-return monitoring.Metric{}, err
-}
-return m, nil
+func parseQueryRangeResp(value model.Value) monitoring.MetricData {
+res := monitoring.MetricData{MetricType: monitoring.MetricTypeMatrix}
+data, _ := value.(model.Matrix)
+for _, v := range data {
+mv := monitoring.MetricValue{
+Metadata: make(map[string]string),
+}
+for k, v := range v.Metric {
+mv.Metadata[string(k)] = string(v)
+}
+for _, k := range v.Values {
+mv.Series = append(mv.Series, monitoring.Point{float64(k.Timestamp) / 1000, float64(k.Value)})
+}
+res.MetricValues = append(res.MetricValues, mv)
+}
+return res
}
-func (p prometheus) rangeQuery(exp string, start, end time.Time, step time.Duration) (monitoring.Metric, error) {
-params := &url.Values{}
-params.Set("start", start.Format(time.RFC3339))
-params.Set("end", end.Format(time.RFC3339))
-params.Set("step", step.String())
-params.Set("query", exp)
-u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
-var m monitoring.Metric
-response, err := p.client.Get(u)
-if err != nil {
-return monitoring.Metric{}, err
-}
-body, err := ioutil.ReadAll(response.Body)
-if err != nil {
-return monitoring.Metric{}, err
-}
-defer response.Body.Close()
-err = json.Unmarshal(body, m)
-if err != nil {
-return monitoring.Metric{}, err
-}
-return m, nil
+func parseQueryResp(value model.Value) monitoring.MetricData {
+res := monitoring.MetricData{MetricType: monitoring.MetricTypeVector}
+data, _ := value.(model.Vector)
+for _, v := range data {
+mv := monitoring.MetricValue{
+Metadata: make(map[string]string),
+}
+for k, v := range v.Metric {
+mv.Metadata[string(k)] = string(v)
+}
+mv.Sample = monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)}
+res.MetricValues = append(res.MetricValues, mv)
+}
+return res
}
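Note: for one vector sample, parseQueryResp produces a MetricValue shaped like the sketch below (label and numbers borrowed from the node fixture earlier in this diff):
mv := monitoring.MetricValue{
Metadata: map[string]string{"node": "i-tl1i71hr"},
Sample: monitoring.Point{1585658599.195, 0.12824588180084573},
}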
package prometheus
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/json-iterator/go"
"io/ioutil"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestGetNamedMetrics(t *testing.T) {
tests := []struct {
name string
fakeResp string
expected string
}{
{"prom returns good values", "metrics-vector-type-prom.json", "metrics-vector-type-res.json"},
{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected, err := jsonFromFile(tt.expected)
if err != nil {
t.Fatal(err)
}
srv := mockPrometheusService("/api/v1/query", tt.fakeResp)
defer srv.Close()
client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
result := client.GetNamedMetrics([]string{"cluster_cpu_utilisation"}, time.Now(), monitoring.ClusterOption{})
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func TestGetNamedMetricsOverTime(t *testing.T) {
tests := []struct {
name string
fakeResp string
expected string
}{
{"prom returns good values", "metrics-matrix-type-prom.json", "metrics-matrix-type-res.json"},
{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected, err := jsonFromFile(tt.expected)
if err != nil {
t.Fatal(err)
}
srv := mockPrometheusService("/api/v1/query_range", tt.fakeResp)
defer srv.Close()
client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
result := client.GetNamedMetricsOverTime([]string{"cluster_cpu_utilisation"}, time.Now().Add(-time.Minute*3), time.Now(), time.Minute, monitoring.ClusterOption{})
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func mockPrometheusService(pattern, fakeResp string) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
b, _ := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
res.Write(b)
})
return httptest.NewServer(mux)
}
func jsonFromFile(expectedFile string) ([]monitoring.Metric, error) {
expectedJson := []monitoring.Metric{}
json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
if err != nil {
return expectedJson, err
}
err = jsoniter.Unmarshal(json, &expectedJson)
if err != nil {
return expectedJson, err
}
return expectedJson, nil
}
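Note: each *-prom.json fixture is a raw Prometheus payload, and the matching *-res.json is the parsed []monitoring.Metric the client must emit. A possible hardening, sketched here under the hypothetical name mockStrictPrometheusService (not in this change): reject requests that carry no PromQL expression. req.FormValue reads both URL query strings and the POSTed form body that client_golang may send.
func mockStrictPrometheusService(pattern, fakeResp string) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
// Fail loudly if the client stops sending an expression.
if req.FormValue("query") == "" {
http.Error(res, "missing query", http.StatusBadRequest)
return
}
b, _ := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
res.Write(b)
})
return httptest.NewServer(mux)
}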
......@@ -25,7 +25,6 @@ const (
Deployment = "Deployment"
)
-//TODO(huanggze): move this part to a ConfigMap
var promQLTemplates = map[string]string{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
......@@ -256,31 +255,33 @@ var promQLTemplates = map[string]string{
"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}
-func makeExpression(metric string, opt monitoring.QueryOptions) string {
+func makeExpr(metric string, opt monitoring.QueryOptions) string {
tmpl := promQLTemplates[metric]
switch opt.Level {
case monitoring.LevelCluster:
return tmpl
case monitoring.LevelNode:
-makeNodeMetricExpression(tmpl, opt)
+return makeNodeMetricExpr(tmpl, opt)
case monitoring.LevelWorkspace:
-makeWorkspaceMetricExpression(tmpl, opt)
+return makeWorkspaceMetricExpr(tmpl, opt)
case monitoring.LevelNamespace:
-makeNamespaceMetricExpression(tmpl, opt)
+return makeNamespaceMetricExpr(tmpl, opt)
case monitoring.LevelWorkload:
-makeWorkloadMetricExpression(tmpl, opt)
+return makeWorkloadMetricExpr(tmpl, opt)
case monitoring.LevelPod:
-makePodMetricExpression(tmpl, opt)
+return makePodMetricExpr(tmpl, opt)
case monitoring.LevelContainer:
-makeContainerMetricExpression(tmpl, opt)
+return makeContainerMetricExpr(tmpl, opt)
case monitoring.LevelPVC:
-makePVCMetricExpression(tmpl, opt)
+return makePVCMetricExpr(tmpl, opt)
case monitoring.LevelComponent:
return tmpl
+default:
+return tmpl
}
-return tmpl
}
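Note: makeExpr only substitutes label selectors into a PromQL template; for example, at the node level (expected string taken from the PromQLs testdata below):
opts := monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}
expr := makeExpr("node_cpu_utilisation", opts)
// expr == `node:node_cpu_utilisation:avg1m{node="i-2dazc1d6"}`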
-func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makeNodeMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var nodeSelector string
if o.NodeName != "" {
nodeSelector = fmt.Sprintf(`node="%s"`, o.NodeName)
......@@ -290,7 +291,7 @@ func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.Replace(tmpl, "$1", nodeSelector, -1)
}
-func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makeWorkspaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var workspaceSelector string
if o.WorkspaceName != "" {
workspaceSelector = fmt.Sprintf(`label_kubesphere_io_workspace="%s"`, o.WorkspaceName)
......@@ -300,7 +301,7 @@ func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", workspaceSelector, -1)
}
-func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makeNamespaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var namespaceSelector string
// For monitoring namespaces in the specific workspace
......@@ -321,7 +322,7 @@ func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", namespaceSelector, -1)
}
-func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makeWorkloadMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var kindSelector, workloadSelector string
switch o.WorkloadKind {
case "deployment":
......@@ -341,7 +342,7 @@ func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string
return strings.NewReplacer("$1", workloadSelector, "$2", kindSelector).Replace(tmpl)
}
-func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var podSelector, workloadSelector string
// For monitoring pods of the specific workload
......@@ -371,7 +372,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
// For monitoring pods on the specific node
// GET /nodes/{node}/pods/{pod}
-if o.PodName != "" {
+if o.NodeName != "" {
+if o.PodName != "" {
podSelector = fmt.Sprintf(`pod="%s", node="%s"`, o.PodName, o.NodeName)
} else {
......@@ -381,7 +382,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.NewReplacer("$1", workloadSelector, "$2", podSelector).Replace(tmpl)
}
-func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makeContainerMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var containerSelector string
if o.ContainerName != "" {
containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name="%s"`, o.PodName, o.NamespaceName, o.ContainerName)
......@@ -391,7 +392,7 @@ func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", containerSelector, -1)
}
-func makePVCMetricExpression(tmpl string, o monitoring.QueryOptions) string {
+func makePVCMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var pvcSelector string
// For monitoring persistentvolumeclaims in the specific namespace
......
package prometheus
import (
"github.com/google/go-cmp/cmp"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus/testdata"
"testing"
)
func TestMakeExpr(t *testing.T) {
tests := []struct {
name string
opt monitoring.QueryOptions
}{
{"cluster_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelCluster}},
{"node_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}},
{"node_cpu_total", monitoring.QueryOptions{Level: monitoring.LevelNode, ResourceFilter: "i-2dazc1d6|i-ezjb7gsk"}},
{"workspace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, WorkspaceName: "system-workspace"}},
{"workspace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, ResourceFilter: "system-workspace|demo"}},
{"namespace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, NamespaceName: "kube-system"}},
{"namespace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, ResourceFilter: "kube-system|default"}},
{"namespace_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelNamespace, WorkspaceName: "system-workspace", ResourceFilter: "kube-system|default"}},
{"workload_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: "deployment", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
{"workload_deployment_replica_available", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: ".*", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
{"pod_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", WorkloadKind: "deployment", WorkloadName: "elasticsearch", ResourceFilter: "elasticsearch-0"}},
{"pod_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", PodName: "elasticsearch-12345"}},
{"pod_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelPod, NodeName: "i-2dazc1d6", PodName: "elasticsearch-12345"}},
{"container_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ContainerName: "syscall"}},
{"container_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ResourceFilter: "syscall"}},
{"pvc_inodes_available", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", PersistentVolumeClaimName: "db-123"}},
{"pvc_inodes_used", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", ResourceFilter: "db-123"}},
{"pvc_inodes_total", monitoring.QueryOptions{Level: monitoring.LevelPVC, StorageClassName: "default", ResourceFilter: "db-123"}},
{"etcd_server_list", monitoring.QueryOptions{Level: monitoring.LevelComponent}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected := testdata.PromQLs[tt.name]
result := makeExpr(tt.name, tt.opt)
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
{
"status":"error",
"errorType":"internal",
"error":"inconsistent body for response code"
}
\ No newline at end of file
[
{
"metric_name": "cluster_cpu_utilisation",
"error": "inconsistent body for response code"
}
]
\ No newline at end of file
{
"status":"success",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
}
]
}
}
\ No newline at end of file
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
}
]
}
}
]
\ No newline at end of file
{
"status":"success",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
}
]
}
}
\ No newline at end of file
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
}
]
}
}
]
\ No newline at end of file
package testdata
var PromQLs = map[string]string{
"cluster_cpu_utilisation": `:node_cpu_utilisation:avg1m`,
"node_cpu_utilisation": `node:node_cpu_utilisation:avg1m{node="i-2dazc1d6"}`,
"node_cpu_total": `node:node_num_cpu:sum{node=~"i-2dazc1d6|i-ezjb7gsk"}`,
"workspace_cpu_usage": `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", label_kubesphere_io_workspace="system-workspace"}), 0.001)`,
"workspace_memory_usage": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", label_kubesphere_io_workspace=~"system-workspace|demo", label_kubesphere_io_workspace!=""})`,
"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="kube-system"}, 0.001)`,
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"kube-system|default"}`,
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", label_kubesphere_io_workspace="system-workspace", namespace=~"kube-system|default"}`,
"workload_cpu_usage": `round(namespace:workload_cpu_usage:sum{namespace="default", workload=~"Deployment:apiserver|coredns"}, 0.001)`,
"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^deployment-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`,
"pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`,
"pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`,
"container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name="syscall"}[5m])), 0.001)`,
"container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name=~"syscall"})`,
"pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim="db-123"}`,
"pvc_inodes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim=~"db-123"}`,
"pvc_inodes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{storageclass="default", persistentvolumeclaim=~"db-123"}`,
"etcd_server_list": `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`,
}
package monitoring
type Level int
const (
LevelCluster = 1 << iota
LevelNode
LevelWorkspace
LevelNamespace
LevelWorkload
LevelPod
LevelContainer
LevelPVC
LevelComponent
)
type QueryOption interface {
Apply(*QueryOptions)
}
type QueryOptions struct {
-Level MonitoringLevel
-NamedMetrics []string
+Level Level
-MetricFilter string
ResourceFilter string
NodeName string
WorkspaceName string
......@@ -25,44 +37,35 @@ func NewQueryOptions() *QueryOptions {
return &QueryOptions{}
}
-type ClusterOption struct {
-MetricFilter string
-}
+type ClusterOption struct{}
-func (co ClusterOption) Apply(o *QueryOptions) {
+func (_ ClusterOption) Apply(o *QueryOptions) {
o.Level = LevelCluster
-o.NamedMetrics = ClusterMetrics
}
type NodeOption struct {
-MetricFilter string
ResourceFilter string
NodeName string
}
func (no NodeOption) Apply(o *QueryOptions) {
o.Level = LevelNode
-o.NamedMetrics = NodeMetrics
o.ResourceFilter = no.ResourceFilter
o.NodeName = no.NodeName
}
type WorkspaceOption struct {
-MetricFilter string
ResourceFilter string
WorkspaceName string
}
func (wo WorkspaceOption) Apply(o *QueryOptions) {
o.Level = LevelWorkspace
-o.NamedMetrics = WorkspaceMetrics
-o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.WorkspaceName = wo.WorkspaceName
}
type NamespaceOption struct {
-MetricFilter string
ResourceFilter string
WorkspaceName string
NamespaceName string
......@@ -70,33 +73,25 @@ type NamespaceOption struct {
func (no NamespaceOption) Apply(o *QueryOptions) {
o.Level = LevelNamespace
-o.NamedMetrics = NamespaceMetrics
-o.MetricFilter = no.MetricFilter
o.ResourceFilter = no.ResourceFilter
o.WorkspaceName = no.WorkspaceName
o.NamespaceName = no.NamespaceName
}
type WorkloadOption struct {
-MetricFilter string
ResourceFilter string
NamespaceName string
WorkloadKind string
WorkloadName string
}
func (wo WorkloadOption) Apply(o *QueryOptions) {
o.Level = LevelWorkload
-o.NamedMetrics = WorkspaceMetrics
-o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.NamespaceName = wo.NamespaceName
o.WorkloadKind = wo.WorkloadKind
o.WorkloadName = wo.WorkloadName
}
type PodOption struct {
-MetricFilter string
ResourceFilter string
NodeName string
NamespaceName string
......@@ -107,8 +102,6 @@ type PodOption struct {
func (po PodOption) Apply(o *QueryOptions) {
o.Level = LevelPod
-o.NamedMetrics = PodMetrics
-o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.WorkloadKind = po.WorkloadKind
......@@ -116,7 +109,6 @@ func (po PodOption) Apply(o *QueryOptions) {
}
type ContainerOption struct {
-MetricFilter string
ResourceFilter string
NamespaceName string
PodName string
......@@ -125,8 +117,6 @@ type ContainerOption struct {
func (co ContainerOption) Apply(o *QueryOptions) {
o.Level = LevelContainer
-o.NamedMetrics = ContainerMetrics
-o.MetricFilter = co.MetricFilter
o.ResourceFilter = co.ResourceFilter
o.NamespaceName = co.NamespaceName
o.PodName = co.PodName
......@@ -134,7 +124,6 @@ func (co ContainerOption) Apply(o *QueryOptions) {
}
type PVCOption struct {
-MetricFilter string
ResourceFilter string
NamespaceName string
StorageClassName string
......@@ -143,20 +132,14 @@ type PVCOption struct {
func (po PVCOption) Apply(o *QueryOptions) {
o.Level = LevelPVC
-o.NamedMetrics = PVCMetrics
-o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.StorageClassName = po.StorageClassName
o.PersistentVolumeClaimName = po.PersistentVolumeClaimName
}
-type ComponentOption struct {
-MetricFilter string
-}
+type ComponentOption struct{}
-func (co ComponentOption) Apply(o *QueryOptions) {
+func (_ ComponentOption) Apply(o *QueryOptions) {
o.Level = LevelComponent
-o.NamedMetrics = ComponentMetrics
-o.MetricFilter = co.MetricFilter
}
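Note: each option type now carries only resource scoping and applies itself onto a shared QueryOptions; a minimal sketch (values borrowed from the tests in this diff):
opts := monitoring.NewQueryOptions()
monitoring.NodeOption{NodeName: "i-2dazc1d6"}.Apply(opts)
// opts.Level == monitoring.LevelNode, opts.NodeName == "i-2dazc1d6"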
package monitoring
const (
MetricTypeMatrix = "matrix"
MetricTypeVector = "vector"
)
type Metric struct {
MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
MetricData `json:"data,omitempty" description:"actual metric result"`
Error string `json:"error,omitempty"`
}
type MetricData struct {
MetricType string `json:"resultType,omitempty" description:"result type, one of matrix, vector"`
MetricValues []MetricValue `json:"result,omitempty" description:"metric data including labels, time series and values"`
}
type Point [2]float64
type MetricValue struct {
Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
Sample Point `json:"value,omitempty" description:"time series, values of vector type"`
Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
}
func (p Point) Timestamp() float64 {
return p[0]
}
func (p Point) Value() float64 {
return p[1]
}
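Note: a Point packs [timestamp, value], matching a Prometheus sample pair; a small sketch using a value from the vector fixtures (assumes the fmt import):
p := monitoring.Point{1585743854.077, 1.123456}
fmt.Println(p.Timestamp(), p.Value()) // 1.585743854077e+09 1.123456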
......@@ -32,7 +32,7 @@ import (
devopsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2"
iamv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2"
-monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha2"
+monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3"
openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1"
operationsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/operations/v1alpha2"
resourcesv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/resources/v1alpha2"
......@@ -61,7 +61,7 @@ func generateSwaggerJson() {
urlruntime.Must(devopsv1alpha2.AddToContainer(container, nil, nil, nil, nil, nil, nil))
urlruntime.Must(iamv1alpha2.AddToContainer(container, nil, nil, nil, nil, nil))
urlruntime.Must(loggingv1alpha2.AddToContainer(container, nil, nil))
-urlruntime.Must(monitoringv1alpha2.AddToContainer(container, nil, nil))
+urlruntime.Must(monitoringv1alpha3.AddToContainer(container, nil, nil))
urlruntime.Must(openpitrixv1.AddToContainer(container, nil, nil))
urlruntime.Must(operationsv1alpha2.AddToContainer(container, nil))
urlruntime.Must(resourcesv1alpha2.AddToContainer(container, nil, nil))
......
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
......@@ -176,7 +175,18 @@
END OF TERMS AND CONDITIONS
-Copyright 2020 Red Hat, Inc.
+APPENDIX: How to apply the Apache License to your work.
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "{}"
+replaced with your own identifying information. (Don't include
+the brackets!) The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
......
package v1
const (
// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
// Used for specifying the reason for cancellation or failure of a deployment
// This is on replication controller set by deployer controller.
DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
// DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the name of the deployer Pod which will act upon the ReplicationController
// to implement the deployment behavior.
// This is set on replication controller by deployer controller.
DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
// DeploymentConfig on which the deployment is based.
// This is set on replication controller pod template by deployer controller.
DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
// The annotation value does not matter and its mere presence indicates cancellation.
// This is set on replication controller by deployment config controller or oc rollout cancel command.
DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
// DeploymentConfig on which a given deployment is based.
// This is set on replication controller by deployer controller.
DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
// the deployment.
// This is set on replication controller pod template by deployment config controller.
DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
// DeployerPodForDeploymentLabel is a label which groups pods related to a
// deployment. The value is a deployment name. The deployer pod and hook pods
// created by the internal strategies will have this label. Custom
// strategies can apply this label to any pods they create, enabling
// platform-provided cancellation and garbage collection support.
// This is set on deployer pod by deployer controller.
DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
// DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of
// a deployment.
// This is set on replication controller by deployer controller.
DeploymentStatusAnnotation = "openshift.io/deployment.phase"
)
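Note: a hedged sketch of consuming these constants; `rc` is an assumed *corev1.ReplicationController, not part of this file:
// Read the recorded phase; for cancellation, presence alone is the signal.
phase := rc.Annotations[DeploymentStatusAnnotation] // e.g. "Complete"
cancelled := rc.Annotations[DeploymentCancelledAnnotation] != ""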
type DeploymentConditionReason string
var (
// ReplicationControllerUpdatedReason is added in a deployment config when one of its replication
// controllers is updated as part of the rollout process.
ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated"
// ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication
// controller.
ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError"
// ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication
// controller.
NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated"
// NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made
// available ie. the number of new pods that have passed readiness checks and run for at least
// minReadySeconds is at least the minimum available pods that need to run for the deployment config.
NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable"
// ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show
// any progress within the given deadline (progressDeadlineSeconds).
ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded"
// DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be
// estimated once a deployment config is paused.
DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused"
// DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally
// deployment configs that paused amidst a rollout.
DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed"
// RolloutCancelledReason is added in a deployment config when its newest rollout was
// interrupted by cancellation.
RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled"
)
// DeploymentStatus describes the possible states a deployment can be in.
type DeploymentStatus string
var (
// DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
DeploymentStatusNew DeploymentStatus = "New"
// DeploymentStatusPending means the deployment been handed over to a deployment strategy,
// but the strategy has not yet declared the deployment to be running.
DeploymentStatusPending DeploymentStatus = "Pending"
// DeploymentStatusRunning means the deployment strategy has reported the deployment as
// being in-progress.
DeploymentStatusRunning DeploymentStatus = "Running"
// DeploymentStatusComplete means the deployment finished without an error.
DeploymentStatusComplete DeploymentStatus = "Complete"
// DeploymentStatusFailed means the deployment finished with an error.
DeploymentStatusFailed DeploymentStatus = "Failed"
)
package v1
// This file contains consts that are not shared between components and set just internally.
// They will likely be removed in (near) future.
const (
// DeployerPodCreatedAtAnnotation is an annotation on a deployment that
// records the time in RFC3339 format of when the deployer pod for this particular
// deployment was created.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at"
// DeployerPodStartedAtAnnotation is an annotation on a deployment that
// records the time in RFC3339 format of when the deployer pod for this particular
// deployment was started.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at"
// DeployerPodCompletedAtAnnotation is an annotation on deployment that records
// the time in RFC3339 format of when the deployer pod finished.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at"
// DesiredReplicasAnnotation represents the desired number of replicas for a
// new deployment.
// This is set by deployer controller, but not consumed by any command or internally.
// DEPRECATED: will be removed soon
DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas"
// DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
// of the deployment (a ReplicationController) on which the deployer Pod acts.
// This is set by deployer controller and consumed internally and in oc adm top command.
// DEPRECATED: will be removed soon
DeploymentAnnotation = "openshift.io/deployment.name"
)
......@@ -7,6 +7,7 @@ package github.com.openshift.api.apps.v1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
......@@ -15,7 +16,7 @@ option go_package = "v1";
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
message CustomDeploymentStrategyParams {
-// Image specifies a container image which can carry out a deployment.
+// Image specifies a Docker image which can carry out a deployment.
optional string image = 1;
// Environment holds the environment which will be given to the container for Image.
......@@ -67,24 +68,25 @@ message DeploymentCondition {
// A single deployment configuration is usually analogous to a single micro-service. Can support many different
// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as
// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.
//
//
// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed.
// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment
// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
// is triggered by any means.
message DeploymentConfig {
// Standard object's metadata.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec represents a desired deployment state and how to deploy to it.
optional DeploymentConfigSpec spec = 2;
// Status represents the current deployment state.
// +optional
optional DeploymentConfigStatus status = 3;
}
// DeploymentConfigList is a collection of deployment configs.
message DeploymentConfigList {
// Standard object's metadata.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is a list of deployment configs
......@@ -360,7 +362,7 @@ message ExecNewPodHook {
repeated k8s.io.api.core.v1.EnvVar env = 2;
// ContainerName is the name of a container in the deployment pod template
-// whose container image will be used for the hook pod's container.
+// whose Docker image will be used for the hook pod's container.
optional string containerName = 3;
// Volumes is a list of named volumes from the pod template which should be
......@@ -420,9 +422,9 @@ message RollingDeploymentStrategyParams {
// during the update. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of update (ex: 10%). Absolute
// number is calculated from percentage by rounding down.
//
//
// This cannot be 0 if MaxSurge is 0. By default, 25% is used.
//
//
// Example: when this is set to 30%, the old RC can be scaled down by 30%
// immediately when the rolling update starts. Once new pods are ready, old
// RC can be scaled down further, followed by scaling up the new RC,
......@@ -434,9 +436,9 @@ message RollingDeploymentStrategyParams {
// original number of pods. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
//
//
// This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
//
//
// Example: when this is set to 30%, the new RC can be scaled up by 30%
// immediately when the rolling update starts. Once old pods have been
// killed, new RC can be scaled up further, ensuring that total number of
......
package v1
import (
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&DeploymentConfig{},
&DeploymentConfigList{},
&DeploymentConfigRollback{},
&DeploymentRequest{},
&DeploymentLog{},
&DeploymentLogOptions{},
&extensionsv1beta1.Scale{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
package v1
import (
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
+const (
+LegacyGroupName = ""
+GroupName = "apps.openshift.io"
+)
var (
-GroupName = "apps.openshift.io"
-GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
-schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
-// Install is a function which adds this version to a scheme
-Install = schemeBuilder.AddToScheme
-// SchemeGroupVersion generated code relies on this name
-// Deprecated
-SchemeGroupVersion = GroupVersion
-// AddToScheme exists solely to keep the old generators creating valid code
-// DEPRECATED
-AddToScheme = schemeBuilder.AddToScheme
+SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"}
+LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
+AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme
+SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+AddToScheme = SchemeBuilder.AddToScheme
)
-// Resource generated code relies on this being here, but it logically belongs to the group
-// DEPRECATED
func Resource(resource string) schema.GroupResource {
-return schema.GroupResource{Group: GroupName, Resource: resource}
+return SchemeGroupVersion.WithResource(resource).GroupResource()
}
+// Adds the list of known types to api.Scheme.
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+types := []runtime.Object{
+&DeploymentConfig{},
+&DeploymentConfigList{},
+&DeploymentConfigRollback{},
+&DeploymentRequest{},
+&DeploymentLog{},
+&DeploymentLogOptions{},
+&extensionsv1beta1.Scale{},
+}
+scheme.AddKnownTypes(LegacySchemeGroupVersion, types...)
+return nil
+}
+// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
-scheme.AddKnownTypes(GroupVersion,
+scheme.AddKnownTypes(SchemeGroupVersion,
&DeploymentConfig{},
&DeploymentConfigList{},
&DeploymentConfigRollback{},
......@@ -40,6 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&DeploymentLogOptions{},
&extensionsv1beta1.Scale{},
)
-metav1.AddToGroupVersion(scheme, GroupVersion)
+metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
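Note: a usage sketch, not part of this change: callers register these types into a runtime.Scheme before encoding or decoding objects, e.g.:
scheme := runtime.NewScheme()
if err := AddToScheme(scheme); err != nil { // registers the apps.openshift.io/v1 types
panic(err)
}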
......@@ -25,14 +25,14 @@ import (
// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
// is triggered by any means.
type DeploymentConfig struct {
-metav1.TypeMeta `json:",inline"`
+metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec represents a desired deployment state and how to deploy to it.
Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current deployment state.
// +optional
Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
......@@ -120,7 +120,7 @@ const (
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
type CustomDeploymentStrategyParams struct {
-// Image specifies a container image which can carry out a deployment.
+// Image specifies a Docker image which can carry out a deployment.
Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
// Environment holds the environment which will be given to the container for Image.
Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"`
......@@ -225,7 +225,7 @@ type ExecNewPodHook struct {
// Env is a set of environment variables to supply to the hook pod's container.
Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
// ContainerName is the name of a container in the deployment pod template
-// whose container image will be used for the hook pod's container.
+// whose Docker image will be used for the hook pod's container.
ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
// Volumes is a list of named volumes from the pod template which should be
// copied to the hook pod. Volumes names not found in pod spec are ignored.
......@@ -264,7 +264,7 @@ type DeploymentTriggerType string
const (
// DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
-// a container image repository.
+// a Docker image repository.
DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
// DeploymentTriggerOnConfigChange will create new deployments in response to changes to
// the ControllerTemplate of a DeploymentConfig.
......@@ -389,6 +389,7 @@ type DeploymentCondition struct {
// DeploymentConfigList is a collection of deployment configs.
type DeploymentConfigList struct {
metav1.TypeMeta `json:",inline"`
+// Standard object's metadata.
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of deployment configs
......
......@@ -8,12 +8,12 @@ package v1
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
-// Those methods can be generated by using hack/update-swagger-docs.sh
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_CustomDeploymentStrategyParams = map[string]string{
"": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
-"image": "Image specifies a container image which can carry out a deployment.",
+"image": "Image specifies a Docker image which can carry out a deployment.",
"environment": "Environment holds the environment which will be given to the container for Image.",
"command": "Command is optional and overrides CMD in the container Image.",
}
......@@ -56,9 +56,10 @@ func (DeploymentCondition) SwaggerDoc() map[string]string {
}
var map_DeploymentConfig = map[string]string{
-"": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.",
-"spec": "Spec represents a desired deployment state and how to deploy to it.",
-"status": "Status represents the current deployment state.",
+"": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.",
+"metadata": "Standard object's metadata.",
+"spec": "Spec represents a desired deployment state and how to deploy to it.",
+"status": "Status represents the current deployment state.",
}
func (DeploymentConfig) SwaggerDoc() map[string]string {
......@@ -66,8 +67,9 @@ func (DeploymentConfig) SwaggerDoc() map[string]string {
}
var map_DeploymentConfigList = map[string]string{
-"": "DeploymentConfigList is a collection of deployment configs.",
-"items": "Items is a list of deployment configs",
+"": "DeploymentConfigList is a collection of deployment configs.",
+"metadata": "Standard object's metadata.",
+"items": "Items is a list of deployment configs",
}
func (DeploymentConfigList) SwaggerDoc() map[string]string {
......@@ -223,7 +225,7 @@ var map_ExecNewPodHook = map[string]string{
"": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
"command": "Command is the action command and its arguments.",
"env": "Env is a set of environment variables to supply to the hook pod's container.",
-"containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
+"containerName": "ContainerName is the name of a container in the deployment pod template whose Docker image will be used for the hook pod's container.",
"volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
}
......@@ -255,7 +257,7 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
}
var map_RollingDeploymentStrategyParams = map[string]string{
-"": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
+"": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
"updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
"intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
"timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
......
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
)
func addLegacyKnownTypes(scheme *runtime.Scheme) error {
types := []runtime.Object{
&Project{},
&ProjectList{},
&ProjectRequest{},
}
scheme.AddKnownTypes(legacyGroupVersion, types...)
return nil
}
This diff is collapsed.