Unverified · Commit 6c6bfb26 authored by Guangzhe Huang, committed by GitHub

[WIP] logging refactor (#1794)

* refactor logging
Signed-off-by: huanggze <loganhuang@yunify.com>

* refactor logging
Signed-off-by: huanggze <loganhuang@yunify.com>
Parent a9e1183f
No related merge requests
......@@ -6,8 +6,8 @@ import (
"k8s.io/klog"
genericoptions "kubesphere.io/kubesphere/pkg/server/options"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
esclient "kubesphere.io/kubesphere/pkg/simple/client/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
esclient "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
"kubesphere.io/kubesphere/pkg/simple/client/prometheus"
......
#!/bin/bash
set -e
GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 devops:v1alpha1 logging:v1alpha2"
GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 devops:v1alpha1"
rm -rf ./pkg/client
./hack/generate_group.sh "client,lister,informer" kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "$GV" --output-base=./ -h "$PWD/hack/boilerplate.go.txt"
......
package v1alpha2
import (
"encoding/json"
"time"
)
import "kubesphere.io/kubesphere/pkg/simple/client/logging"
const (
OperationQuery int = iota
OperationStatistics
OperationHistogram
OperationExport
)
// elasticsearch client config
type Config struct {
Host string
Port string
Index string
VersionMajor string
}
type QueryParameters struct {
// when true, indicates the provided `namespaces` or `namespace_query` doesn't match any namespace
NamespaceNotFound bool
// a map from namespace name to its creation timestamp
NamespaceWithCreationTime map[string]string
// *Filter fields match literally (exact match)
// *Query fields match fuzzily
WorkloadFilter []string
WorkloadQuery []string
PodFilter []string
PodQuery []string
ContainerFilter []string
ContainerQuery []string
LogQuery []string
Operation int
Interval string
StartTime string
EndTime string
Sort string
From int64
Size int64
ScrollTimeout time.Duration
}
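// Illustrative sketch, not part of this change: how a caller might fill
// QueryParameters to fetch the 10 most recent log lines of a single pod.
// The namespace, pod name, and creation timestamp below are hypothetical.
func exampleQueryParameters() QueryParameters {
	return QueryParameters{
		NamespaceWithCreationTime: map[string]string{"demo-ns": "1567390800"},
		PodFilter:                 []string{"demo-pod"},
		Operation:                 OperationQuery,
		Sort:                      "desc",
		From:                      0,
		Size:                      10,
	}
}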
// elasticsearch request body
type Request struct {
From int64 `json:"from"`
Size int64 `json:"size"`
Sorts []Sort `json:"sort,omitempty"`
MainQuery BoolQuery `json:"query"`
Aggs interface{} `json:"aggs,omitempty"`
}
type Sort struct {
Order Order `json:"time"`
}
type Order struct {
Order string `json:"order"`
}
type BoolQuery struct {
Bool interface{} `json:"bool"`
}
// use filter instead of must:
// filter clauses ignore scoring
type BoolFilter struct {
Filter []interface{} `json:"filter"`
}
type BoolShould struct {
Should []interface{} `json:"should"`
MinimumShouldMatch int64 `json:"minimum_should_match"`
}
type RangeQuery struct {
RangeSpec RangeSpec `json:"range"`
}
type RangeSpec struct {
TimeRange TimeRange `json:"time"`
}
type TimeRange struct {
Gte string `json:"gte,omitempty"`
Lte string `json:"lte,omitempty"`
}
type MatchPhrase struct {
MatchPhrase map[string]string `json:"match_phrase"`
}
type MatchPhrasePrefix struct {
MatchPhrasePrefix interface{} `json:"match_phrase_prefix"`
}
type RegexpQuery struct {
Regexp interface{} `json:"regexp"`
}
// StatisticsAggs is the `aggs` payload of a Request; it holds a cardinality aggregation for counting distinct containers
type StatisticsAggs struct {
ContainerAgg ContainerAgg `json:"containers"`
}
type ContainerAgg struct {
Cardinality AggField `json:"cardinality"`
}
type AggField struct {
Field string `json:"field"`
}
type HistogramAggs struct {
HistogramAgg HistogramAgg `json:"histogram"`
}
type HistogramAgg struct {
DateHistogram DateHistogram `json:"date_histogram"`
}
type DateHistogram struct {
Field string `json:"field"`
Interval string `json:"interval"`
}
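// Illustrative sketch, not part of this change: composing the structs above
// into an Elasticsearch request body. The filter selects documents of one
// hypothetical container within a time window and sorts them by time; the
// document field names are assumptions. Relies on "encoding/json", which the
// package already needs for json.RawMessage.
func exampleRequestBody() ([]byte, error) {
	body := Request{
		From:  0,
		Size:  10,
		Sorts: []Sort{{Order: Order{Order: "desc"}}},
		MainQuery: BoolQuery{Bool: BoolFilter{Filter: []interface{}{
			MatchPhrase{MatchPhrase: map[string]string{"kubernetes.container_name.keyword": "demo-container"}},
			RangeQuery{RangeSpec: RangeSpec{TimeRange: TimeRange{Gte: "now-1h", Lte: "now"}}},
		}}},
	}
	// Marshals to roughly:
	// {"from":0,"size":10,"sort":[{"time":{"order":"desc"}}],
	//  "query":{"bool":{"filter":[{"match_phrase":{...}},{"range":{"time":{...}}}]}}}
	return json.Marshal(body)
}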
// For more info, refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/getting-started-search-API.html
// Response body from the elasticsearch engine
type Response struct {
ScrollId string `json:"_scroll_id"`
Shards Shards `json:"_shards"`
Hits Hits `json:"hits"`
Aggregations json.RawMessage `json:"aggregations"`
}
type Shards struct {
Total int64 `json:"total"`
Successful int64 `json:"successful"`
Skipped int64 `json:"skipped"`
Failed int64 `json:"failed"`
}
type Hits struct {
// As of Elasticsearch v7.x, hits.total changed from a number to an object, hence interface{}
Total interface{} `json:"total"`
Hits []Hit `json:"hits"`
}
type Hit struct {
Source Source `json:"_source"`
Sort []int64 `json:"sort"`
}
type Source struct {
Log string `json:"log"`
Time string `json:"time"`
Kubernetes Kubernetes `json:"kubernetes"`
}
type Kubernetes struct {
Namespace string `json:"namespace_name"`
Pod string `json:"pod_name"`
Container string `json:"container_name"`
Host string `json:"host"`
}
type LogRecord struct {
Time string `json:"time,omitempty" description:"log timestamp"`
Log string `json:"log,omitempty" description:"log message"`
Namespace string `json:"namespace,omitempty" description:"namespace"`
Pod string `json:"pod,omitempty" description:"pod name"`
Container string `json:"container,omitempty" description:"container name"`
Host string `json:"host,omitempty" description:"node id"`
}
type ReadResult struct {
ScrollID string `json:"_scroll_id,omitempty"`
Total int64 `json:"total" description:"total number of matched results"`
Records []LogRecord `json:"records,omitempty" description:"actual array of results"`
}
// StatisticsResponseAggregations is the `aggregations` payload of a Response returned for a StatisticsAggs aggregation
type StatisticsResponseAggregations struct {
ContainerCount ContainerCount `json:"containers"`
}
type ContainerCount struct {
Value int64 `json:"value"`
}
type HistogramAggregations struct {
HistogramAggregation HistogramAggregation `json:"histogram"`
}
type HistogramAggregation struct {
Histograms []HistogramStatistics `json:"buckets"`
}
type HistogramStatistics struct {
Time int64 `json:"key"`
Count int64 `json:"doc_count"`
}
type HistogramRecord struct {
Time int64 `json:"time" description:"timestamp"`
Count int64 `json:"count" description:"total number of logs at intervals"`
}
type StatisticsResult struct {
Containers int64 `json:"containers" description:"total number of containers"`
Logs int64 `json:"logs" description:"total number of logs"`
}
type HistogramResult struct {
Total int64 `json:"total" description:"total number of logs"`
Histograms []HistogramRecord `json:"histograms" description:"actual array of histogram results"`
}
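// Illustrative sketch, not part of this change: decoding the raw
// `aggregations` payload of a Response into the typed structs above. Which
// struct applies depends on the aggregation that was requested. Relies on
// "encoding/json", which the package already needs for json.RawMessage.
func exampleDecodeAggregations(resp Response, operation int) (interface{}, error) {
	switch operation {
	case OperationStatistics:
		var aggs StatisticsResponseAggregations
		err := json.Unmarshal(resp.Aggregations, &aggs)
		return aggs, err
	case OperationHistogram:
		var aggs HistogramAggregations
		err := json.Unmarshal(resp.Aggregations, &aggs)
		return aggs, err
	default:
		// plain queries carry no aggregations
		return nil, nil
	}
}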
// Wrap elasticsearch response
type QueryResult struct {
Read *ReadResult `json:"query,omitempty" description:"query results"`
Statistics *StatisticsResult `json:"statistics,omitempty" description:"statistics results"`
Histogram *HistogramResult `json:"histogram,omitempty" description:"histogram results"`
type APIResponse struct {
Logs *logging.Logs `json:"query,omitempty" description:"query results"`
Statistics *logging.Statistics `json:"statistics,omitempty" description:"statistics results"`
Histogram *logging.Histogram `json:"histogram,omitempty" description:"histogram results"`
}
package apis
import "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
func init() {
// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
AddToSchemes = append(AddToSchemes, v1alpha2.SchemeBuilder.AddToScheme)
}
package logging
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 contains API Schema definitions for the logging v1alpha2 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/logging
// +k8s:defaulter-gen=TypeMeta
// +groupName=logging.kubesphere.io
package v1alpha2
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FluentBit struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec FluentBitSpec `json:"spec"`
Status FluentBitStatus `json:"status,omitempty"`
}
// FluentBitSpec holds the spec for the operator
type FluentBitSpec struct {
Service []Plugin `json:"service"`
Input []Plugin `json:"input"`
Filter []Plugin `json:"filter"`
Output []Plugin `json:"output"`
Settings []Plugin `json:"settings"`
}
// FluentBitStatus holds the status info for the operator
type FluentBitStatus struct {
// Fill me
}
// Plugin struct for fluent-bit plugins
type Plugin struct {
Type string `json:"type" description:"output plugin type, eg. fluentbit-output-es"`
Name string `json:"name" description:"output plugin name, eg. fluentbit-output-es"`
Parameters []Parameter `json:"parameters" description:"output plugin configuration parameters"`
}
// OutputPlugin represents a fluent-bit output plugin
type OutputPlugin struct {
Plugin
Id string `json:"id,omitempty" description:"output uuid"`
Enable bool `json:"enable" description:"active status, one of true, false"`
Updatetime *metav1.Time `json:"updatetime,omitempty" description:"last updatetime"`
}
// Parameter generic parameter type to handle values from different sources
type Parameter struct {
Name string `json:"name" description:"configuration parameter key, eg. Name. refer to Fluent bit's Output Plugins Section for more configuration parameters."`
ValueFrom *ValueFrom `json:"valueFrom,omitempty"`
Value string `json:"value" description:"configuration parameter value, eg. es. refer to Fluent bit's Output Plugins Section for more configuration parameters."`
}
// ValueFrom generic type to determine value origin
type ValueFrom struct {
SecretKeyRef KubernetesSecret `json:"secretKeyRef"`
}
// KubernetesSecret is a ValueFrom type
type KubernetesSecret struct {
Name string `json:"name"`
Key string `json:"key"`
Namespace string `json:"namespace"`
}
// FluentBitList auto generated by the sdk
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FluentBitList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []FluentBit `json:"items"`
}
type FluentbitOutputsResult struct {
Status int `json:"status" description:"response status"`
Error string `json:"error,omitempty" description:"debug information"`
Outputs []OutputPlugin `json:"outputs,omitempty" description:"array of fluent bit output plugins"`
}
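// Illustrative sketch, not part of this change: a FluentBit object whose spec
// carries a single Elasticsearch output plugin, with the password taken from
// a Kubernetes secret. All names and parameter values are hypothetical; see
// fluent-bit's output plugin documentation for the real configuration keys.
func exampleFluentBit() FluentBit {
	return FluentBit{
		ObjectMeta: metav1.ObjectMeta{Name: "fluent-bit", Namespace: "kubesphere-logging-system"},
		Spec: FluentBitSpec{
			Output: []Plugin{{
				Type: "fluentbit-output-es",
				Name: "fluentbit-output-es",
				Parameters: []Parameter{
					{Name: "Name", Value: "es"},
					{Name: "Host", Value: "elasticsearch-logging.kubesphere-logging-system.svc"},
					{Name: "HTTP_Passwd", ValueFrom: &ValueFrom{
						SecretKeyRef: KubernetesSecret{Name: "es-credentials", Key: "password", Namespace: "kubesphere-logging-system"},
					}},
				},
			}},
		},
	}
}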
package v1alpha2
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 contains API Schema definitions for the logging v1alpha2 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/logging
// +k8s:defaulter-gen=TypeMeta
// +groupName=logging.kubesphere.io
package v1alpha2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
)
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: "logging.kubesphere.io", Version: "v1alpha2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme is required by pkg/client/...
AddToScheme = SchemeBuilder.AddToScheme
)
// Resource is required by pkg/client/listers/...
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
package v1alpha2
// +build !ignore_autogenerated
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentBit) DeepCopyInto(out *FluentBit) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBit.
func (in *FluentBit) DeepCopy() *FluentBit {
if in == nil {
return nil
}
out := new(FluentBit)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FluentBit) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentBitList) DeepCopyInto(out *FluentBitList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FluentBit, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitList.
func (in *FluentBitList) DeepCopy() *FluentBitList {
if in == nil {
return nil
}
out := new(FluentBitList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FluentBitList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentBitSpec) DeepCopyInto(out *FluentBitSpec) {
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = make([]Plugin, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Input != nil {
in, out := &in.Input, &out.Input
*out = make([]Plugin, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Filter != nil {
in, out := &in.Filter, &out.Filter
*out = make([]Plugin, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Output != nil {
in, out := &in.Output, &out.Output
*out = make([]Plugin, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Settings != nil {
in, out := &in.Settings, &out.Settings
*out = make([]Plugin, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitSpec.
func (in *FluentBitSpec) DeepCopy() *FluentBitSpec {
if in == nil {
return nil
}
out := new(FluentBitSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentBitStatus) DeepCopyInto(out *FluentBitStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitStatus.
func (in *FluentBitStatus) DeepCopy() *FluentBitStatus {
if in == nil {
return nil
}
out := new(FluentBitStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesSecret) DeepCopyInto(out *KubernetesSecret) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecret.
func (in *KubernetesSecret) DeepCopy() *KubernetesSecret {
if in == nil {
return nil
}
out := new(KubernetesSecret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OutputPlugin) DeepCopyInto(out *OutputPlugin) {
*out = *in
in.Plugin.DeepCopyInto(&out.Plugin)
if in.Updatetime != nil {
in, out := &in.Updatetime, &out.Updatetime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputPlugin.
func (in *OutputPlugin) DeepCopy() *OutputPlugin {
if in == nil {
return nil
}
out := new(OutputPlugin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Parameter) DeepCopyInto(out *Parameter) {
*out = *in
if in.ValueFrom != nil {
in, out := &in.ValueFrom, &out.ValueFrom
*out = new(ValueFrom)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
func (in *Parameter) DeepCopy() *Parameter {
if in == nil {
return nil
}
out := new(Parameter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugin) DeepCopyInto(out *Plugin) {
*out = *in
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make([]Parameter, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
func (in *Plugin) DeepCopy() *Plugin {
if in == nil {
return nil
}
out := new(Plugin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
*out = *in
out.SecretKeyRef = in.SecretKeyRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
func (in *ValueFrom) DeepCopy() *ValueFrom {
if in == nil {
return nil
}
out := new(ValueFrom)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"bytes"
"fmt"
"github.com/emicklei/go-restful"
"io"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/models/log"
"kubesphere.io/kubesphere/pkg/server/errors"
cs "kubesphere.io/kubesphere/pkg/simple/client"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"net/http"
"strconv"
"strings"
"time"
)
func LoggingQueryCluster(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelCluster, request)
if param.Operation == v1alpha2.OperationExport {
logExport(param, request, response)
} else {
logQuery(param, response)
}
}
func LoggingQueryWorkspace(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelWorkspace, request)
logQuery(param, response)
}
func LoggingQueryNamespace(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelNamespace, request)
logQuery(param, response)
}
func LoggingQueryWorkload(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelWorkload, request)
logQuery(param, response)
}
func LoggingQueryPod(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelPod, request)
logQuery(param, response)
}
func LoggingQueryContainer(request *restful.Request, response *restful.Response) {
param := parseRequest(log.QueryLevelContainer, request)
if param.Operation == v1alpha2.OperationExport {
logExport(param, request, response)
} else {
logQuery(param, response)
}
}
func LoggingQueryFluentbitOutputs(request *restful.Request, response *restful.Response) {
res := log.FluentbitOutputsQuery()
if res.Status != http.StatusOK {
response.WriteHeaderAndEntity(res.Status, errors.New(res.Error))
return
}
response.WriteAsJson(res)
}
func LoggingInsertFluentbitOutput(request *restful.Request, response *restful.Response) {
var output fb.OutputPlugin
var res *log.FluentbitOutputsResult
err := request.ReadEntity(&output)
if err != nil {
klog.Errorln(err)
response.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err))
return
}
res = log.FluentbitOutputInsert(output)
if res.Status != http.StatusOK {
response.WriteHeaderAndEntity(res.Status, errors.New(res.Error))
return
}
response.WriteAsJson(res)
}
func LoggingUpdateFluentbitOutput(request *restful.Request, response *restful.Response) {
var output fb.OutputPlugin
id := request.PathParameter("output")
err := request.ReadEntity(&output)
if err != nil {
klog.Errorln(err)
response.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err))
return
}
res := log.FluentbitOutputUpdate(output, id)
if res.Status != http.StatusOK {
response.WriteHeaderAndEntity(res.Status, errors.New(res.Error))
return
}
response.WriteAsJson(res)
}
func LoggingDeleteFluentbitOutput(request *restful.Request, response *restful.Response) {
var res *log.FluentbitOutputsResult
id := request.PathParameter("output")
res = log.FluentbitOutputDelete(id)
if res.Status != http.StatusOK {
response.WriteHeaderAndEntity(res.Status, errors.New(res.Error))
return
}
response.WriteAsJson(res)
}
func logQuery(param v1alpha2.QueryParameters, response *restful.Response) {
es, err := cs.ClientSets().ElasticSearch()
if err != nil {
response.WriteHeaderAndEntity(http.StatusServiceUnavailable, errors.Wrap(err))
return
}
res, err := es.Query(param)
if err != nil {
response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err))
return
}
response.WriteAsJson(res)
}
func logExport(param v1alpha2.QueryParameters, request *restful.Request, response *restful.Response) {
es, err := cs.ClientSets().ElasticSearch()
if err != nil {
response.WriteHeaderAndEntity(http.StatusServiceUnavailable, errors.Wrap(err))
return
}
response.Header().Set(restful.HEADER_ContentType, "text/plain")
response.Header().Set("Content-Disposition", "attachment")
// keep search context alive for 1m
param.ScrollTimeout = time.Minute
// export 1000 records in every iteration
param.Size = 1000
// from is not allowed in a scroll context
param.From = 0
var scrollId string
// limit to retrieve max 100k records
for i := 0; i < 100; i++ {
var res *v1alpha2.QueryResult
if scrollId == "" {
res, err = es.Query(param)
if err != nil {
response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err))
return
}
} else {
res, err = es.Scroll(scrollId)
if err != nil {
break
}
}
if res.Read == nil || len(res.Read.Records) == 0 {
break
}
output := new(bytes.Buffer)
for _, r := range res.Read.Records {
output.WriteString(fmt.Sprintf(`%s`, stringutils.StripAnsi(r.Log)))
}
_, err = io.Copy(response, output)
if err != nil {
klog.Error(err)
break
}
scrollId = res.Read.ScrollID
// stop exporting early if the client has gone away
// (a `break` inside a select would only exit the select, not this loop)
if request.Request.Context().Err() != nil {
break
}
}
if scrollId != "" {
es.ClearScroll(scrollId)
}
}
func parseRequest(level log.LogQueryLevel, request *restful.Request) v1alpha2.QueryParameters {
var param v1alpha2.QueryParameters
switch level {
case log.QueryLevelCluster:
var namespaces []string
param.NamespaceNotFound, namespaces = log.MatchNamespace(stringutils.Split(request.QueryParameter("namespaces"), ","),
stringutils.Split(strings.ToLower(request.QueryParameter("namespace_query")), ","),
stringutils.Split(request.QueryParameter("workspaces"), ","),
stringutils.Split(strings.ToLower(request.QueryParameter("workspace_query")), ","))
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.WorkloadFilter = stringutils.Split(request.QueryParameter("workloads"), ",")
param.WorkloadQuery = stringutils.Split(request.QueryParameter("workload_query"), ",")
param.PodFilter = stringutils.Split(request.QueryParameter("pods"), ",")
param.PodQuery = stringutils.Split(request.QueryParameter("pod_query"), ",")
param.ContainerFilter = stringutils.Split(request.QueryParameter("containers"), ",")
param.ContainerQuery = stringutils.Split(request.QueryParameter("container_query"), ",")
case log.QueryLevelWorkspace:
var namespaces []string
param.NamespaceNotFound, namespaces = log.MatchNamespace(stringutils.Split(request.QueryParameter("namespaces"), ","),
stringutils.Split(strings.ToLower(request.QueryParameter("namespace_query")), ","),
stringutils.Split(request.PathParameter("workspace"), ","), nil)
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.WorkloadFilter = stringutils.Split(request.QueryParameter("workloads"), ",")
param.WorkloadQuery = stringutils.Split(request.QueryParameter("workload_query"), ",")
param.PodFilter = stringutils.Split(request.QueryParameter("pods"), ",")
param.PodQuery = stringutils.Split(request.QueryParameter("pod_query"), ",")
param.ContainerFilter = stringutils.Split(request.QueryParameter("containers"), ",")
param.ContainerQuery = stringutils.Split(request.QueryParameter("container_query"), ",")
case log.QueryLevelNamespace:
namespaces := []string{request.PathParameter("namespace")}
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.WorkloadFilter = stringutils.Split(request.QueryParameter("workloads"), ",")
param.WorkloadQuery = stringutils.Split(request.QueryParameter("workload_query"), ",")
param.PodFilter = stringutils.Split(request.QueryParameter("pods"), ",")
param.PodQuery = stringutils.Split(request.QueryParameter("pod_query"), ",")
param.ContainerFilter = stringutils.Split(request.QueryParameter("containers"), ",")
param.ContainerQuery = stringutils.Split(request.QueryParameter("container_query"), ",")
case log.QueryLevelWorkload:
namespaces := []string{request.PathParameter("namespace")}
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.WorkloadFilter = []string{request.PathParameter("workload")}
param.PodFilter = stringutils.Split(request.QueryParameter("pods"), ",")
param.PodQuery = stringutils.Split(request.QueryParameter("pod_query"), ",")
param.ContainerFilter = stringutils.Split(request.QueryParameter("containers"), ",")
param.ContainerQuery = stringutils.Split(request.QueryParameter("container_query"), ",")
case log.QueryLevelPod:
namespaces := []string{request.PathParameter("namespace")}
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.PodFilter = []string{request.PathParameter("pod")}
param.ContainerFilter = stringutils.Split(request.QueryParameter("containers"), ",")
param.ContainerQuery = stringutils.Split(request.QueryParameter("container_query"), ",")
case log.QueryLevelContainer:
namespaces := []string{request.PathParameter("namespace")}
param.NamespaceWithCreationTime = log.MakeNamespaceCreationTimeMap(namespaces)
param.PodFilter = []string{request.PathParameter("pod")}
param.ContainerFilter = []string{request.PathParameter("container")}
}
param.LogQuery = stringutils.Split(request.QueryParameter("log_query"), ",")
param.Interval = request.QueryParameter("interval")
param.StartTime = request.QueryParameter("start_time")
param.EndTime = request.QueryParameter("end_time")
param.Sort = request.QueryParameter("sort")
switch request.QueryParameter("operation") {
case "statistics":
param.Operation = v1alpha2.OperationStatistics
case "histogram":
param.Operation = v1alpha2.OperationHistogram
case "export":
param.Operation = v1alpha2.OperationExport
default:
param.Operation = v1alpha2.OperationQuery
}
var err error
param.From, err = strconv.ParseInt(request.QueryParameter("from"), 10, 64)
if err != nil {
param.From = 0
}
param.Size, err = strconv.ParseInt(request.QueryParameter("size"), 10, 64)
if err != nil {
param.Size = 10
}
return param
}
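// Illustrative sketch, not part of this change: how these handlers could be
// attached to a go-restful WebService. The path layout and parameter names
// are hypothetical; the actual routes are registered elsewhere in the API
// server.
func exampleAddRoutes(ws *restful.WebService) {
	ws.Route(ws.GET("/logs").To(LoggingQueryCluster))
	ws.Route(ws.GET("/workspaces/{workspace}/logs").To(LoggingQueryWorkspace))
	ws.Route(ws.GET("/namespaces/{namespace}/logs").To(LoggingQueryNamespace))
	ws.Route(ws.GET("/namespaces/{namespace}/workloads/{workload}/logs").To(LoggingQueryWorkload))
	ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/logs").To(LoggingQueryPod))
	ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers/{container}/logs").To(LoggingQueryContainer))
	ws.Route(ws.GET("/fluentbit/outputs").To(LoggingQueryFluentbitOutputs))
	ws.Route(ws.POST("/fluentbit/outputs").To(LoggingInsertFluentbitOutput))
	ws.Route(ws.PUT("/fluentbit/outputs/{output}").To(LoggingUpdateFluentbitOutput))
	ws.Route(ws.DELETE("/fluentbit/outputs/{output}").To(LoggingDeleteFluentbitOutput))
}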
......@@ -25,7 +25,6 @@ import (
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
devopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/logging/v1alpha2"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1"
......@@ -34,7 +33,6 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
DevopsV1alpha1() devopsv1alpha1.DevopsV1alpha1Interface
LoggingV1alpha2() loggingv1alpha2.LoggingV1alpha2Interface
NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface
ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface
TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface
......@@ -45,7 +43,6 @@ type Interface interface {
type Clientset struct {
*discovery.DiscoveryClient
devopsV1alpha1 *devopsv1alpha1.DevopsV1alpha1Client
loggingV1alpha2 *loggingv1alpha2.LoggingV1alpha2Client
networkV1alpha1 *networkv1alpha1.NetworkV1alpha1Client
servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client
tenantV1alpha1 *tenantv1alpha1.TenantV1alpha1Client
......@@ -56,11 +53,6 @@ func (c *Clientset) DevopsV1alpha1() devopsv1alpha1.DevopsV1alpha1Interface {
return c.devopsV1alpha1
}
// LoggingV1alpha2 retrieves the LoggingV1alpha2Client
func (c *Clientset) LoggingV1alpha2() loggingv1alpha2.LoggingV1alpha2Interface {
return c.loggingV1alpha2
}
// NetworkV1alpha1 retrieves the NetworkV1alpha1Client
func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface {
return c.networkV1alpha1
......@@ -101,10 +93,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
if err != nil {
return nil, err
}
cs.loggingV1alpha2, err = loggingv1alpha2.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.networkV1alpha1, err = networkv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
......@@ -130,7 +118,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.devopsV1alpha1 = devopsv1alpha1.NewForConfigOrDie(c)
cs.loggingV1alpha2 = loggingv1alpha2.NewForConfigOrDie(c)
cs.networkV1alpha1 = networkv1alpha1.NewForConfigOrDie(c)
cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c)
cs.tenantV1alpha1 = tenantv1alpha1.NewForConfigOrDie(c)
......@@ -143,7 +130,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.devopsV1alpha1 = devopsv1alpha1.New(c)
cs.loggingV1alpha2 = loggingv1alpha2.New(c)
cs.networkV1alpha1 = networkv1alpha1.New(c)
cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c)
cs.tenantV1alpha1 = tenantv1alpha1.New(c)
......
......@@ -27,8 +27,6 @@ import (
clientset "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
devopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1"
fakedevopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1/fake"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/logging/v1alpha2"
fakeloggingv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/logging/v1alpha2/fake"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
fakenetworkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1/fake"
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2"
......@@ -89,11 +87,6 @@ func (c *Clientset) DevopsV1alpha1() devopsv1alpha1.DevopsV1alpha1Interface {
return &fakedevopsv1alpha1.FakeDevopsV1alpha1{Fake: &c.Fake}
}
// LoggingV1alpha2 retrieves the LoggingV1alpha2Client
func (c *Clientset) LoggingV1alpha2() loggingv1alpha2.LoggingV1alpha2Interface {
return &fakeloggingv1alpha2.FakeLoggingV1alpha2{Fake: &c.Fake}
}
// NetworkV1alpha1 retrieves the NetworkV1alpha1Client
func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface {
return &fakenetworkv1alpha1.FakeNetworkV1alpha1{Fake: &c.Fake}
......
......@@ -25,7 +25,6 @@ import (
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
......@@ -36,7 +35,6 @@ var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
devopsv1alpha1.AddToScheme,
loggingv1alpha2.AddToScheme,
networkv1alpha1.AddToScheme,
servicemeshv1alpha2.AddToScheme,
tenantv1alpha1.AddToScheme,
......
......@@ -25,7 +25,6 @@ import (
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
......@@ -36,7 +35,6 @@ var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
devopsv1alpha1.AddToScheme,
loggingv1alpha2.AddToScheme,
networkv1alpha1.AddToScheme,
servicemeshv1alpha2.AddToScheme,
tenantv1alpha1.AddToScheme,
......
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha2
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
)
// FakeFluentBits implements FluentBitInterface
type FakeFluentBits struct {
Fake *FakeLoggingV1alpha2
ns string
}
var fluentbitsResource = schema.GroupVersionResource{Group: "logging.kubesphere.io", Version: "v1alpha2", Resource: "fluentbits"}
var fluentbitsKind = schema.GroupVersionKind{Group: "logging.kubesphere.io", Version: "v1alpha2", Kind: "FluentBit"}
// Get takes name of the fluentBit, and returns the corresponding fluentBit object, and an error if there is any.
func (c *FakeFluentBits) Get(name string, options v1.GetOptions) (result *v1alpha2.FluentBit, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(fluentbitsResource, c.ns, name), &v1alpha2.FluentBit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.FluentBit), err
}
// List takes label and field selectors, and returns the list of FluentBits that match those selectors.
func (c *FakeFluentBits) List(opts v1.ListOptions) (result *v1alpha2.FluentBitList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(fluentbitsResource, fluentbitsKind, c.ns, opts), &v1alpha2.FluentBitList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.FluentBitList{ListMeta: obj.(*v1alpha2.FluentBitList).ListMeta}
for _, item := range obj.(*v1alpha2.FluentBitList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested fluentBits.
func (c *FakeFluentBits) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(fluentbitsResource, c.ns, opts))
}
// Create takes the representation of a fluentBit and creates it. Returns the server's representation of the fluentBit, and an error, if there is any.
func (c *FakeFluentBits) Create(fluentBit *v1alpha2.FluentBit) (result *v1alpha2.FluentBit, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(fluentbitsResource, c.ns, fluentBit), &v1alpha2.FluentBit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.FluentBit), err
}
// Update takes the representation of a fluentBit and updates it. Returns the server's representation of the fluentBit, and an error, if there is any.
func (c *FakeFluentBits) Update(fluentBit *v1alpha2.FluentBit) (result *v1alpha2.FluentBit, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(fluentbitsResource, c.ns, fluentBit), &v1alpha2.FluentBit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.FluentBit), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeFluentBits) UpdateStatus(fluentBit *v1alpha2.FluentBit) (*v1alpha2.FluentBit, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(fluentbitsResource, "status", c.ns, fluentBit), &v1alpha2.FluentBit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.FluentBit), err
}
// Delete takes name of the fluentBit and deletes it. Returns an error if one occurs.
func (c *FakeFluentBits) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(fluentbitsResource, c.ns, name), &v1alpha2.FluentBit{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeFluentBits) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(fluentbitsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha2.FluentBitList{})
return err
}
// Patch applies the patch and returns the patched fluentBit.
func (c *FakeFluentBits) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.FluentBit, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(fluentbitsResource, c.ns, name, pt, data, subresources...), &v1alpha2.FluentBit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.FluentBit), err
}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
v1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/logging/v1alpha2"
)
type FakeLoggingV1alpha2 struct {
*testing.Fake
}
func (c *FakeLoggingV1alpha2) FluentBits(namespace string) v1alpha2.FluentBitInterface {
return &FakeFluentBits{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeLoggingV1alpha2) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}
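// Illustrative sketch, not part of this change: exercising the generated fake
// clientset from a unit test in a separate package. The object name and
// namespace are hypothetical.
package fake_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	loggingv1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
)

func TestFakeFluentBits(t *testing.T) {
	client := fake.NewSimpleClientset()
	fb := &loggingv1alpha2.FluentBit{ObjectMeta: metav1.ObjectMeta{Name: "fluent-bit", Namespace: "default"}}
	if _, err := client.LoggingV1alpha2().FluentBits("default").Create(fb); err != nil {
		t.Fatal(err)
	}
	list, err := client.LoggingV1alpha2().FluentBits("default").List(metav1.ListOptions{})
	if err != nil || len(list.Items) != 1 {
		t.Fatalf("unexpected list result: %v %v", list, err)
	}
}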
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
)
// FluentBitsGetter has a method to return a FluentBitInterface.
// A group's client should implement this interface.
type FluentBitsGetter interface {
FluentBits(namespace string) FluentBitInterface
}
// FluentBitInterface has methods to work with FluentBit resources.
type FluentBitInterface interface {
Create(*v1alpha2.FluentBit) (*v1alpha2.FluentBit, error)
Update(*v1alpha2.FluentBit) (*v1alpha2.FluentBit, error)
UpdateStatus(*v1alpha2.FluentBit) (*v1alpha2.FluentBit, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha2.FluentBit, error)
List(opts v1.ListOptions) (*v1alpha2.FluentBitList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.FluentBit, err error)
FluentBitExpansion
}
// fluentBits implements FluentBitInterface
type fluentBits struct {
client rest.Interface
ns string
}
// newFluentBits returns a FluentBits
func newFluentBits(c *LoggingV1alpha2Client, namespace string) *fluentBits {
return &fluentBits{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the fluentBit, and returns the corresponding fluentBit object, and an error if there is any.
func (c *fluentBits) Get(name string, options v1.GetOptions) (result *v1alpha2.FluentBit, err error) {
result = &v1alpha2.FluentBit{}
err = c.client.Get().
Namespace(c.ns).
Resource("fluentbits").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of FluentBits that match those selectors.
func (c *fluentBits) List(opts v1.ListOptions) (result *v1alpha2.FluentBitList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha2.FluentBitList{}
err = c.client.Get().
Namespace(c.ns).
Resource("fluentbits").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested fluentBits.
func (c *fluentBits) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("fluentbits").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a fluentBit and creates it. Returns the server's representation of the fluentBit, and an error, if there is any.
func (c *fluentBits) Create(fluentBit *v1alpha2.FluentBit) (result *v1alpha2.FluentBit, err error) {
result = &v1alpha2.FluentBit{}
err = c.client.Post().
Namespace(c.ns).
Resource("fluentbits").
Body(fluentBit).
Do().
Into(result)
return
}
// Update takes the representation of a fluentBit and updates it. Returns the server's representation of the fluentBit, and an error, if there is any.
func (c *fluentBits) Update(fluentBit *v1alpha2.FluentBit) (result *v1alpha2.FluentBit, err error) {
result = &v1alpha2.FluentBit{}
err = c.client.Put().
Namespace(c.ns).
Resource("fluentbits").
Name(fluentBit.Name).
Body(fluentBit).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *fluentBits) UpdateStatus(fluentBit *v1alpha2.FluentBit) (result *v1alpha2.FluentBit, err error) {
result = &v1alpha2.FluentBit{}
err = c.client.Put().
Namespace(c.ns).
Resource("fluentbits").
Name(fluentBit.Name).
SubResource("status").
Body(fluentBit).
Do().
Into(result)
return
}
// Delete takes name of the fluentBit and deletes it. Returns an error if one occurs.
func (c *fluentBits) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("fluentbits").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *fluentBits) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("fluentbits").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched fluentBit.
func (c *fluentBits) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.FluentBit, err error) {
result = &v1alpha2.FluentBit{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("fluentbits").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
type FluentBitExpansion interface{}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
rest "k8s.io/client-go/rest"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
)
type LoggingV1alpha2Interface interface {
RESTClient() rest.Interface
FluentBitsGetter
}
// LoggingV1alpha2Client is used to interact with features provided by the logging.kubesphere.io group.
type LoggingV1alpha2Client struct {
restClient rest.Interface
}
func (c *LoggingV1alpha2Client) FluentBits(namespace string) FluentBitInterface {
return newFluentBits(c, namespace)
}
// NewForConfig creates a new LoggingV1alpha2Client for the given config.
func NewForConfig(c *rest.Config) (*LoggingV1alpha2Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &LoggingV1alpha2Client{client}, nil
}
// NewForConfigOrDie creates a new LoggingV1alpha2Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *LoggingV1alpha2Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new LoggingV1alpha2Client for the given RESTClient.
func New(c rest.Interface) *LoggingV1alpha2Client {
return &LoggingV1alpha2Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *LoggingV1alpha2Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
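// Illustrative sketch, not part of this change: building the typed client
// directly from an in-cluster rest.Config and listing FluentBits. The target
// namespace is hypothetical.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	loggingclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/logging/v1alpha2"
)

func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := loggingclient.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	list, err := client.FluentBits("kubesphere-logging-system").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d FluentBit objects\n", len(list.Items))
}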
......@@ -30,7 +30,6 @@ import (
versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
devops "kubesphere.io/kubesphere/pkg/client/informers/externalversions/devops"
internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
logging "kubesphere.io/kubesphere/pkg/client/informers/externalversions/logging"
network "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network"
servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh"
tenant "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant"
......@@ -177,7 +176,6 @@ type SharedInformerFactory interface {
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Devops() devops.Interface
Logging() logging.Interface
Network() network.Interface
Servicemesh() servicemesh.Interface
Tenant() tenant.Interface
......@@ -187,10 +185,6 @@ func (f *sharedInformerFactory) Devops() devops.Interface {
return devops.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Logging() logging.Interface {
return logging.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Network() network.Interface {
return network.New(f, f.namespace, f.tweakListOptions)
}
......
......@@ -24,9 +24,8 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
)
......@@ -60,10 +59,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case v1alpha1.SchemeGroupVersion.WithResource("s2ibinaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Devops().V1alpha1().S2iBinaries().Informer()}, nil
// Group=logging.kubesphere.io, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithResource("fluentbits"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Logging().V1alpha2().FluentBits().Informer()}, nil
// Group=network.kubesphere.io, Version=v1alpha1
case networkv1alpha1.SchemeGroupVersion.WithResource("namespacenetworkpolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1alpha1().NamespaceNetworkPolicies().Informer()}, nil
......@@ -71,9 +66,9 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1alpha1().WorkspaceNetworkPolicies().Informer()}, nil
// Group=servicemesh.kubesphere.io, Version=v1alpha2
case servicemeshv1alpha2.SchemeGroupVersion.WithResource("servicepolicies"):
case v1alpha2.SchemeGroupVersion.WithResource("servicepolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().ServicePolicies().Informer()}, nil
case servicemeshv1alpha2.SchemeGroupVersion.WithResource("strategies"):
case v1alpha2.SchemeGroupVersion.WithResource("strategies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().Strategies().Informer()}, nil
// Group=tenant.kubesphere.io, Version=v1alpha1
......
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package logging
import (
internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
v1alpha2 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/logging/v1alpha2"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha2 provides access to shared informers for resources in V1alpha2.
V1alpha2() v1alpha2.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha2 returns a new v1alpha2.Interface.
func (g *group) V1alpha2() v1alpha2.Interface {
return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions)
}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
import (
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
v1alpha2 "kubesphere.io/kubesphere/pkg/client/listers/logging/v1alpha2"
)
// FluentBitInformer provides access to a shared informer and lister for
// FluentBits.
type FluentBitInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha2.FluentBitLister
}
type fluentBitInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewFluentBitInformer constructs a new informer for FluentBit type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFluentBitInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredFluentBitInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredFluentBitInformer constructs a new informer for FluentBit type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFluentBitInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.LoggingV1alpha2().FluentBits(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.LoggingV1alpha2().FluentBits(namespace).Watch(options)
},
},
&loggingv1alpha2.FluentBit{},
resyncPeriod,
indexers,
)
}
func (f *fluentBitInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredFluentBitInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *fluentBitInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&loggingv1alpha2.FluentBit{}, f.defaultInformer)
}
func (f *fluentBitInformer) Lister() v1alpha2.FluentBitLister {
return v1alpha2.NewFluentBitLister(f.Informer().GetIndexer())
}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
import (
internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// FluentBits returns a FluentBitInformer.
FluentBits() FluentBitInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// FluentBits returns a FluentBitInformer.
func (v *version) FluentBits() FluentBitInformer {
return &fluentBitInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
// FluentBitListerExpansion allows custom methods to be added to
// FluentBitLister.
type FluentBitListerExpansion interface{}
// FluentBitNamespaceListerExpansion allows custom methods to be added to
// FluentBitNamespaceLister.
type FluentBitNamespaceListerExpansion interface{}
/*
Copyright 2019 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
)
// FluentBitLister helps list FluentBits.
type FluentBitLister interface {
// List lists all FluentBits in the indexer.
List(selector labels.Selector) (ret []*v1alpha2.FluentBit, err error)
// FluentBits returns an object that can list and get FluentBits.
FluentBits(namespace string) FluentBitNamespaceLister
FluentBitListerExpansion
}
// fluentBitLister implements the FluentBitLister interface.
type fluentBitLister struct {
indexer cache.Indexer
}
// NewFluentBitLister returns a new FluentBitLister.
func NewFluentBitLister(indexer cache.Indexer) FluentBitLister {
return &fluentBitLister{indexer: indexer}
}
// List lists all FluentBits in the indexer.
func (s *fluentBitLister) List(selector labels.Selector) (ret []*v1alpha2.FluentBit, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.FluentBit))
})
return ret, err
}
// FluentBits returns an object that can list and get FluentBits.
func (s *fluentBitLister) FluentBits(namespace string) FluentBitNamespaceLister {
return fluentBitNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// FluentBitNamespaceLister helps list and get FluentBits.
type FluentBitNamespaceLister interface {
// List lists all FluentBits in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha2.FluentBit, err error)
// Get retrieves the FluentBit from the indexer for a given namespace and name.
Get(name string) (*v1alpha2.FluentBit, error)
FluentBitNamespaceListerExpansion
}
// fluentBitNamespaceLister implements the FluentBitNamespaceLister
// interface.
type fluentBitNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all FluentBits in the indexer for a given namespace.
func (s fluentBitNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.FluentBit, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.FluentBit))
})
return ret, err
}
// Get retrieves the FluentBit from the indexer for a given namespace and name.
func (s fluentBitNamespaceLister) Get(name string) (*v1alpha2.FluentBit, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("fluentbit"), name)
}
return obj.(*v1alpha2.FluentBit), nil
}
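For reference, the generated clientset, informer and lister above compose in the usual client-go pattern. A minimal usage sketch, assuming the standard generated constructors versioned.NewForConfig and externalversions.NewSharedInformerFactory (not shown in this diff); the namespace is a placeholder:
package example
import (
	"time"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/clientcmd"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	externalversions "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)
func listFluentBits(kubeconfig string, stopCh <-chan struct{}) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := versioned.NewForConfig(config)
	if err != nil {
		return err
	}
	// One shared factory per process; it deduplicates watches across informers.
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	// Calling Lister() registers the FluentBit informer with the factory.
	lister := factory.Logging().V1alpha2().FluentBits().Lister()
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	// Read from the local cache through the generated lister.
	items, err := lister.FluentBits("kubesphere-logging-system").List(labels.Everything())
	if err != nil {
		return err
	}
	for _, item := range items {
		_ = item.Name
	}
	return nil
}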
......@@ -76,7 +76,6 @@ const (
WorkspaceMetricsTag = "Workspace Metrics"
ComponentMetricsTag = "Component Metrics"
LogQueryTag = "Log Query"
FluentBitSetting = "Fluent Bit Setting"
TerminalTag = "Terminal"
)
......
......@@ -16,14 +16,15 @@ import (
"kubesphere.io/kubesphere/pkg/models/iam"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
ldappool "kubesphere.io/kubesphere/pkg/simple/client/ldap"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
op "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
)
func InstallAPIs(container *restful.Container, client k8s.Client, op op.Client, db *mysql.Database) {
func InstallAPIs(container *restful.Container, client k8s.Client, op op.Client, db *mysql.Database, logging logging.Interface) {
urlruntime.Must(servicemeshv1alpha2.AddToContainer(container))
urlruntime.Must(devopsv1alpha2.AddToContainer(container))
urlruntime.Must(loggingv1alpha2.AddToContainer(container))
urlruntime.Must(loggingv1alpha2.AddToContainer(container, client, logging))
urlruntime.Must(monitoringv1alpha2.AddToContainer(container))
urlruntime.Must(openpitrixv1.AddToContainer(container, client, op))
urlruntime.Must(operationsv1alpha2.AddToContainer(container, client))
......
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"github.com/emicklei/go-restful"
urlruntime "k8s.io/apimachinery/pkg/util/runtime"
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
"kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2"
)
func init() {
Install(runtime.Container)
}
func Install(container *restful.Container) {
urlruntime.Must(v1alpha2.AddToContainer(container))
}
package v1alpha2
import (
"github.com/emicklei/go-restful"
"kubesphere.io/kubesphere/pkg/api"
"kubesphere.io/kubesphere/pkg/models/logging"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
util "kubesphere.io/kubesphere/pkg/utils/stringutils"
"strconv"
"strings"
"time"
)
const (
LevelCluster = iota
LevelContainer
// query type, defaults to `query`
TypeStat = "statistics"
TypeHist = "histogram"
TypeExport = "export"
Ascending = "asc"
Descending = "desc"
)
type handler struct {
k k8s.Client
lo logging.LoggingOperator
}
func newHandler(k k8s.Client, l loggingclient.Interface) *handler {
return &handler{k, logging.NewLoggingOperator(l)}
}
func (h handler) handleClusterQuery(req *restful.Request, resp *restful.Response) {
h.get(req, LevelCluster, resp)
}
func (h handler) handleContainerQuery(req *restful.Request, resp *restful.Response) {
h.get(req, LevelContainer, resp)
}
func (h handler) get(req *restful.Request, lvl int, resp *restful.Response) {
typ := req.QueryParameter("type")
noHit, sf, err := h.newSearchFilter(req, lvl)
if err != nil {
api.HandleBadRequest(resp, err)
return
}
if noHit {
handleNoHit(typ, resp)
return
}
switch typ {
case TypeStat:
res, err := h.lo.GetCurrentStats(sf)
if err != nil {
api.HandleInternalError(resp, err)
return
}
resp.WriteAsJson(res)
case TypeHist:
interval := req.QueryParameter("interval")
res, err := h.lo.CountLogsByInterval(sf, interval)
if err != nil {
api.HandleInternalError(resp, err)
return
}
resp.WriteAsJson(res)
case TypeExport:
resp.Header().Set(restful.HEADER_ContentType, "text/plain")
resp.Header().Set("Content-Disposition", "attachment")
err := h.lo.ExportLogs(sf, resp.ResponseWriter)
if err != nil {
api.HandleInternalError(resp, err)
}
default:
from, _ := strconv.ParseInt(req.QueryParameter("from"), 10, 64)
size, err := strconv.ParseInt(req.QueryParameter("size"), 10, 64)
if err != nil {
size = 10
}
order := req.QueryParameter("sort")
if order != Ascending {
order = Descending
}
res, err := h.lo.SearchLogs(sf, from, size, order)
if err != nil {
api.HandleInternalError(resp, err)
return
}
resp.WriteAsJson(res)
}
}
func (h handler) newSearchFilter(req *restful.Request, level int) (bool, loggingclient.SearchFilter, error) {
var sf loggingclient.SearchFilter
switch level {
case LevelCluster:
sf.NamespaceFilter = h.intersect(
util.Split(req.QueryParameter("namespaces"), ","),
util.Split(strings.ToLower(req.QueryParameter("namespace_query")), ","),
util.Split(req.QueryParameter("workspaces"), ","),
util.Split(strings.ToLower(req.QueryParameter("workspace_query")), ","))
sf.WorkloadFilter = util.Split(req.QueryParameter("workloads"), ",")
sf.WorkloadSearch = util.Split(req.QueryParameter("workload_query"), ",")
sf.PodFilter = util.Split(req.QueryParameter("pods"), ",")
sf.PodSearch = util.Split(req.QueryParameter("pod_query"), ",")
sf.ContainerFilter = util.Split(req.QueryParameter("containers"), ",")
sf.ContainerSearch = util.Split(req.QueryParameter("container_query"), ",")
case LevelContainer:
sf.NamespaceFilter = h.withCreationTime(req.PathParameter("namespace"))
sf.PodFilter = []string{req.PathParameter("pod")}
sf.ContainerFilter = []string{req.PathParameter("container")}
}
sf.LogSearch = util.Split(req.QueryParameter("log_query"), ",")
var err error
now := time.Now()
// If time is not given, set it to now.
if req.QueryParameter("start_time") == "" {
sf.Starttime = now
} else {
sf.Starttime, err = time.Parse(time.RFC3339, req.QueryParameter("start_time"))
if err != nil {
return false, sf, err
}
}
if req.QueryParameter("end_time") == "" {
sf.Endtime = now
} else {
sf.Endtime, err = time.Parse(time.RFC3339, req.QueryParameter("end_time"))
if err != nil {
return false, sf, err
}
}
return len(sf.NamespaceFilter) == 0, sf, nil
}
func handleNoHit(typ string, resp *restful.Response) {
switch typ {
case TypeStat:
resp.WriteAsJson(new(loggingclient.Statistics))
case TypeHist:
resp.WriteAsJson(new(loggingclient.Histogram))
case TypeExport:
resp.Header().Set(restful.HEADER_ContentType, "text/plain")
resp.Header().Set("Content-Disposition", "attachment")
resp.Write(nil)
default:
resp.WriteAsJson(new(loggingclient.Logs))
}
}
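For reference, every request above is reduced to a loggingclient.SearchFilter and handed to the LoggingOperator. A minimal sketch of that path, assuming an initialized loggingclient.Interface; the namespace, pod name and creation time are placeholders:
package example
import (
	"os"
	"time"
	"kubesphere.io/kubesphere/pkg/models/logging"
	loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
)
func exportErrorLogs(client loggingclient.Interface) error {
	sf := loggingclient.SearchFilter{
		// Namespaces are keyed by creation time, which the backend uses as the
		// lower bound of the time range for that namespace.
		NamespaceFilter: map[string]time.Time{
			"demo-ns": time.Now().Add(-72 * time.Hour),
		},
		PodFilter: []string{"demo-pod"},
		LogSearch: []string{"error"},
		Starttime: time.Now().Add(-time.Hour),
		Endtime:   time.Now(),
	}
	// Same path the export handler takes, streaming matching lines to stdout.
	return logging.NewLoggingOperator(client).ExportLogs(sf, os.Stdout)
}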
package v1alpha2
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"strings"
"time"
)
func (h handler) intersect(nsFilter []string, nsSearch []string, wsFilter []string, wsSearch []string) map[string]time.Time {
nsList, err := h.k.Kubernetes().CoreV1().Namespaces().List(v1.ListOptions{})
if err != nil {
klog.Errorf("failed to list namespace, error: %s", err)
return nil
}
inner := make(map[string]time.Time)
// if no filter or search condition is set on either namespaces or workspaces,
// return all namespaces
if nsSearch == nil && nsFilter == nil && wsSearch == nil && wsFilter == nil {
for _, ns := range nsList.Items {
inner[ns.Name] = ns.CreationTimestamp.Time
}
} else {
for _, ns := range nsList.Items {
if stringutils.StringIn(ns.Name, nsFilter) ||
stringutils.StringIn(ns.Annotations[constants.WorkspaceLabelKey], wsFilter) ||
containsIn(ns.Name, nsSearch) ||
containsIn(ns.Annotations[constants.WorkspaceLabelKey], wsSearch) {
inner[ns.Name] = ns.CreationTimestamp.Time
}
}
}
return inner
}
func containsIn(str string, subStrs []string) bool {
for _, sub := range subStrs {
if strings.Contains(str, sub) {
return true
}
}
return false
}
func (h handler) withCreationTime(name string) map[string]time.Time {
ns, err := h.k.Kubernetes().CoreV1().Namespaces().Get(name, v1.GetOptions{})
if err == nil {
return map[string]time.Time{name: ns.CreationTimestamp.Time}
}
return nil
}
......@@ -8,8 +8,6 @@ import (
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/apiserver/logging"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/informers"
"kubesphere.io/kubesphere/pkg/models/iam"
......@@ -289,26 +287,28 @@ func (h *tenantHandler) ListDevopsRules(req *restful.Request, resp *restful.Resp
resp.WriteAsJson(rules)
}
func (h *tenantHandler) LogQuery(req *restful.Request, resp *restful.Response) {
operation := req.QueryParameter("operation")
req, err := h.regenerateLoggingRequest(req)
switch {
case err != nil:
api.HandleInternalError(resp, err)
case req != nil:
logging.LoggingQueryCluster(req, resp)
default:
if operation == "export" {
resp.Header().Set(restful.HEADER_ContentType, "text/plain")
resp.Header().Set("Content-Disposition", "attachment")
resp.Write(nil)
} else {
resp.WriteAsJson(loggingv1alpha2.QueryResult{Read: new(loggingv1alpha2.ReadResult)})
}
}
}
//TODO(wansir): We need to move this part to the logging module
//func (h *tenantHandler) LogQuery(req *restful.Request, resp *restful.Response) {
// operation := req.QueryParameter("operation")
// req, err := h.regenerateLoggingRequest(req)
// switch {
// case err != nil:
// api.HandleInternalError(resp, err)
// case req != nil:
// loggingv1alpha2.Get(req, loggingv1alpha2.LevelCluster, h.k8s, h.lo, resp)
// default:
// if operation == "export" {
// resp.Header().Set(restful.HEADER_ContentType, "text/plain")
// resp.Header().Set("Content-Disposition", "attachment")
// resp.Write(nil)
// } else {
// resp.WriteAsJson(v1alpha2.APIResponse{Logs: new(loggingclient.Logs)})
// }
// }
//}
// override namespace query conditions
//TODO(wansir): We need to move this part to the logging module
func (h *tenantHandler) regenerateLoggingRequest(req *restful.Request) (*restful.Request, error) {
username := req.HeaderParameter(constants.UserNameHeader)
......
......@@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"kubesphere.io/kubesphere/pkg/api"
devopsv1alpha2 "kubesphere.io/kubesphere/pkg/api/devops/v1alpha2"
"kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
"kubesphere.io/kubesphere/pkg/constants"
......@@ -151,32 +150,32 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, db *mysql.Databa
Doc("Delete the specified devops project from the workspace").
Returns(http.StatusOK, api.StatusOK, devopsv1alpha2.DevOpsProject{}).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
ws.Route(ws.GET("/logs").
To(handler.LogQuery).
Doc("Query cluster-level logs in a multi-tenants environment").
Param(ws.QueryParameter("operation", "Operation type. This can be one of four types: query (for querying logs), statistics (for retrieving statistical data), histogram (for displaying log count by time interval) and export (for exporting logs). Defaults to query.").DefaultValue("query").DataType("string").Required(false)).
Param(ws.QueryParameter("workspaces", "A comma-separated list of workspaces. This field restricts the query to specified workspaces. For example, the following filter matches the workspace my-ws and demo-ws: `my-ws,demo-ws`").DataType("string").Required(false)).
Param(ws.QueryParameter("workspace_query", "A comma-separated list of keywords. Differing from **workspaces**, this field performs fuzzy matching on workspaces. For example, the following value limits the query to workspaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
Param(ws.QueryParameter("namespaces", "A comma-separated list of namespaces. This field restricts the query to specified namespaces. For example, the following filter matches the namespace my-ns and demo-ns: `my-ns,demo-ns`").DataType("string").Required(false)).
Param(ws.QueryParameter("namespace_query", "A comma-separated list of keywords. Differing from **namespaces**, this field performs fuzzy matching on namespaces. For example, the following value limits the query to namespaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
Param(ws.QueryParameter("workloads", "A comma-separated list of workloads. This field restricts the query to specified workloads. For example, the following filter matches the workload my-wl and demo-wl: `my-wl,demo-wl`").DataType("string").Required(false)).
Param(ws.QueryParameter("workload_query", "A comma-separated list of keywords. Differing from **workloads**, this field performs fuzzy matching on workloads. For example, the following value limits the query to workloads whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
Param(ws.QueryParameter("pods", "A comma-separated list of pods. This field restricts the query to specified pods. For example, the following filter matches the pod my-po and demo-po: `my-po,demo-po`").DataType("string").Required(false)).
Param(ws.QueryParameter("pod_query", "A comma-separated list of keywords. Differing from **pods**, this field performs fuzzy matching on pods. For example, the following value limits the query to pods whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
Param(ws.QueryParameter("containers", "A comma-separated list of containers. This field restricts the query to specified containers. For example, the following filter matches the container my-cont and demo-cont: `my-cont,demo-cont`").DataType("string").Required(false)).
Param(ws.QueryParameter("container_query", "A comma-separated list of keywords. Differing from **containers**, this field performs fuzzy matching on containers. For example, the following value limits the query to containers whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
Param(ws.QueryParameter("log_query", "A comma-separated list of keywords. The query returns logs which contain at least one keyword. Case-insensitive matching. For example, if the field is set to `err,INFO`, the query returns any log containing err(ERR,Err,...) *OR* INFO(info,InFo,...).").DataType("string").Required(false)).
Param(ws.QueryParameter("interval", "Time interval. It requires **operation** is set to histogram. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).").DefaultValue("15m").DataType("string").Required(false)).
Param(ws.QueryParameter("start_time", "Start time of query. Default to 0. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
Param(ws.QueryParameter("end_time", "End time of query. Default to now. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
Param(ws.QueryParameter("sort", "Sort order. One of acs, desc. This field sorts logs by timestamp.").DataType("string").DefaultValue("desc").Required(false)).
Param(ws.QueryParameter("from", "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to query. Defaults to 0 (i.e. from the beginning of the result set).").DataType("integer").DefaultValue("0").Required(false)).
Param(ws.QueryParameter("size", "Size of result to return. It requires **operation** is set to query. Defaults to 10 (i.e. 10 log records).").DataType("integer").DefaultValue("10").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}).
Writes(v1alpha2.Response{}).
Returns(http.StatusOK, api.StatusOK, v1alpha2.Response{})).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, "text/plain")
//ws.Route(ws.GET("/logs").
// To(handler.LogQuery).
// Doc("Query cluster-level logs in a multi-tenants environment").
// Param(ws.QueryParameter("operation", "Operation type. This can be one of four types: query (for querying logs), statistics (for retrieving statistical data), histogram (for displaying log count by time interval) and export (for exporting logs). Defaults to query.").DefaultValue("query").DataType("string").Required(false)).
// Param(ws.QueryParameter("workspaces", "A comma-separated list of workspaces. This field restricts the query to specified workspaces. For example, the following filter matches the workspace my-ws and demo-ws: `my-ws,demo-ws`").DataType("string").Required(false)).
// Param(ws.QueryParameter("workspace_query", "A comma-separated list of keywords. Differing from **workspaces**, this field performs fuzzy matching on workspaces. For example, the following value limits the query to workspaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
// Param(ws.QueryParameter("namespaces", "A comma-separated list of namespaces. This field restricts the query to specified namespaces. For example, the following filter matches the namespace my-ns and demo-ns: `my-ns,demo-ns`").DataType("string").Required(false)).
// Param(ws.QueryParameter("namespace_query", "A comma-separated list of keywords. Differing from **namespaces**, this field performs fuzzy matching on namespaces. For example, the following value limits the query to namespaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
// Param(ws.QueryParameter("workloads", "A comma-separated list of workloads. This field restricts the query to specified workloads. For example, the following filter matches the workload my-wl and demo-wl: `my-wl,demo-wl`").DataType("string").Required(false)).
// Param(ws.QueryParameter("workload_query", "A comma-separated list of keywords. Differing from **workloads**, this field performs fuzzy matching on workloads. For example, the following value limits the query to workloads whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
// Param(ws.QueryParameter("pods", "A comma-separated list of pods. This field restricts the query to specified pods. For example, the following filter matches the pod my-po and demo-po: `my-po,demo-po`").DataType("string").Required(false)).
// Param(ws.QueryParameter("pod_query", "A comma-separated list of keywords. Differing from **pods**, this field performs fuzzy matching on pods. For example, the following value limits the query to pods whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
// Param(ws.QueryParameter("containers", "A comma-separated list of containers. This field restricts the query to specified containers. For example, the following filter matches the container my-cont and demo-cont: `my-cont,demo-cont`").DataType("string").Required(false)).
// Param(ws.QueryParameter("container_query", "A comma-separated list of keywords. Differing from **containers**, this field performs fuzzy matching on containers. For example, the following value limits the query to containers whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
// Param(ws.QueryParameter("log_query", "A comma-separated list of keywords. The query returns logs which contain at least one keyword. Case-insensitive matching. For example, if the field is set to `err,INFO`, the query returns any log containing err(ERR,Err,...) *OR* INFO(info,InFo,...).").DataType("string").Required(false)).
// Param(ws.QueryParameter("interval", "Time interval. It requires **operation** is set to histogram. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).").DefaultValue("15m").DataType("string").Required(false)).
// Param(ws.QueryParameter("start_time", "Start time of query. Default to 0. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
// Param(ws.QueryParameter("end_time", "End time of query. Default to now. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
// Param(ws.QueryParameter("sort", "Sort order. One of acs, desc. This field sorts logs by timestamp.").DataType("string").DefaultValue("desc").Required(false)).
// Param(ws.QueryParameter("from", "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to query. Defaults to 0 (i.e. from the beginning of the result set).").DataType("integer").DefaultValue("0").Required(false)).
// Param(ws.QueryParameter("size", "Size of result to return. It requires **operation** is set to query. Defaults to 10 (i.e. 10 log records).").DataType("integer").DefaultValue("10").Required(false)).
// Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}).
// Writes(v1alpha2.Response{}).
// Returns(http.StatusOK, api.StatusOK, v1alpha2.Response{})).
// Consumes(restful.MIME_JSON, restful.MIME_XML).
// Produces(restful.MIME_JSON, "text/plain")
c.Add(ws)
return nil
......
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
type LogQueryLevel int
const (
QueryLevelCluster LogQueryLevel = iota
QueryLevelWorkspace
QueryLevelNamespace
QueryLevelWorkload
QueryLevelPod
QueryLevelContainer
)
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/informers"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"strconv"
"strings"
"time"
)
// list namespaces that match search conditions
func MatchNamespace(nsFilter []string, nsQuery []string, wsFilter []string, wsQuery []string) (bool, []string) {
nsLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister()
nsList, err := nsLister.List(labels.Everything())
if err != nil {
klog.Errorf("failed to list namespace, error: %s", err)
return true, nil
}
var namespaces []string
// if no filter or search condition is set on either namespaces or workspaces,
// return all namespaces
if nsQuery == nil && nsFilter == nil && wsQuery == nil && wsFilter == nil {
for _, ns := range nsList {
namespaces = append(namespaces, ns.Name)
}
return false, namespaces
}
for _, ns := range nsList {
if stringutils.StringIn(ns.Name, nsFilter) ||
stringutils.StringIn(ns.Annotations[constants.WorkspaceLabelKey], wsFilter) ||
containsIn(ns.Name, nsQuery) ||
containsIn(ns.Annotations[constants.WorkspaceLabelKey], wsQuery) {
namespaces = append(namespaces, ns.Name)
}
}
// if namespaces is nil, no namespace matched the conditions,
// which causes the query to return no result
return namespaces == nil, namespaces
}
func containsIn(str string, subStrs []string) bool {
for _, sub := range subStrs {
if strings.Contains(str, sub) {
return true
}
}
return false
}
func MakeNamespaceCreationTimeMap(namespaces []string) map[string]string {
namespaceWithCreationTime := make(map[string]string)
nsLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister()
for _, item := range namespaces {
ns, err := nsLister.Get(item)
if err != nil {
// the ns doesn't exist
continue
}
namespaceWithCreationTime[ns.Name] = strconv.FormatInt(ns.CreationTimestamp.UnixNano()/int64(time.Millisecond), 10)
}
return namespaceWithCreationTime
}
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
_ "github.com/go-sql-driver/mysql"
"github.com/google/uuid"
"github.com/json-iterator/go"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/informers"
"net/http"
"strings"
"time"
)
var jsonIter = jsoniter.ConfigCompatibleWithStandardLibrary
const (
ConfigMapName = "fluent-bit-output-config"
ConfigMapData = "outputs"
LoggingNamespace = "kubesphere-logging-system"
)
func createCRDClientSet() (*rest.RESTClient, *runtime.Scheme, error) {
config, err := fb.GetClientConfig("")
if err != nil {
//panic(err.Error())
return nil, nil, err
}
// Create a new clientset which include our CRD schema
return fb.NewFluentbitCRDClient(config)
}
func FluentbitOutputsQuery() *v1alpha2.FluentbitOutputsResult {
var result v1alpha2.FluentbitOutputsResult
outputs, err := GetFluentbitOutputFromConfigMap()
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
result.Outputs = outputs
result.Status = http.StatusOK
return &result
}
func FluentbitOutputInsert(output v1alpha2.OutputPlugin) *v1alpha2.FluentbitOutputsResult {
var result v1alpha2.FluentbitOutputsResult
// 1. Update ConfigMap
var outputs []v1alpha2.OutputPlugin
outputs, err := GetFluentbitOutputFromConfigMap()
if err != nil {
// If the ConfigMap doesn't exist, a new one will be created later
klog.Errorln(err)
}
// A newly added output is always enabled
output.Enable = true
output.Id = uuid.New().String()
output.Updatetime = time.Now()
outputs = append(outputs, output)
err = updateFluentbitOutputConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
// 2. Keep the CRD in line with the ConfigMap
err = syncFluentbitCRDOutputWithConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
result.Status = http.StatusOK
return &result
}
func FluentbitOutputUpdate(output v1alpha2.OutputPlugin, id string) *v1alpha2.FluentbitOutputsResult {
var result v1alpha2.FluentbitOutputsResult
// 1. Update ConfigMap
var outputs []v1alpha2.OutputPlugin
outputs, err := GetFluentbitOutputFromConfigMap()
if err != nil {
// If the ConfigMap doesn't exist, a new one will be created later
klog.Errorln(err)
}
index := 0
for _, output := range outputs {
if output.Id == id {
break
}
index++
}
if index >= len(outputs) {
result.Status = http.StatusNotFound
result.Error = "The output plugin to update doesn't exist. Please check the output id you provide."
return &result
}
output.Updatetime = time.Now()
outputs = append(append(outputs[:index], outputs[index+1:]...), output)
err = updateFluentbitOutputConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
// 2. Keep the CRD in line with the ConfigMap
err = syncFluentbitCRDOutputWithConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
result.Status = http.StatusOK
return &result
}
func FluentbitOutputDelete(id string) *v1alpha2.FluentbitOutputsResult {
var result v1alpha2.FluentbitOutputsResult
// 1. Update ConfigMap
// If the ConfigMap doesn't exist, a new one will be created
outputs, _ := GetFluentbitOutputFromConfigMap()
index := 0
for _, output := range outputs {
if output.Id == id {
break
}
index++
}
if index >= len(outputs) {
result.Status = http.StatusNotFound
result.Error = "The output plugin to delete doesn't exist. Please check the output id you provide."
return &result
}
outputs = append(outputs[:index], outputs[index+1:]...)
err := updateFluentbitOutputConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
// 2. Keep the CRD in line with the ConfigMap
err = syncFluentbitCRDOutputWithConfigMap(outputs)
if err != nil {
result.Status = http.StatusInternalServerError
result.Error = err.Error()
return &result
}
result.Status = http.StatusOK
return &result
}
func GetFluentbitOutputFromConfigMap() ([]v1alpha2.OutputPlugin, error) {
configMap, err := informers.SharedInformerFactory().Core().V1().ConfigMaps().Lister().ConfigMaps(LoggingNamespace).Get(ConfigMapName)
if err != nil {
return nil, err
}
data := configMap.Data[ConfigMapData]
var outputs []v1alpha2.OutputPlugin
if err = jsonIter.UnmarshalFromString(data, &outputs); err != nil {
return nil, err
}
return outputs, nil
}
func updateFluentbitOutputConfigMap(outputs []v1alpha2.OutputPlugin) error {
var data string
data, err := jsonIter.MarshalToString(outputs)
if err != nil {
klog.Errorln(err)
return err
}
// Update the ConfigMap
config, err := rest.InClusterConfig()
if err != nil {
klog.Errorln(err)
return err
}
// Creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Errorln(err)
return err
}
configMapClient := clientset.CoreV1().ConfigMaps(LoggingNamespace)
configMap, err := configMapClient.Get(ConfigMapName, metav1.GetOptions{})
if err != nil {
// If the ConfigMap doesn't exist, create a new one
newConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: ConfigMapName,
},
Data: map[string]string{ConfigMapData: data},
}
_, err = configMapClient.Create(newConfigMap)
if err != nil {
klog.Errorln(err)
return err
}
} else {
// update
configMap.Data = map[string]string{ConfigMapData: data}
_, err = configMapClient.Update(configMap)
if err != nil {
klog.Errorln(err)
return err
}
}
return nil
}
func syncFluentbitCRDOutputWithConfigMap(outputs []v1alpha2.OutputPlugin) error {
var enabledOutputs []v1alpha2.Plugin
for _, output := range outputs {
if output.Enable {
enabledOutputs = append(enabledOutputs, v1alpha2.Plugin{Type: output.Type, Name: output.Name, Parameters: output.Parameters})
}
}
// An empty output list is not allowed; a null-type output must be specified instead
if len(enabledOutputs) == 0 {
enabledOutputs = []v1alpha2.Plugin{
{
Type: "fluentbit_output",
Name: "fluentbit-output-null",
Parameters: []v1alpha2.Parameter{
{
Name: "Name",
Value: "null",
},
{
Name: "Match",
Value: "*",
},
},
},
}
}
crdcs, scheme, err := createCRDClientSet()
if err != nil {
klog.Errorln(err)
return err
}
// Create a CRD client interface
crdclient := v1alpha2.CrdClient(crdcs, scheme, LoggingNamespace)
fluentbit, err := crdclient.Get("fluent-bit")
if err != nil {
klog.Errorln(err)
return err
}
fluentbit.Spec.Output = enabledOutputs
_, err = crdclient.Update("fluent-bit", fluentbit)
if err != nil {
klog.Errorln(err)
return err
}
return nil
}
// Parse es host, port and index
func ParseEsOutputParams(params []v1alpha2.Parameter) *v1alpha2.Config {
var (
isEsFound bool
host = "127.0.0.1"
port = "9200"
index = "logstash"
logstashFormat string
logstashPrefix string
)
for _, param := range params {
switch param.Name {
case "Name":
if param.Value == "es" {
isEsFound = true
}
case "Host":
host = param.Value
case "Port":
port = param.Value
case "Index":
index = param.Value
case "Logstash_Format":
logstashFormat = strings.ToLower(param.Value)
case "Logstash_Prefix":
logstashPrefix = param.Value
}
}
if !isEsFound {
return nil
}
// If Logstash_Format is On/True, ignore Index
if logstashFormat == "on" || logstashFormat == "true" {
if logstashPrefix != "" {
index = logstashPrefix
} else {
index = "logstash"
}
}
return &v1alpha2.Config{Host: host, Port: port, Index: index}
}
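A small sketch of how ParseEsOutputParams resolves the index, placed in the same package; the host, index and prefix values are placeholders. When Logstash_Format is enabled, Logstash_Prefix takes precedence over the static Index parameter:
func exampleEsOutputConfig() *v1alpha2.Config {
	params := []v1alpha2.Parameter{
		{Name: "Name", Value: "es"},
		{Name: "Host", Value: "elasticsearch-logging-data.kubesphere-logging-system.svc"},
		{Name: "Port", Value: "9200"},
		{Name: "Index", Value: "fluentbit"},
		{Name: "Logstash_Format", Value: "On"},
		{Name: "Logstash_Prefix", Value: "ks-logstash-log"},
	}
	// Logstash_Format is on, so the returned Config carries Index
	// "ks-logstash-log" rather than "fluentbit".
	return ParseEsOutputParams(params)
}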
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
package logging
import (
"io"
"kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
)
type LoggingOperator interface {
GetCurrentStats(sf logging.SearchFilter) (v1alpha2.APIResponse, error)
CountLogsByInterval(sf logging.SearchFilter, interval string) (v1alpha2.APIResponse, error)
ExportLogs(sf logging.SearchFilter, w io.Writer) error
SearchLogs(sf logging.SearchFilter, from, size int64, order string) (v1alpha2.APIResponse, error)
}
type loggingOperator struct {
c logging.Interface
}
func NewLoggingOperator(client logging.Interface) LoggingOperator {
return &loggingOperator{client}
}
func (l loggingOperator) GetCurrentStats(sf logging.SearchFilter) (v1alpha2.APIResponse, error) {
res, err := l.c.GetCurrentStats(sf)
return v1alpha2.APIResponse{Statistics: &res}, err
}
func (l loggingOperator) CountLogsByInterval(sf logging.SearchFilter, interval string) (v1alpha2.APIResponse, error) {
res, err := l.c.CountLogsByInterval(sf, interval)
return v1alpha2.APIResponse{Histogram: &res}, err
}
func (l loggingOperator) ExportLogs(sf logging.SearchFilter, w io.Writer) error {
return l.c.ExportLogs(sf, w)
}
func (l loggingOperator) SearchLogs(sf logging.SearchFilter, from, size int64, order string) (v1alpha2.APIResponse, error) {
res, err := l.c.SearchLogs(sf, from, size, order)
return v1alpha2.APIResponse{Logs: &res}, err
}
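The operator is deliberately thin: each method forwards to the logging.Interface client and wraps the result in the APIResponse envelope the handlers serialize. A sketch of composing it, in the same package, assuming an already constructed client:
func clusterOverview(client logging.Interface, sf logging.SearchFilter) (v1alpha2.APIResponse, v1alpha2.APIResponse, error) {
	lo := NewLoggingOperator(client)
	stats, err := lo.GetCurrentStats(sf)
	if err != nil {
		return stats, v1alpha2.APIResponse{}, err
	}
	// The last ten records, newest first, matching the handler's defaults.
	logs, err := lo.SearchLogs(sf, 0, 10, "desc")
	return stats, logs, err
}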
......@@ -10,10 +10,10 @@ import (
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
"kubesphere.io/kubesphere/pkg/simple/client/cache"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
"kubesphere.io/kubesphere/pkg/simple/client/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/kubesphere"
"kubesphere.io/kubesphere/pkg/simple/client/ldap"
"kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
"kubesphere.io/kubesphere/pkg/simple/client/notification"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
......@@ -168,7 +168,7 @@ type Config struct {
S3Options *s3.Options `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"`
OpenPitrixOptions *openpitrix.Options `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"`
MonitoringOptions *prometheus.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"`
LoggingOptions *esclient.Options `json:"logging,omitempty" yaml:"logging,omitempty" mapstructure:"logging"`
LoggingOptions *elasticsearch.Options `json:"logging,omitempty" yaml:"logging,omitempty" mapstructure:"logging"`
// Options below are only loaded from configuration file, no command line flags for these options now.
KubeSphereOptions *kubesphere.Options `json:"-" yaml:"kubesphere,omitempty" mapstructure:"kubesphere"`
......@@ -194,7 +194,7 @@ func newConfig() *Config {
KubeSphereOptions: kubesphere.NewKubeSphereOptions(),
AlertingOptions: alerting.NewAlertingOptions(),
NotificationOptions: notification.NewNotificationOptions(),
LoggingOptions: esclient.NewElasticSearchOptions(),
LoggingOptions: elasticsearch.NewElasticSearchOptions(),
}
}
......
......@@ -7,10 +7,10 @@ import (
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
"kubesphere.io/kubesphere/pkg/simple/client/cache"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
"kubesphere.io/kubesphere/pkg/simple/client/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/kubesphere"
"kubesphere.io/kubesphere/pkg/simple/client/ldap"
"kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
"kubesphere.io/kubesphere/pkg/simple/client/notification"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
......@@ -87,7 +87,7 @@ func newTestConfig() *Config {
Endpoint: "http://prometheus.kubesphere-monitoring-system.svc",
SecondaryEndpoint: "http://prometheus.kubesphere-monitoring-system.svc",
},
LoggingOptions: &esclient.Options{
LoggingOptions: &elasticsearch.Options{
Host: "http://elasticsearch-logging.kubesphere-logging-system.svc:9200",
IndexPrefix: "elk",
Version: "6",
......
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package esclient
import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
v5 "kubesphere.io/kubesphere/pkg/simple/client/elasticsearch/versions/v5"
v6 "kubesphere.io/kubesphere/pkg/simple/client/elasticsearch/versions/v6"
v7 "kubesphere.io/kubesphere/pkg/simple/client/elasticsearch/versions/v7"
"strings"
"time"
"github.com/json-iterator/go"
)
const (
matchPhrase = iota
matchPhrasePrefix
regexpQuery
podNameMaxLength = 63
// max 10 characters + 1 hyphen
replicaSetSuffixMaxLength = 11
// a unique random string as suffix, 5 characters + 1 hyphen
randSuffixLength = 6
fieldPodName = "kubernetes.pod_name"
fieldContainerName = "kubernetes.container_name"
fieldLog = "log"
fieldNamespaceNameKeyword = "kubernetes.namespace_name.keyword"
fieldPodNameKeyword = "kubernetes.pod_name.keyword"
fieldContainerNameKeyword = "kubernetes.container_name.keyword"
)
const (
ElasticV5 = "5"
ElasticV6 = "6"
ElasticV7 = "7"
)
var jsonIter = jsoniter.ConfigCompatibleWithStandardLibrary
type ElasticSearchClient struct {
client Client
}
func NewLoggingClient(options *Options) (*ElasticSearchClient, error) {
var version, index string
esClient := &ElasticSearchClient{}
if options.Version == "" {
var err error
version, err = detectVersionMajor(options.Host)
if err != nil {
return nil, err
}
} else {
version = options.Version
}
if options.IndexPrefix != "" {
index = options.IndexPrefix
} else {
index = "logstash"
}
switch version {
case ElasticV5:
esClient.client = v5.New(options.Host, index)
case ElasticV6:
esClient.client = v6.New(options.Host, index)
case ElasticV7:
esClient.client = v7.New(options.Host, index)
default:
return nil, fmt.Errorf("unsupported elasticsearch version %s", version)
}
return esClient, nil
}
func (c *ElasticSearchClient) ES() *Client {
return &c.client
}
func detectVersionMajor(host string) (string, error) {
// The Info API is backward compatible across v5.x, v6.x and v7.x
es := v6.New(host, "")
res, err := es.Client.Info(
es.Client.Info.WithContext(context.Background()),
)
if err != nil {
return "", err
}
defer res.Body.Close()
var b map[string]interface{}
if err = json.NewDecoder(res.Body).Decode(&b); err != nil {
return "", err
}
if res.IsError() {
// Return the response status and error information.
e, _ := b["error"].(map[string]interface{})
return "", fmt.Errorf("[%s] type: %v, reason: %v", res.Status(), e["type"], e["reason"])
}
// get the major version
version, _ := b["version"].(map[string]interface{})
number, _ := version["number"].(string)
if number == "" {
return "", fmt.Errorf("failed to detect elastic version number")
}
v := strings.Split(number, ".")[0]
return v, nil
}
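A sketch of constructing the client in the same package; the host and index prefix are placeholders, and the Options fields are assumed to be Host, IndexPrefix and Version as used above. Leaving Version empty makes NewLoggingClient call detectVersionMajor before choosing the v5, v6 or v7 implementation:
func newExampleLoggingClient() (*ElasticSearchClient, error) {
	return NewLoggingClient(&Options{
		Host:        "http://elasticsearch-logging.kubesphere-logging-system.svc:9200",
		IndexPrefix: "ks-logstash-log",
		// Version left empty: the major version is detected via the Info API.
	})
}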
func createQueryRequest(param v1alpha2.QueryParameters) ([]byte, error) {
var request v1alpha2.Request
var mainBoolQuery v1alpha2.BoolFilter
if len(param.NamespaceWithCreationTime) != 0 {
var boolShould v1alpha2.BoolShould
for namespace, creationTime := range param.NamespaceWithCreationTime {
var boolFilter v1alpha2.BoolFilter
matchPhrase := v1alpha2.MatchPhrase{MatchPhrase: map[string]string{fieldNamespaceNameKeyword: namespace}}
rangeQuery := v1alpha2.RangeQuery{RangeSpec: v1alpha2.RangeSpec{TimeRange: v1alpha2.TimeRange{Gte: creationTime, Lte: ""}}}
boolFilter.Filter = append(boolFilter.Filter, matchPhrase)
boolFilter.Filter = append(boolFilter.Filter, rangeQuery)
boolShould.Should = append(boolShould.Should, v1alpha2.BoolQuery{Bool: boolFilter})
}
boolShould.MinimumShouldMatch = 1
mainBoolQuery.Filter = append(mainBoolQuery.Filter, v1alpha2.BoolQuery{Bool: boolShould})
}
if param.WorkloadFilter != nil {
boolQuery := makeBoolShould(regexpQuery, fieldPodNameKeyword, param.WorkloadFilter)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.PodFilter != nil {
boolQuery := makeBoolShould(matchPhrase, fieldPodNameKeyword, param.PodFilter)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.ContainerFilter != nil {
boolQuery := makeBoolShould(matchPhrase, fieldContainerNameKeyword, param.ContainerFilter)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.WorkloadQuery != nil {
boolQuery := makeBoolShould(matchPhrasePrefix, fieldPodName, param.WorkloadQuery)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.PodQuery != nil {
boolQuery := makeBoolShould(matchPhrasePrefix, fieldPodName, param.PodQuery)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.ContainerQuery != nil {
boolQuery := makeBoolShould(matchPhrasePrefix, fieldContainerName, param.ContainerQuery)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
if param.LogQuery != nil {
boolQuery := makeBoolShould(matchPhrasePrefix, fieldLog, param.LogQuery)
mainBoolQuery.Filter = append(mainBoolQuery.Filter, boolQuery)
}
rangeQuery := v1alpha2.RangeQuery{RangeSpec: v1alpha2.RangeSpec{TimeRange: v1alpha2.TimeRange{Gte: param.StartTime, Lte: param.EndTime}}}
mainBoolQuery.Filter = append(mainBoolQuery.Filter, rangeQuery)
if param.Operation == v1alpha2.OperationStatistics {
containerAgg := v1alpha2.AggField{Field: "kubernetes.docker_id.keyword"}
statisticAggs := v1alpha2.StatisticsAggs{ContainerAgg: v1alpha2.ContainerAgg{Cardinality: containerAgg}}
request.Aggs = statisticAggs
request.Size = 0
} else if param.Operation == v1alpha2.OperationHistogram {
var interval string
if param.Interval != "" {
interval = param.Interval
} else {
interval = "15m"
}
param.Interval = interval
request.Aggs = v1alpha2.HistogramAggs{HistogramAgg: v1alpha2.HistogramAgg{DateHistogram: v1alpha2.DateHistogram{Field: "time", Interval: interval}}}
request.Size = 0
} else {
request.From = param.From
request.Size = param.Size
var order string
if strings.Compare(strings.ToLower(param.Sort), "asc") == 0 {
order = "asc"
} else {
order = "desc"
}
request.Sorts = append(request.Sorts, v1alpha2.Sort{Order: v1alpha2.Order{Order: order}})
}
request.MainQuery = v1alpha2.BoolQuery{Bool: mainBoolQuery}
return json.Marshal(request)
}
func makeBoolShould(queryType int, field string, list []string) v1alpha2.BoolQuery {
var should []interface{}
for _, phrase := range list {
var q interface{}
switch queryType {
case matchPhrase:
q = v1alpha2.MatchPhrase{MatchPhrase: map[string]string{field: phrase}}
case matchPhrasePrefix:
q = v1alpha2.MatchPhrasePrefix{MatchPhrasePrefix: map[string]string{field: phrase}}
case regexpQuery:
q = v1alpha2.RegexpQuery{Regexp: map[string]string{field: makePodNameRegexp(phrase)}}
}
should = append(should, q)
}
return v1alpha2.BoolQuery{
Bool: v1alpha2.BoolShould{
Should: should,
MinimumShouldMatch: 1,
},
}
}
func makePodNameRegexp(workloadName string) string {
var regexp string
if len(workloadName) <= podNameMaxLength-replicaSetSuffixMaxLength-randSuffixLength {
// match deployment pods, eg. <deploy>-579dfbcddd-24znw
// replicaset rand string is limited to vowels
// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83
regexp += workloadName + "-[bcdfghjklmnpqrstvwxz2456789]{1,10}-[a-z0-9]{5}|"
// match statefulset pods, eg. <sts>-0
regexp += workloadName + "-[0-9]+|"
// match pods of daemonset or job, eg. <ds>-29tdk, <job>-5xqvl
regexp += workloadName + "-[a-z0-9]{5}"
} else if len(workloadName) <= podNameMaxLength-randSuffixLength {
replicaSetSuffixLength := podNameMaxLength - randSuffixLength - len(workloadName)
regexp += fmt.Sprintf("%s%d%s", workloadName+"-[bcdfghjklmnpqrstvwxz2456789]{", replicaSetSuffixLength, "}[a-z0-9]{5}|")
regexp += workloadName + "-[0-9]+|"
regexp += workloadName + "-[a-z0-9]{5}"
} else {
// The random suffix may overwrite part of the workload name if the name is too long
// This won't happen for a StatefulSet because such a pod would fail to be created
regexp += workloadName[:podNameMaxLength-randSuffixLength+1] + "[a-z0-9]{5}|"
regexp += workloadName + "-[0-9]+"
}
return regexp
}
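For a short workload name, the pattern above matches Deployment, StatefulSet and DaemonSet/Job pod names in a single alternation; an illustrative helper in the same package:
func examplePodNameRegexp() string {
	// Returns
	// "redis-master-[bcdfghjklmnpqrstvwxz2456789]{1,10}-[a-z0-9]{5}|redis-master-[0-9]+|redis-master-[a-z0-9]{5}"
	// which matches pods such as redis-master-7f9c6bbd9-x2k4q, redis-master-0 and redis-master-8b5ks.
	return makePodNameRegexp("redis-master")
}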
func (c *ElasticSearchClient) parseQueryResult(operation int, body []byte) (*v1alpha2.QueryResult, error) {
var queryResult v1alpha2.QueryResult
var response v1alpha2.Response
err := jsonIter.Unmarshal(body, &response)
if err != nil {
klog.Error(err)
return nil, err
}
if response.Shards.Successful != response.Shards.Total {
// some Elasticsearch shards failed or were skipped
klog.Warningf("Not all shards succeed, successful shards: %d, skipped shards: %d, failed shards: %d",
response.Shards.Successful, response.Shards.Skipped, response.Shards.Failed)
}
switch operation {
case v1alpha2.OperationQuery:
var readResult v1alpha2.ReadResult
readResult.Total = c.client.GetTotalHitCount(response.Hits.Total)
for _, hit := range response.Hits.Hits {
var logRecord v1alpha2.LogRecord
logRecord.Time = hit.Source.Time
logRecord.Log = hit.Source.Log
logRecord.Namespace = hit.Source.Kubernetes.Namespace
logRecord.Pod = hit.Source.Kubernetes.Pod
logRecord.Container = hit.Source.Kubernetes.Container
logRecord.Host = hit.Source.Kubernetes.Host
readResult.Records = append(readResult.Records, logRecord)
}
queryResult.Read = &readResult
case v1alpha2.OperationStatistics:
var statisticsResponse v1alpha2.StatisticsResponseAggregations
err := jsonIter.Unmarshal(response.Aggregations, &statisticsResponse)
if err != nil && response.Aggregations != nil {
klog.Error(err)
return nil, err
}
queryResult.Statistics = &v1alpha2.StatisticsResult{Containers: statisticsResponse.ContainerCount.Value, Logs: c.client.GetTotalHitCount(response.Hits.Total)}
case v1alpha2.OperationHistogram:
var histogramResult v1alpha2.HistogramResult
histogramResult.Total = c.client.GetTotalHitCount(response.Hits.Total)
var histogramAggregations v1alpha2.HistogramAggregations
err = jsonIter.Unmarshal(response.Aggregations, &histogramAggregations)
if err != nil && response.Aggregations != nil {
klog.Error(err)
return nil, err
}
for _, histogram := range histogramAggregations.HistogramAggregation.Histograms {
var histogramRecord v1alpha2.HistogramRecord
histogramRecord.Time = histogram.Time
histogramRecord.Count = histogram.Count
histogramResult.Histograms = append(histogramResult.Histograms, histogramRecord)
}
queryResult.Histogram = &histogramResult
case v1alpha2.OperationExport:
var readResult v1alpha2.ReadResult
readResult.ScrollID = response.ScrollId
for _, hit := range response.Hits.Hits {
var logRecord v1alpha2.LogRecord
logRecord.Log = hit.Source.Log
readResult.Records = append(readResult.Records, logRecord)
}
queryResult.Read = &readResult
}
return &queryResult, nil
}
func (c *ElasticSearchClient) Query(param v1alpha2.QueryParameters) (*v1alpha2.QueryResult, error) {
var queryResult = new(v1alpha2.QueryResult)
if param.NamespaceNotFound {
queryResult = new(v1alpha2.QueryResult)
switch param.Operation {
case v1alpha2.OperationStatistics:
queryResult.Statistics = new(v1alpha2.StatisticsResult)
case v1alpha2.OperationHistogram:
queryResult.Histogram = new(v1alpha2.HistogramResult)
default:
queryResult.Read = new(v1alpha2.ReadResult)
}
return queryResult, nil
}
query, err := createQueryRequest(param)
if err != nil {
klog.Error(err)
return nil, err
}
body, err := c.client.Search(query, param.ScrollTimeout)
if err != nil {
klog.Error(err)
return nil, err
}
return c.parseQueryResult(param.Operation, body)
}
func (c *ElasticSearchClient) Scroll(scrollId string) (*v1alpha2.QueryResult, error) {
body, err := c.client.Scroll(scrollId, time.Minute)
if err != nil {
klog.Error(err)
return nil, err
}
return c.parseQueryResult(v1alpha2.OperationExport, body)
}
func (c *ElasticSearchClient) ClearScroll(scrollId string) {
c.client.ClearScroll(scrollId)
}
package esclient
import "time"
type Client interface {
// Perform Search API
Search(body []byte, scrollTimeout time.Duration) ([]byte, error)
Scroll(scrollId string, scrollTimeout time.Duration) ([]byte, error)
ClearScroll(scrollId string)
GetTotalHitCount(v interface{}) int64
}
......@@ -5,10 +5,10 @@ import (
"kubesphere.io/kubesphere/pkg/simple/client/cache"
"kubesphere.io/kubesphere/pkg/simple/client/devops"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
esclient "kubesphere.io/kubesphere/pkg/simple/client/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/kubesphere"
"kubesphere.io/kubesphere/pkg/simple/client/ldap"
"kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
"kubesphere.io/kubesphere/pkg/simple/client/prometheus"
......@@ -30,7 +30,7 @@ type ClientSetOptions struct {
openPitrixOptions *openpitrix.Options
prometheusOptions *prometheus.Options
kubesphereOptions *kubesphere.Options
elasticSearhOptions *esclient.Options
elasticsearhOptions *elasticsearch.Options
}
func NewClientSetOptions() *ClientSetOptions {
......@@ -45,7 +45,7 @@ func NewClientSetOptions() *ClientSetOptions {
openPitrixOptions: openpitrix.NewOptions(),
prometheusOptions: prometheus.NewPrometheusOptions(),
kubesphereOptions: kubesphere.NewKubeSphereOptions(),
elasticSearhOptions: esclient.NewElasticSearchOptions(),
elasticsearhOptions: elasticsearch.NewElasticSearchOptions(),
}
}
......@@ -99,8 +99,8 @@ func (c *ClientSetOptions) SetKubeSphereOptions(options *kubesphere.Options) *Cl
return c
}
func (c *ClientSetOptions) SetElasticSearchOptions(options *esclient.Options) *ClientSetOptions {
c.elasticSearhOptions = options
func (c *ClientSetOptions) SetElasticSearchOptions(options *elasticsearch.Options) *ClientSetOptions {
c.elasticsearhOptions = options
return c
}
......@@ -122,7 +122,7 @@ type ClientSet struct {
prometheusClient *prometheus.Client
openpitrixClient openpitrix.Client
kubesphereClient *kubesphere.Client
elasticSearchClient *esclient.ElasticSearchClient
elasticSearchClient *elasticsearch.Elasticsearch
}
var mutex sync.Mutex
......@@ -347,10 +347,10 @@ func (cs *ClientSet) KubeSphere() *kubesphere.Client {
return cs.kubesphereClient
}
func (cs *ClientSet) ElasticSearch() (*esclient.ElasticSearchClient, error) {
func (cs *ClientSet) ElasticSearch() (*elasticsearch.Elasticsearch, error) {
var err error
if cs.csoptions.elasticSearhOptions == nil || cs.csoptions.elasticSearhOptions.Host == "" {
if cs.csoptions.elasticsearhOptions == nil || cs.csoptions.elasticsearhOptions.Host == "" {
return nil, ErrClientSetNotEnabled
}
......@@ -361,7 +361,7 @@ func (cs *ClientSet) ElasticSearch() (*esclient.ElasticSearchClient, error) {
defer mutex.Unlock()
if cs.elasticSearchClient == nil {
cs.elasticSearchClient, err = esclient.NewLoggingClient(cs.csoptions.elasticSearhOptions)
cs.elasticSearchClient, err = elasticsearch.NewElasticsearch(cs.csoptions.elasticsearhOptions)
if err != nil {
return nil, err
}
......
package elasticsearch
import (
"fmt"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
"time"
)
const (
podNameMaxLength = 63
podNameSuffixLength = 6 // 5 characters + 1 hyphen
replicaSetSuffixMaxLength = 11 // max 10 characters + 1 hyphen
)
type bodyBuilder struct {
Body
}
func newBodyBuilder() *bodyBuilder {
return &bodyBuilder{}
}
func (bb *bodyBuilder) bytes() ([]byte, error) {
return json.Marshal(bb.Body)
}
// The mainBool method builds the query part of the request body.
// TODO: Use a more elegant package for building the query body; `elastic/go-elasticsearch` doesn't currently provide one.
//
// Example:
// GET kapis/logging.kubesphere.io/v1alpha2/cluster?start_time=0&end_time=156576063993&namespaces=kubesphere-system&pod_query=ks-apiserver
// -----
//{
// "from":0,
// "size":10,
// "sort":[
// {
// "time": "desc"
// }
// ],
// "query":{
// "bool":{
// "filter":[
// {
// "bool":{
// "should":[
// {
// "bool":{
// "filter":[
// {
// "match_phrase":{
// "kubernetes.namespace_name.keyword":"kubesphere-system"
// }
// },
// {
// "range":{
// "time":{
// "gte":"1572315987000"
// }
// }
// }
// ]
// }
// }
// ],
// "minimum_should_match":1
// }
// },
// {
// "bool":{
// "should":[
// {
// "match_phrase_prefix":{
// "kubernetes.pod_name":"ks-apiserver"
// }
// }
// ],
// "minimum_should_match":1
// }
// },
// {
// "range":{
// "time":{
// "gte":"0",
// "lte":"156576063993"
// }
// }
// }
// ]
// }
// }
//}
func (bb *bodyBuilder) mainBool(sf logging.SearchFilter) *bodyBuilder {
var ms []Match
// literal matching
if len(sf.NamespaceFilter) != 0 {
var b Bool
for ns := range sf.NamespaceFilter {
match := Match{
Bool: &Bool{
Filter: []Match{
{
MatchPhrase: map[string]string{
"kubernetes.namespace_name.keyword": ns,
},
},
{
Range: &Range{
Time: &Time{
Gte: func() *time.Time { t := sf.NamespaceFilter[ns]; return &t }(),
},
},
},
},
},
}
b.Should = append(b.Should, match)
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.WorkloadFilter != nil {
var b Bool
for _, wk := range sf.WorkloadFilter {
b.Should = append(b.Should, Match{Regexp: map[string]string{"kubernetes.pod_name.keyword": podNameRegexp(wk)}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.PodFilter != nil {
var b Bool
for _, po := range sf.PodFilter {
b.Should = append(b.Should, Match{MatchPhrase: map[string]string{"kubernetes.pod_name.keyword": po}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.ContainerFilter != nil {
var b Bool
for _, c := range sf.ContainerFilter {
b.Should = append(b.Should, Match{MatchPhrase: map[string]string{"kubernetes.container_name.keyword": c}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
// fuzzy matching
if sf.WorkloadSearch != nil {
var b Bool
for _, wk := range sf.WorkloadSearch {
b.Should = append(b.Should, Match{MatchPhrasePrefix: map[string]string{"kubernetes.pod_name": wk}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.PodSearch != nil {
var b Bool
for _, po := range sf.PodSearch {
b.Should = append(b.Should, Match{MatchPhrasePrefix: map[string]string{"kubernetes.pod_name": po}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.ContainerSearch != nil {
var b Bool
for _, c := range sf.ContainerSearch {
b.Should = append(b.Should, Match{MatchPhrasePrefix: map[string]string{"kubernetes.container_name": c}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if sf.LogSearch != nil {
var b Bool
for _, l := range sf.LogSearch {
b.Should = append(b.Should, Match{MatchPhrasePrefix: map[string]string{"log": l}})
}
b.MinimumShouldMatch = 1
ms = append(ms, Match{Bool: &b})
}
if !sf.Starttime.IsZero() || !sf.Endtime.IsZero() {
fromTo := Match{
Range: &Range{&Time{
Gte: &sf.Starttime,
Lte: &sf.Endtime,
}},
}
ms = append(ms, fromTo)
}
bb.Body.Query = &Query{Bool{Filter: ms}}
return bb
}
func (bb *bodyBuilder) cardinalityAggregation() *bodyBuilder {
bb.Body.Aggs = &Aggs{
CardinalityAggregation: &CardinalityAggregation{
&Cardinality{
Field: "kubernetes.docker_id.keyword",
},
},
}
return bb
}
func (bb *bodyBuilder) dateHistogramAggregation(interval string) *bodyBuilder {
if interval == "" {
interval = "15m"
}
bb.Body.Aggs = &Aggs{
DateHistogramAggregation: &DateHistogramAggregation{
&DateHistogram{
Field: "time",
Interval: interval,
},
},
}
return bb
}
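// For reference, the two aggregation builders above serialize (per the struct
// tags in this package) into request-body fragments along these lines
// (illustrative sketch; the default interval "15m" applies when none is given):
//
//   cardinalityAggregation:   "aggs":{"container_count":{"cardinality":{"field":"kubernetes.docker_id.keyword"}}}
//   dateHistogramAggregation: "aggs":{"log_count_over_time":{"date_histogram":{"field":"time","interval":"15m"}}}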
func (bb *bodyBuilder) from(n int64) *bodyBuilder {
bb.From = n
return bb
}
func (bb *bodyBuilder) size(n int64) *bodyBuilder {
bb.Size = n
return bb
}
func (bb *bodyBuilder) sort(o string) *bodyBuilder {
if o != "asc" {
o = "desc"
}
bb.Sorts = []map[string]string{{"time": o}}
return bb
}
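// Putting the builder together: a minimal sketch of how a search request can
// be assembled (the filter value below is made up for illustration):
//
//   body, err := newBodyBuilder().
//       mainBool(logging.SearchFilter{PodSearch: []string{"ks-apiserver"}}).
//       from(0).
//       size(10).
//       sort("desc").
//       bytes()
//
// which produces a request body shaped like the example documented above mainBool.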
func podNameRegexp(workloadName string) string {
var regexp string
if len(workloadName) <= podNameMaxLength-replicaSetSuffixMaxLength-podNameSuffixLength {
// match deployment pods, eg. <deploy>-579dfbcddd-24znw
// the replicaset random suffix contains no vowels
// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83
regexp += workloadName + "-[bcdfghjklmnpqrstvwxz2456789]{1,10}-[a-z0-9]{5}|"
// match statefulset pods, eg. <sts>-0
regexp += workloadName + "-[0-9]+|"
// match pods of daemonset or job, eg. <ds>-29tdk, <job>-5xqvl
regexp += workloadName + "-[a-z0-9]{5}"
} else if len(workloadName) <= podNameMaxLength-podNameSuffixLength {
replicaSetSuffixLength := podNameMaxLength - podNameSuffixLength - len(workloadName)
regexp += fmt.Sprintf("%s%d%s", workloadName+"-[bcdfghjklmnpqrstvwxz2456789]{", replicaSetSuffixLength, "}[a-z0-9]{5}|")
regexp += workloadName + "-[0-9]+|"
regexp += workloadName + "-[a-z0-9]{5}"
} else {
// If the workload name is too long, the random suffix may overwrite part of it.
// This won't happen for a StatefulSet, since creating a StatefulSet with such a long name fails.
regexp += workloadName[:podNameMaxLength-podNameSuffixLength+1] + "[a-z0-9]{5}|"
regexp += workloadName + "-[0-9]+"
}
return regexp
}
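// As an illustration, for a short workload name such as "ks-apiserver" the
// function above yields a pattern covering all three pod-name shapes:
//
//   ks-apiserver-[bcdfghjklmnpqrstvwxz2456789]{1,10}-[a-z0-9]{5}|ks-apiserver-[0-9]+|ks-apiserver-[a-z0-9]{5}
//
// i.e. Deployment pods (ks-apiserver-579dfbcddd-24znw), StatefulSet pods
// (ks-apiserver-0) and DaemonSet/Job pods (ks-apiserver-29tdk).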
func parseResponse(body []byte) (Response, error) {
var res Response
err := json.Unmarshal(body, &res)
if err != nil {
klog.Error(err)
return Response{}, err
}
return res, nil
}
package elasticsearch
import (
"github.com/google/go-cmp/cmp"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
"testing"
"time"
)
func TestMainBool(t *testing.T) {
var tests = []struct {
description string
searchFilter logging.SearchFilter
expected *bodyBuilder
}{
{
description: "filter 2 namespaces",
searchFilter: logging.SearchFilter{
NamespaceFilter: map[string]time.Time{
"kubesphere-system": time.Unix(1582000000, 0),
"kubesphere-logging-system": time.Unix(1582969999, 0),
},
},
expected: &bodyBuilder{Body{
Query: &Query{
Bool: Bool{
Filter: []Match{
{
Bool: &Bool{
Should: []Match{
{
Bool: &Bool{
Filter: []Match{
{
MatchPhrase: map[string]string{"kubernetes.namespace_name.keyword": "kubesphere-system"},
},
{
Range: &Range{&Time{Gte: func() *time.Time { t := time.Unix(1582000000, 0); return &t }()}},
},
},
},
},
{
Bool: &Bool{
Filter: []Match{
{
MatchPhrase: map[string]string{"kubernetes.namespace_name.keyword": "kubesphere-logging-system"},
},
{
Range: &Range{&Time{Gte: func() *time.Time { t := time.Unix(1582969999, 0); return &t }()}},
},
},
},
},
},
MinimumShouldMatch: 1,
},
},
},
},
},
}},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
body, err := newBodyBuilder().mainBool(test.searchFilter).bytes()
expected, _ := test.expected.bytes()
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(body, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func TestCardinalityAggregation(t *testing.T) {
var test = struct {
description string
searchFilter logging.SearchFilter
expected *bodyBuilder
}{
description: "add cardinality aggregation",
searchFilter: logging.SearchFilter{
LogSearch: []string{"info"},
},
expected: &bodyBuilder{Body{
Query: &Query{
Bool: Bool{
Filter: []Match{
{
Bool: &Bool{
Should: []Match{
{
MatchPhrasePrefix: map[string]string{"log": "info"},
},
},
MinimumShouldMatch: 1,
},
},
},
},
},
Aggs: &Aggs{
CardinalityAggregation: &CardinalityAggregation{
Cardinality: &Cardinality{Field: "kubernetes.docker_id.keyword"},
},
},
}},
}
t.Run(test.description, func(t *testing.T) {
body := newBodyBuilder().mainBool(test.searchFilter).cardinalityAggregation()
if diff := cmp.Diff(body, test.expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
}
})
}
package elasticsearch
import "time"
// --------------------------------------------- Request Body ---------------------------------------------
// More info: https://www.elastic.co/guide/en/elasticsearch/reference/current/getting-started-search-API.html
type Body struct {
From int64 `json:"from,omitempty"`
Size int64 `json:"size,omitempty"`
Sorts []map[string]string `json:"sort,omitempty"`
*Query `json:"query,omitempty"`
*Aggs `json:"aggs,omitempty"`
}
type Query struct {
Bool `json:"bool,omitempty"`
}
// Example:
// {bool: {filter: <[]Match>}}
// {bool: {should: <[]Match>, minimum_should_match: 1}}
type Bool struct {
Filter []Match `json:"filter,omitempty"`
Should []Match `json:"should,omitempty"`
MinimumShouldMatch int32 `json:"minimum_should_match,omitempty"`
}
// Example: []Match
// [
// {
// bool: <Bool>
// },
// {
// match_phrase: {
// <string>: <string>
// }
// },
// ...
// ]
type Match struct {
*Bool `json:"bool,omitempty"`
MatchPhrase map[string]string `json:"match_phrase,omitempty"`
MatchPhrasePrefix map[string]string `json:"match_phrase_prefix,omitempty"`
Regexp map[string]string `json:"regexp,omitempty"`
*Range `json:"range,omitempty"`
}
type Range struct {
*Time `json:"time,omitempty"`
}
type Time struct {
Gte *time.Time `json:"gte,omitempty"`
Lte *time.Time `json:"lte,omitempty"`
}
type Aggs struct {
*CardinalityAggregation `json:"container_count,omitempty"`
*DateHistogramAggregation `json:"log_count_over_time,omitempty"`
}
type CardinalityAggregation struct {
*Cardinality `json:"cardinality,omitempty"`
}
type Cardinality struct {
Field string `json:"field,omitempty"`
}
type DateHistogramAggregation struct {
*DateHistogram `json:"date_histogram,omitempty"`
}
type DateHistogram struct {
Field string `json:"field,omitempty"`
Interval string `json:"interval,omitempty"`
}
// --------------------------------------------- Response Body ---------------------------------------------
type Response struct {
ScrollId string `json:"_scroll_id,omitempty"`
Hits `json:"hits,omitempty"`
Aggregations `json:"aggregations,omitempty"`
}
type Hits struct {
Total interface{} `json:"total"` // as of Elasticsearch 7.x, hits.total is an object rather than a number, so keep it as interface{}
AllHits []Hit `json:"hits"`
}
type Hit struct {
Source `json:"_source"`
Sort []int64 `json:"sort"`
}
type Source struct {
Log string `json:"log"`
Time string `json:"time"`
Kubernetes `json:"kubernetes"`
}
type Kubernetes struct {
Namespace string `json:"namespace_name"`
Pod string `json:"pod_name"`
Container string `json:"container_name"`
Host string `json:"host"`
}
type Aggregations struct {
ContainerCount `json:"container_count"`
LogCountOverTime `json:"log_count_over_time"`
}
type ContainerCount struct {
Value int64 `json:"value"`
}
type LogCountOverTime struct {
Buckets []Bucket `json:"buckets"`
}
type Bucket struct {
Time int64 `json:"key"`
Count int64 `json:"doc_count"`
}
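// A minimal sketch of how a search response maps onto the types above
// (values are made up; the shapes follow the struct tags):
//
//   {
//     "hits": {
//       "total": 241222,
//       "hits": [ { "_source": { "log": "...", "time": "...", "kubernetes": { "namespace_name": "kube-system" } } } ]
//     },
//     "aggregations": { "container_count": { "value": 93 } }
//   }
//
// decodes into a Response whose Hits.Total is 241222 (kept as interface{} to
// absorb the 7.x object form), whose single Hit carries the log line and
// kubernetes metadata in Source, and whose Aggregations.ContainerCount.Value is 93.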
package elasticsearch
import (
"bytes"
"context"
"fmt"
jsoniter "github.com/json-iterator/go"
"io"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
v5 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v5"
v6 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v6"
v7 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v7"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"strings"
)
const (
ElasticV5 = "5"
ElasticV6 = "6"
ElasticV7 = "7"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Elasticsearch implements the logging interface
type Elasticsearch struct {
c client
}
// versioned es client interface
type client interface {
// Perform Search API
Search(body []byte) ([]byte, error)
Scroll(id string) ([]byte, error)
ClearScroll(id string)
GetTotalHitCount(v interface{}) int64
}
func NewElasticsearch(options *Options) (*Elasticsearch, error) {
var version, index string
es := &Elasticsearch{}
if options.Version == "" {
var err error
version, err = detectVersionMajor(options.Host)
if err != nil {
return nil, err
}
} else {
version = options.Version
}
if options.IndexPrefix != "" {
index = options.IndexPrefix
} else {
index = "logstash"
}
switch version {
case ElasticV5:
es.c = v5.New(options.Host, index)
case ElasticV6:
es.c = v6.New(options.Host, index)
case ElasticV7:
es.c = v7.New(options.Host, index)
default:
return nil, fmt.Errorf("unsupported elasticsearch version %s", version)
}
return es, nil
}
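// A minimal usage sketch (the host and index prefix are placeholders; the
// Options fields are those referenced above, i.e. Host, IndexPrefix and Version):
//
//   es, err := NewElasticsearch(&Options{
//       Host:        "http://elasticsearch-logging-data.kubesphere-logging-system:9200",
//       IndexPrefix: "ks-logstash-log",
//       // Version left empty, so the major version is auto-detected via detectVersionMajor
//   })
//   if err != nil {
//       // handle the error, e.g. fall back to the builtin Kubernetes log API
//   }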
func (es *Elasticsearch) ES() *client {
return &es.c
}
func detectVersionMajor(host string) (string, error) {
// The Info API is backward compatible across v5.x, v6.x and v7.x
es := v6.New(host, "")
res, err := es.Client.Info(
es.Client.Info.WithContext(context.Background()),
)
if err != nil {
return "", err
}
defer res.Body.Close()
var b map[string]interface{}
if err = json.NewDecoder(res.Body).Decode(&b); err != nil {
return "", err
}
if res.IsError() {
// Return the response status and error information.
e, _ := b["error"].(map[string]interface{})
return "", fmt.Errorf("[%s] type: %v, reason: %v", res.Status(), e["type"], e["reason"])
}
// get the major version
version, _ := b["version"].(map[string]interface{})
number, _ := version["number"].(string)
if number == "" {
return "", fmt.Errorf("failed to detect elastic version number")
}
v := strings.Split(number, ".")[0]
return v, nil
}
func (es Elasticsearch) GetCurrentStats(sf logging.SearchFilter) (logging.Statistics, error) {
body, err := newBodyBuilder().
mainBool(sf).
cardinalityAggregation().
bytes()
if err != nil {
return logging.Statistics{}, err
}
b, err := es.c.Search(body)
if err != nil {
return logging.Statistics{}, err
}
res, err := parseResponse(b)
if err != nil {
return logging.Statistics{}, err
}
return logging.Statistics{
Containers: res.Value,
Logs: es.c.GetTotalHitCount(res.Total),
}, nil
}
func (es Elasticsearch) CountLogsByInterval(sf logging.SearchFilter, interval string) (logging.Histogram, error) {
body, err := newBodyBuilder().
mainBool(sf).
dateHistogramAggregation(interval).
bytes()
if err != nil {
return logging.Histogram{}, err
}
b, err := es.c.Search(body)
if err != nil {
return logging.Histogram{}, err
}
res, err := parseResponse(b)
if err != nil {
return logging.Histogram{}, err
}
var h logging.Histogram
h.Total = es.c.GetTotalHitCount(res.Total)
for _, b := range res.Buckets {
h.Buckets = append(h.Buckets, logging.Bucket{
Time: b.Time,
Count: b.Count,
})
}
return h, nil
}
func (es Elasticsearch) SearchLogs(sf logging.SearchFilter, f, s int64, o string) (logging.Logs, error) {
body, err := newBodyBuilder().
mainBool(sf).
from(f).
size(s).
sort(o).
bytes()
if err != nil {
return logging.Logs{}, err
}
b, err := es.c.Search(body)
if err != nil {
return logging.Logs{}, err
}
res, err := parseResponse(b)
if err != nil {
return logging.Logs{}, err
}
var l logging.Logs
l.Total = es.c.GetTotalHitCount(res.Total)
for _, hit := range res.AllHits {
l.Records = append(l.Records, logging.Record{
Log: hit.Log,
Time: hit.Time,
Namespace: hit.Namespace,
Pod: hit.Pod,
Container: hit.Container,
})
}
return l, nil
}
func (es Elasticsearch) ExportLogs(sf logging.SearchFilter, w io.Writer) error {
var id string
var from int64 = 0
var size int64 = 1000
res, err := es.SearchLogs(sf, from, size, "desc")
defer es.ClearScroll(id)
if err != nil {
return err
}
if res.Records == nil || len(res.Records) == 0 {
return nil
}
// limit to retrieve max 100k records
for i := 0; i < 100; i++ {
res, id, err = es.scroll(id)
if err != nil {
return err
}
if res.Records == nil || len(res.Records) == 0 {
return nil
}
output := new(bytes.Buffer)
for _, r := range res.Records {
output.WriteString(fmt.Sprintf(`%s`, stringutils.StripAnsi(r.Log)))
}
_, err = io.Copy(w, output)
if err != nil {
return err
}
}
return nil
}
func (es *Elasticsearch) scroll(id string) (logging.Logs, string, error) {
b, err := es.c.Scroll(id)
if err != nil {
return logging.Logs{}, id, err
}
res, err := parseResponse(b)
if err != nil {
return logging.Logs{}, id, err
}
var l logging.Logs
for _, hit := range res.AllHits {
l.Records = append(l.Records, logging.Record{
Log: hit.Log,
})
}
return l, res.ScrollId, nil
}
func (es *Elasticsearch) ClearScroll(id string) {
if id != "" {
es.c.ClearScroll(id)
}
}
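// A minimal end-to-end sketch, given an *Elasticsearch client `es` from
// NewElasticsearch and a logging.SearchFilter `sf` built by the caller:
//
//   logs, err := es.SearchLogs(sf, 0, 10, "desc") // ten most recent matching records
//   exportErr := es.ExportLogs(sf, os.Stdout)     // stream up to roughly 100k records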
package elasticsearch
import (
"github.com/google/go-cmp/cmp"
"kubesphere.io/kubesphere/pkg/simple/client/logging"
v5 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v5"
v6 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v6"
v7 "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch/versions/v7"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func MockElasticsearchService(pattern string, fakeResp string) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
res.Write([]byte(fakeResp))
})
return httptest.NewServer(mux)
}
func TestDetectVersionMajor(t *testing.T) {
var tests = []struct {
description string
fakeResp string
expected string
expectedError bool
}{
{
description: "detect es 6.x version number",
fakeResp: `{
"name" : "elasticsearch-logging-data-0",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "uLm0838MSd60T1XEh5P2Qg",
"version" : {
"number" : "6.7.0",
"build_flavor" : "oss",
"build_type" : "docker",
"build_hash" : "8453f77",
"build_date" : "2019-03-21T15:32:29.844721Z",
"build_snapshot" : false,
"lucene_version" : "7.7.0",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}`,
expected: ElasticV6,
expectedError: false,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
es := MockElasticsearchService("/", test.fakeResp)
defer es.Close()
v, err := detectVersionMajor(es.URL)
if err == nil && test.expectedError {
t.Fatalf("expected error while got nothing")
} else if err != nil && !test.expectedError {
t.Fatal(err)
}
if v != test.expected {
t.Fatalf("expected get version %s, but got %s", test.expected, v)
}
})
}
}
func TestGetCurrentStats(t *testing.T) {
var tests = []struct {
description string
searchFilter logging.SearchFilter
fakeVersion string
fakeResp string
expected logging.Statistics
expectedError bool
}{
{
description: "[es 6.x] run as admin",
searchFilter: logging.SearchFilter{},
fakeVersion: ElasticV6,
fakeResp: `{
"took": 171,
"timed_out": false,
"_shards": {
"total": 10,
"successful": 10,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 241222,
"max_score": 1.0,
"hits": [
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "Hn1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:29.015Z",
"log": " value: \"hostpath\"\n",
"time": "2020-02-28T19:25:29.015492329Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "I31GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:33.103Z",
"log": "I0228 19:25:33.102631 1 controller.go:1040] provision \"kubesphere-system/redis-pvc\" class \"local\": trying to save persistentvolume \"pvc-be6d127d-9366-4ea8-b1ce-f30c1b3a447b\"\n",
"time": "2020-02-28T19:25:33.103075891Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "JX1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:33.113Z",
"log": "I0228 19:25:33.112200 1 controller.go:1088] provision \"kubesphere-system/redis-pvc\" class \"local\": succeeded\n",
"time": "2020-02-28T19:25:33.113110332Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "Kn1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:34.168Z",
"log": " value: \"hostpath\"\n",
"time": "2020-02-28T19:25:34.168983384Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "LH1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:34.168Z",
"log": " value: \"/var/openebs/local/\"\n",
"time": "2020-02-28T19:25:34.168997393Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "NX1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:25:42.868Z",
"log": "I0228 19:25:42.868413 1 config.go:83] SC local has config:- name: StorageType\n",
"time": "2020-02-28T19:25:42.868578188Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "Q31GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:26:13.881Z",
"log": "- name: BasePath\n",
"time": "2020-02-28T19:26:13.881180681Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "S31GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:26:14.597Z",
"log": " value: \"/var/openebs/local/\"\n",
"time": "2020-02-28T19:26:14.597702238Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "TH1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:26:14.597Z",
"log": "I0228 19:26:14.597007 1 provisioner_hostpath.go:42] Creating volume pvc-c3b1e67f-00d2-407d-8c45-690bb273c16a at ks-allinone:/var/openebs/local/pvc-c3b1e67f-00d2-407d-8c45-690bb273c16a\n",
"time": "2020-02-28T19:26:14.597708432Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
},
{
"_index": "ks-logstash-log-2020.02.28",
"_type": "flb_type",
"_id": "UX1GjXABMO5aQxyNsyxy",
"_score": 1.0,
"_source": {
"@timestamp": "2020-02-28T19:26:15.920Z",
"log": "I0228 19:26:15.915071 1 event.go:221] Event(v1.ObjectReference{Kind:\"PersistentVolumeClaim\", Namespace:\"kubesphere-system\", Name:\"mysql-pvc\", UID:\"1e87deb5-eaec-475f-8eb6-8613b3be80a4\", APIVersion:\"v1\", ResourceVersion:\"2397\", FieldPath:\"\"}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-1e87deb5-eaec-475f-8eb6-8613b3be80a4\n",
"time": "2020-02-28T19:26:15.920650572Z",
"kubernetes": {
"pod_name": "openebs-localpv-provisioner-55c66b57b4-jgtjc",
"namespace_name": "kube-system",
"host": "ks-allinone",
"container_name": "openebs-localpv-provisioner",
"docker_id": "cac01cd01cc79d8a8903ddbe6fbde9ac7497919a3f33c61861443703a9e08b39",
"container_hash": "25d789bcd3d12a4ba50bbb56eed1de33279d04352adbba8fd7e3b7b938aec806"
}
}
}
]
},
"aggregations": {
"container_count": {
"value": 93
}
}
}`,
expected: logging.Statistics{
Containers: 93,
Logs: 241222,
},
expectedError: false,
},
{
description: "[es 6.x] index not found",
searchFilter: logging.SearchFilter{
NamespaceFilter: map[string]time.Time{
"workspace-1-project-a": time.Unix(1582000000, 0),
"workspace-1-project-b": time.Unix(1582333333, 0),
},
},
fakeVersion: ElasticV6,
fakeResp: `{
"error": {
"root_cause": [
{
"type": "index_not_found_exception",
"reason": "no such index",
"resource.type": "index_or_alias",
"resource.id": "ks-lsdfsdfsdfs",
"index_uuid": "_na_",
"index": "ks-lsdfsdfsdfs"
}
],
"type": "index_not_found_exception",
"reason": "no such index",
"resource.type": "index_or_alias",
"resource.id": "ks-lsdfsdfsdfs",
"index_uuid": "_na_",
"index": "ks-lsdfsdfsdfs"
},
"status": 404
}`,
expected: logging.Statistics{
Containers: 0,
Logs: 0,
},
expectedError: true,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
es := MockElasticsearchService("/", test.fakeResp)
defer es.Close()
clientv5 := Elasticsearch{c: v5.New(es.URL, "ks-logstash-log")}
clientv6 := Elasticsearch{c: v6.New(es.URL, "ks-logstash-log")}
clientv7 := Elasticsearch{c: v7.New(es.URL, "ks-logstash-log")}
var stats logging.Statistics
var err error
switch test.fakeVersion {
case ElasticV5:
stats, err = clientv5.GetCurrentStats(test.searchFilter)
case ElasticV6:
stats, err = clientv6.GetCurrentStats(test.searchFilter)
case ElasticV7:
stats, err = clientv7.GetCurrentStats(test.searchFilter)
}
if err != nil && !test.expectedError {
t.Fatal(err)
} else if diff := cmp.Diff(stats, test.expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
}
})
}
}
package esclient
package elasticsearch
import (
"github.com/spf13/pflag"
......@@ -33,7 +33,7 @@ func (s *Options) Validate() []error {
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
fs.StringVar(&s.Host, "elasticsearch-host", c.Host, ""+
"ElasticSearch logging service host. KubeSphere is using elastic as log store, "+
"Elasticsearch logging service host. KubeSphere is using elastic as log store, "+
"if this filed left blank, KubeSphere will use kubernetes builtin log API instead, and"+
" the following elastic search options will be ignored.")
......@@ -41,6 +41,6 @@ func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
"Index name prefix. KubeSphere will retrieve logs against indices matching the prefix.")
fs.StringVar(&s.Version, "elasticsearch-version", c.Version, ""+
"ElasticSearch major version, e.g. 5/6/7, if left blank, will detect automatically."+
"Elasticsearch major version, e.g. 5/6/7, if left blank, will detect automatically."+
"Currently, minimum supported version is 5.x")
}
......@@ -29,12 +29,12 @@ func New(address string, index string) *Elastic {
return &Elastic{client: client, index: index}
}
func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Search(body []byte) ([]byte, error) {
response, err := e.client.Search(
e.client.Search.WithContext(context.Background()),
e.client.Search.WithIndex(fmt.Sprintf("%s*", e.index)),
e.client.Search.WithBody(bytes.NewBuffer(body)),
e.client.Search.WithScroll(scrollTimeout))
e.client.Search.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......@@ -47,11 +47,11 @@ func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, erro
return ioutil.ReadAll(response.Body)
}
func (e *Elastic) Scroll(scrollId string, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Scroll(id string) ([]byte, error) {
response, err := e.client.Scroll(
e.client.Scroll.WithContext(context.Background()),
e.client.Scroll.WithScrollID(scrollId),
e.client.Scroll.WithScroll(scrollTimeout))
e.client.Scroll.WithScrollID(id),
e.client.Scroll.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......
......@@ -29,12 +29,12 @@ func New(address string, index string) *Elastic {
return &Elastic{Client: client, index: index}
}
func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Search(body []byte) ([]byte, error) {
response, err := e.Client.Search(
e.Client.Search.WithContext(context.Background()),
e.Client.Search.WithIndex(fmt.Sprintf("%s*", e.index)),
e.Client.Search.WithBody(bytes.NewBuffer(body)),
e.Client.Search.WithScroll(scrollTimeout))
e.Client.Search.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......@@ -47,11 +47,11 @@ func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, erro
return ioutil.ReadAll(response.Body)
}
func (e *Elastic) Scroll(scrollId string, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Scroll(id string) ([]byte, error) {
response, err := e.Client.Scroll(
e.Client.Scroll.WithContext(context.Background()),
e.Client.Scroll.WithScrollID(scrollId),
e.Client.Scroll.WithScroll(scrollTimeout))
e.Client.Scroll.WithScrollID(id),
e.Client.Scroll.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......
......@@ -29,13 +29,13 @@ func New(address string, index string) *Elastic {
return &Elastic{client: client, index: index}
}
func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Search(body []byte) ([]byte, error) {
response, err := e.client.Search(
e.client.Search.WithContext(context.Background()),
e.client.Search.WithIndex(fmt.Sprintf("%s*", e.index)),
e.client.Search.WithTrackTotalHits(true),
e.client.Search.WithBody(bytes.NewBuffer(body)),
e.client.Search.WithScroll(scrollTimeout))
e.client.Search.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......@@ -48,11 +48,11 @@ func (e *Elastic) Search(body []byte, scrollTimeout time.Duration) ([]byte, erro
return ioutil.ReadAll(response.Body)
}
func (e *Elastic) Scroll(scrollId string, scrollTimeout time.Duration) ([]byte, error) {
func (e *Elastic) Scroll(id string) ([]byte, error) {
response, err := e.client.Scroll(
e.client.Scroll.WithContext(context.Background()),
e.client.Scroll.WithScrollID(scrollId),
e.client.Scroll.WithScroll(scrollTimeout))
e.client.Scroll.WithScrollID(id),
e.client.Scroll.WithScroll(time.Minute))
if err != nil {
return nil, err
}
......
package logging
import (
"io"
"time"
)
type Interface interface {
// Current stats about log store, eg. total number of logs and containers
GetCurrentStats(sf SearchFilter) (Statistics, error)
CountLogsByInterval(sf SearchFilter, interval string) (Histogram, error)
SearchLogs(sf SearchFilter, from, size int64, order string) (Logs, error)
ExportLogs(sf SearchFilter, w io.Writer) error
}
// Log search result
type Logs struct {
Total int64 `json:"total" description:"total number of matched results"`
Records []Record `json:"records,omitempty" description:"actual array of results"`
}
type Record struct {
Log string `json:"log,omitempty" description:"log message"`
Time string `json:"time,omitempty" description:"log timestamp"`
Namespace string `json:"namespace,omitempty" description:"namespace"`
Pod string `json:"pod,omitempty" description:"pod name"`
Container string `json:"container,omitempty" description:"container name"`
}
// Log statistics result
type Statistics struct {
Containers int64 `json:"containers" description:"total number of containers"`
Logs int64 `json:"logs" description:"total number of logs"`
}
// Log count result by interval
type Histogram struct {
Total int64 `json:"total" description:"total number of logs"`
Buckets []Bucket `json:"histograms" description:"actual array of histogram results"`
}
type Bucket struct {
Time int64 `json:"time" description:"timestamp"`
Count int64 `json:"count" description:"total number of logs at intervals"`
}
// General query conditions
type SearchFilter struct {
// xxxFilter fields are for literal matching
// xxxSearch fields are for fuzzy matching
// To prevent disclosing archived logs of a reopened namespace,
// NamespaceFilter records the namespace creation time.
// Any query to this namespace must begin after its creation.
NamespaceFilter map[string]time.Time
WorkloadSearch []string
WorkloadFilter []string
PodSearch []string
PodFilter []string
ContainerSearch []string
ContainerFilter []string
LogSearch []string
Starttime time.Time
Endtime time.Time
}
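// For example (illustrative values; nsCreationTime is a hypothetical variable
// holding the namespace creation time), a filter that pins one namespace and
// one exact pod while fuzzily matching container names over the last hour:
//
//   sf := SearchFilter{
//       NamespaceFilter: map[string]time.Time{"demo-project": nsCreationTime},
//       PodFilter:       []string{"ks-apiserver-7f6b8d9c4d-2lbvq"}, // literal match
//       ContainerSearch: []string{"apiserver"},                     // fuzzy (phrase-prefix) match
//       Starttime:       time.Now().Add(-time.Hour),
//       Endtime:         time.Now(),
//   }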
package logging
type Interface interface {
}
package logging
type Query struct {
}