diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go
index e40e7a02da45b0642c851e11fe90c283f3285502..e0fe5df838322366a356298accc67b12550cb5b4 100644
--- a/cmd/controller-manager/app/server.go
+++ b/cmd/controller-manager/app/server.go
@@ -159,7 +159,8 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
 		kubernetesClient.KubeSphere(),
 		kubernetesClient.Istio(),
 		kubernetesClient.Snapshot(),
-		kubernetesClient.ApiExtensions())
+		kubernetesClient.ApiExtensions(),
+		kubernetesClient.Prometheus())
 
 	mgrOptions := manager.Options{
 		CertDir: s.WebhookCertDir,
diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go
index 2745b6c25f81a85283668fd2ebcd64319d50ce0f..f60b2a88b051408eb10953f67d00abe3a2170b2f 100644
--- a/cmd/ks-apiserver/app/options/options.go
+++ b/cmd/ks-apiserver/app/options/options.go
@@ -28,6 +28,7 @@ import (
 	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
 	"kubesphere.io/kubesphere/pkg/informers"
 	genericoptions "kubesphere.io/kubesphere/pkg/server/options"
+	"kubesphere.io/kubesphere/pkg/simple/client/alerting"
 	auditingclient "kubesphere.io/kubesphere/pkg/simple/client/auditing/elasticsearch"
 	"kubesphere.io/kubesphere/pkg/simple/client/cache"
 	runtimecache "sigs.k8s.io/controller-runtime/pkg/cache"
@@ -82,6 +83,7 @@ func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) {
 	s.MultiClusterOptions.AddFlags(fss.FlagSet("multicluster"), s.MultiClusterOptions)
 	s.EventsOptions.AddFlags(fss.FlagSet("events"), s.EventsOptions)
 	s.AuditingOptions.AddFlags(fss.FlagSet("auditing"), s.AuditingOptions)
+	s.AlertingOptions.AddFlags(fss.FlagSet("alerting"), s.AlertingOptions)
 
 	fs = fss.FlagSet("klog")
 	local := flag.NewFlagSet("klog", flag.ExitOnError)
@@ -109,7 +111,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
 	apiServer.KubernetesClient = kubernetesClient
 
 	informerFactory := informers.NewInformerFactories(kubernetesClient.Kubernetes(), kubernetesClient.KubeSphere(),
-		kubernetesClient.Istio(), kubernetesClient.Snapshot(), kubernetesClient.ApiExtensions())
+		kubernetesClient.Istio(), kubernetesClient.Snapshot(), kubernetesClient.ApiExtensions(), kubernetesClient.Prometheus())
 	apiServer.InformerFactory = informerFactory
 
 	if s.MonitoringOptions == nil || len(s.MonitoringOptions.Endpoint) == 0 {
@@ -199,6 +201,14 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
 		apiServer.OpenpitrixClient = opClient
 	}
 
+	if s.AlertingOptions != nil && (s.AlertingOptions.PrometheusEndpoint != "" || s.AlertingOptions.ThanosRulerEndpoint != "") {
+		alertingClient, err := alerting.NewRuleClient(s.AlertingOptions)
+		if err != nil {
+			return nil, fmt.Errorf("failed to init alerting client: %v", err)
+		}
+		apiServer.AlertingClient = alertingClient
+	}
+
 	server := &http.Server{
 		Addr: fmt.Sprintf(":%d", s.GenericServerRunOptions.InsecurePort),
 	}
diff --git a/cmd/ks-apiserver/app/options/validation.go b/cmd/ks-apiserver/app/options/validation.go
index 2150fbe07b78617eb3558ce4886474efad15ffeb..3d18551ad827b67a6ab4f4364bb1de8aabf89d5b 100644
--- a/cmd/ks-apiserver/app/options/validation.go
+++ b/cmd/ks-apiserver/app/options/validation.go
@@ -34,6 +34,7 @@ func (s *ServerRunOptions) Validate() []error {
 	errors = append(errors, s.AuthorizationOptions.Validate()...)
 	errors = append(errors, s.EventsOptions.Validate()...)
 	errors = append(errors, s.AuditingOptions.Validate()...)
+	errors = append(errors, s.AlertingOptions.Validate()...)
 
 	return errors
 }
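Note: the ks-apiserver wiring above follows the repo's existing simple-client pattern — a dedicated options struct registers its flags, joins `Validate()`, and feeds a constructor (`alerting.NewRuleClient`). Below is a minimal sketch of the options side of `pkg/simple/client/alerting`; only `PrometheusEndpoint`, `ThanosRulerEndpoint`, `AddFlags`, `Validate`, and `NewRuleClient` are confirmed by this diff, so the flag names and validation logic here are illustrative assumptions, not the actual implementation.

```go
// Hypothetical sketch of the alerting options type, mirroring the pattern
// used by the other pkg/simple/client options types in this repo.
package alerting

import (
	"fmt"
	"net/url"

	"github.com/spf13/pflag"
)

// Options carries the endpoints the alerting rule client dials.
type Options struct {
	PrometheusEndpoint  string `json:"prometheusEndpoint" yaml:"prometheusEndpoint"`
	ThanosRulerEndpoint string `json:"thanosRulerEndpoint" yaml:"thanosRulerEndpoint"`
}

func NewAlertingOptions() *Options {
	return &Options{}
}

// AddFlags registers the "alerting" flag set; c holds defaults loaded from config.
// Flag names below are assumed for illustration.
func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
	fs.StringVar(&o.PrometheusEndpoint, "alerting-prometheus-endpoint", c.PrometheusEndpoint,
		"Prometheus endpoint used to manage built-in alerting rules.")
	fs.StringVar(&o.ThanosRulerEndpoint, "alerting-thanos-ruler-endpoint", c.ThanosRulerEndpoint,
		"Thanos Ruler endpoint used to manage custom alerting rules.")
}

// Validate rejects unparsable endpoints; empty endpoints simply leave alerting
// disabled, matching the nil/empty check in NewAPIServer above.
func (o *Options) Validate() []error {
	var errs []error
	for _, endpoint := range []string{o.PrometheusEndpoint, o.ThanosRulerEndpoint} {
		if endpoint == "" {
			continue
		}
		if _, err := url.ParseRequestURI(endpoint); err != nil {
			errs = append(errs, fmt.Errorf("invalid alerting endpoint %q: %v", endpoint, err))
		}
	}
	return errs
}
```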
diff --git a/go.mod b/go.mod
index a3a379d607090a551a891810165de9d8f096d01e..76458cd1cae17744b0a7c570fa07798acfb442e3 100644
--- a/go.mod
+++ b/go.mod
@@ -8,10 +8,9 @@ go 1.13
 
 require (
 	code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6
-	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/PuerkitoBio/goquery v1.5.0
 	github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496
-	github.com/aws/aws-sdk-go v1.30.12
+	github.com/aws/aws-sdk-go v1.33.12
 	github.com/beevik/etree v1.1.0
 	github.com/container-storage-interface/spec v1.2.0
 	github.com/containernetworking/cni v0.8.0
@@ -26,6 +25,7 @@ require (
 	github.com/emicklei/go-restful-openapi v1.4.1
 	github.com/emirpasic/gods v1.12.0 // indirect
 	github.com/fatih/structs v1.1.0
+	github.com/ghodss/yaml v1.0.0
 	github.com/go-ldap/ldap v3.0.3+incompatible
 	github.com/go-logr/logr v0.1.0
 	github.com/go-logr/zapr v0.1.1 // indirect
@@ -39,15 +39,13 @@ require (
 	github.com/golang/example v0.0.0-20170904185048-46695d81d1fa
 	github.com/golang/mock v1.4.3
 	github.com/golang/protobuf v1.4.2
-	github.com/google/go-cmp v0.4.0
-	github.com/google/go-querystring v1.0.0 // indirect
+	github.com/google/go-cmp v0.5.0
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/websocket v1.4.1
 	github.com/json-iterator/go v1.1.10
 	github.com/kelseyhightower/envconfig v1.4.0 // indirect
 	github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0
 	github.com/kubesphere/sonargo v0.0.2
-	github.com/lib/pq v1.2.0 // indirect
 	github.com/mitchellh/mapstructure v1.2.2
 	github.com/onsi/ginkgo v1.14.0
 	github.com/onsi/gomega v1.10.1
@@ -58,9 +56,11 @@ require (
 	github.com/projectcalico/kube-controllers v3.8.8+incompatible
 	github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce
 	github.com/prometheus-community/prom-label-proxy v0.2.0
+	github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1
 	github.com/prometheus/client_golang v1.7.1
-	github.com/prometheus/common v0.10.0
-	github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1
+	github.com/prometheus/common v0.11.1
+	github.com/prometheus/prometheus v1.8.2-0.20200907175821-8219b442c864
 	github.com/sony/sonyflake v1.0.0
 	github.com/speps/go-hashids v2.0.0+incompatible
 	github.com/spf13/cobra v1.0.0
@@ -68,15 +68,15 @@ require (
 	github.com/spf13/viper v1.4.0
 	github.com/stretchr/testify v1.6.1
 	github.com/xanzy/ssh-agent v0.2.1 // indirect
-	golang.org/x/crypto v0.0.0-20200422194213-44a606286825
-	golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
+	golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
+	golang.org/x/net v0.0.0-20200707034311-ab3426394381
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	google.golang.org/grpc v1.29.0
+	google.golang.org/grpc v1.30.0
 	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
 	gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect
 	gopkg.in/src-d/go-git.v4 v4.11.0
 	gopkg.in/yaml.v2 v2.3.0
-	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
 	gotest.tools v2.2.0+incompatible
 	istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44
 	istio.io/client-go v0.0.0-20201113183938-0734e976e785
@@ -86,7 +86,7 @@ require (
 	k8s.io/apimachinery v0.19.0
 	k8s.io/apiserver v0.18.6
 	k8s.io/cli-runtime v0.18.6
-	k8s.io/client-go v0.19.0
+	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/code-generator v0.19.0
 	k8s.io/component-base v0.18.6
 	k8s.io/klog v1.0.0
@@ -494,42 +494,37 @@ replace (
 	github.com/rcrowley/go-metrics => github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
 	github.com/retailnext/hllpp => github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52
 	github.com/robfig/cron => github.com/robfig/cron v1.2.0
-	github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v1.2.0
+	github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af
 	github.com/rogpeppe/go-charset => github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4
 	github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.3.0
 	github.com/rs/cors => github.com/rs/cors v1.6.0
 	github.com/russross/blackfriday => github.com/russross/blackfriday v1.5.2
-	github.com/russross/blackfriday/v2 => github.com/russross/blackfriday/v2 v2.0.1
 	github.com/ryanuber/columnize => github.com/ryanuber/columnize v2.1.0+incompatible
 	github.com/samuel/go-zookeeper => github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da
-	github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
+	github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0
 	github.com/sean-/seed => github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529
 	github.com/segmentio/kafka-go => github.com/segmentio/kafka-go v0.2.0
 	github.com/sergi/go-diff => github.com/sergi/go-diff v1.0.0
 	github.com/shirou/gopsutil => github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7
 	github.com/shirou/w32 => github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4
 	github.com/shurcooL/httpfs => github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
-	github.com/shurcooL/sanitized_anchor_name => github.com/shurcooL/sanitized_anchor_name v1.0.0
 	github.com/shurcooL/vfsgen => github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
 	github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
 	github.com/smartystreets/assertions => github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d
 	github.com/smartystreets/goconvey => github.com/smartystreets/goconvey v1.6.4
 	github.com/soheilhy/cmux => github.com/soheilhy/cmux v0.1.4
-	github.com/sony/gobreaker => github.com/sony/gobreaker v0.4.1
-	github.com/sony/sonyflake => github.com/sony/sonyflake v1.0.0
+	github.com/sony/sonyflake => github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009
 	github.com/spaolacci/murmur3 => github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72
 	github.com/speps/go-hashids => github.com/speps/go-hashids v2.0.0+incompatible
 	github.com/spf13/afero => github.com/spf13/afero v1.2.2
 	github.com/spf13/cast => github.com/spf13/cast v1.3.0
-	github.com/spf13/cobra => github.com/spf13/cobra v1.0.0
+	github.com/spf13/cobra => github.com/spf13/cobra v0.0.5
 	github.com/spf13/jwalterweatherman => github.com/spf13/jwalterweatherman v1.0.0
 	github.com/spf13/pflag => github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper => github.com/spf13/viper v1.4.0
 	github.com/src-d/gcfg => github.com/src-d/gcfg v1.4.0
-	github.com/streadway/amqp => github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271
-	github.com/streadway/handy => github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a
 	github.com/stretchr/objx => github.com/stretchr/objx v0.2.0
-	github.com/stretchr/testify => github.com/stretchr/testify v1.6.1
+	github.com/stretchr/testify => github.com/stretchr/testify v1.4.0
 	github.com/tidwall/pretty => github.com/tidwall/pretty v1.0.0
 	github.com/tinylib/msgp => github.com/tinylib/msgp v1.1.0
 	github.com/tmc/grpc-websocket-proxy => github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5
@@ -538,8 +533,7 @@ replace (
 	github.com/uber/jaeger-lib => github.com/uber/jaeger-lib v2.2.0+incompatible
 	github.com/ugorji/go => github.com/ugorji/go v1.1.4
 	github.com/ugorji/go/codec => github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0
-	github.com/urfave/cli => github.com/urfave/cli v1.22.1
-	github.com/vektah/gqlparser => github.com/vektah/gqlparser v1.1.2
+	github.com/urfave/cli => github.com/urfave/cli v1.20.0
 	github.com/willf/bitset => github.com/willf/bitset v1.1.3
 	github.com/xanzy/ssh-agent => github.com/xanzy/ssh-agent v0.2.1
 	github.com/xdg/scram => github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
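Note: the prometheus-operator modules added to go.mod above supply the monitoring.coreos.com/v1 types and generated clientset behind the new `kubernetesClient.Prometheus()` informers wired in earlier. A hedged, standalone sketch of the kind of access they enable — the kubeconfig source and namespace are illustrative, not code from this PR (in ks-apiserver the rest.Config comes from KubernetesOptions):

```go
package main

import (
	"context"
	"fmt"
	"log"

	monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from the default ~/.kube/config (illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client, err := monitoringclient.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	// List PrometheusRule custom resources — the objects an alerting rule
	// client would read and reconcile against Prometheus/Thanos Ruler.
	rules, err := client.MonitoringV1().PrometheusRules("kubesphere-monitoring-system").
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules.Items {
		fmt.Println(r.Namespace + "/" + r.Name)
	}
}
```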
diff --git a/go.sum b/go.sum
index 5dfde1ecd0941cc2f936d19f303f70db6ef36f5d..88c16308f5bc8b5e2465151c59fb0d27b2b90f57 100644
--- a/go.sum
+++ b/go.sum
@@ -7,22 +7,19 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6 h1:tW+ztA4A9UT9xnco5wUjW1oNi35k22eUEn9tNpPYVwE=
 code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
+github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
 github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY=
 github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
 github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
 github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
 github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
 github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -35,6 +32,7 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U=
+github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM=
 github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
 github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
 github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
@@ -65,6 +63,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
+github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
 github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@@ -77,6 +76,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=
 github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
@@ -84,11 +84,14 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.30.12 h1:KrjyosZvkpJjcwMk0RNxMZewQ47v7+ZkbQDXjWsJMs8=
 github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
 github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
+github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
@@ -97,10 +100,13 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -110,15 +116,20 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/chai2010/jsonmap v1.0.0/go.mod h1:+30HSZixzW7NVqyqXTDyQi0hsexrb6hnpvhuW0bot6s=
+github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g=
+github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g=
+github.com/chromedp/chromedp v0.5.3/go.mod h1:YLdPtndaHQ4rCpSpBG+IPpy9JvX0VD+7aaLxYgYj28w=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=
 github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
@@ -126,9 +137,7 @@ github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
 github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
 github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
 github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
@@ -137,9 +146,22 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134=
+github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM=
+github.com/cortexproject/cortex v1.3.1-0.20200901115931-255ff3306960/go.mod h1:ub8BpRZrRa02BOM8NJTnI2YklxW/mGhEkJDrhsDfcfg=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
+github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
+github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
+github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
+github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE=
+github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
+github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
 github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -152,6 +174,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
 github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
 github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
 github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
@@ -174,6 +197,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/elastic/go-elasticsearch/v5 v5.6.1 h1:RnL2wcXepOT5SdoKMMO1j1OBX0vxHYbBtkQNL2E3xs4=
 github.com/elastic/go-elasticsearch/v5 v5.6.1/go.mod h1:r7uV7HidpfkYh7D8SB4lkS13TNlNy3oa5GNmTZvuVqY=
@@ -181,10 +205,15 @@ github.com/elastic/go-elasticsearch/v6 v6.8.2 h1:rp5DGrd63V5c6nHLjF6QEXUpZSvs0+Q
 github.com/elastic/go-elasticsearch/v6 v6.8.2/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
 github.com/elastic/go-elasticsearch/v7 v7.3.0 h1:H29Nqf9cB9dVxX6LwS+zTDC2D4t9s+8dK8ln4HPS9rw=
 github.com/elastic/go-elasticsearch/v7 v7.3.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
+github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY=
+github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
+github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
+github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
 github.com/elazarl/goproxy v0.0.0-20200315184450-1f3cb6622dad h1:zPs0fNF2Io1Qytf92EI2CDJ9oCXZr+NmjEVexrUEdq4=
 github.com/elazarl/goproxy v0.0.0-20200315184450-1f3cb6622dad/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
 github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
 github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
+github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE=
 github.com/emicklei/go-restful v2.14.3+incompatible h1:i59XyRHAxKCVBw3vHzQlpP/+pi89wH1v1HL+RKyVgxk=
 github.com/emicklei/go-restful v2.14.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful-openapi v1.4.1 h1:SocVTIQWnXyit4dotTrwmncBAjtRaBmfcHjo3XGcCm4=
@@ -198,19 +227,25 @@ github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQo
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
 github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -251,9 +286,7 @@ github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4az
 github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
 github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys=
 github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
 github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
-github.com/go-playground/universal-translator v0.0.0-20170327191703-71201497bace h1:vfBaUX49VsqTxXGADDIWvTPvaU4AbQyX/yENHE0f7AY=
 github.com/go-playground/universal-translator v0.0.0-20170327191703-71201497bace/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
 github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
 github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
@@ -276,12 +309,22 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
+github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
+github.com/gocql/gocql v0.0.0-20200121121104-95d072f1b5bb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
 github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 h1:kumyNm8Vr8cbVm/aLQYTbDE3SKCbbn5HEVoDp/Dyyfc=
 github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM=
+github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc=
+github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc=
 github.com/golang/example v0.0.0-20170904185048-46695d81d1fa h1:iqCQC2Z53KkwGgTN9szyL4q0OQHmuNjeoNnMT6lk66k=
 github.com/golang/example v0.0.0-20170904185048-46695d81d1fa/go.mod h1:tO/5UvQ/uKigUjQBPqzstj6uxd3fUIjddi19DxGJeWg=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
@@ -290,20 +333,24 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
 github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
 github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
@@ -314,6 +361,7 @@ github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hf
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk=
 github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
@@ -333,6 +381,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.14.4 h1:IOPK2xMPP3aV6/NPt4jt//ELFo3Vv8sDVD8j3+tleDU=
 github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
 github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -355,6 +404,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
 github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
 github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
+github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
 github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -371,6 +421,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
 github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
 github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -379,10 +431,14 @@ github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLl
 github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc=
 github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
+github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
+github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
 github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -404,21 +460,26 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFtnBXsd1vPOZaLsESovai349994nHx3e0=
 github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 h1:SWlt7BoQNASbhTUD0Oy5yysI2seJ7vWuGUp///OM4TM=
 github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 h1:OYDCOjVcx/5wNzlZ/At8otRibUlw0T6R0xOD31f32bw=
 github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0/go.mod h1:Q7VUue/CIrKbtpBdF04a1yjGGgsMaDws1HUxtjzgnEY=
 github.com/kubesphere/sonargo v0.0.2 h1:hsSRE3sv3mkPcUAeSABdp7rtfcNW2zzeHXzFa01CTkU=
 github.com/kubesphere/sonargo v0.0.2/go.mod h1:ww8n9ANlDXhX5PBZ18iaRnCgEkXN0GMml3/KZXOZ11w=
 github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/leodido/go-urn v0.0.0-20181204092800-a67a23e1c1af h1:EhEGUQX36JFkvSWzrwGjjTJxrx7atfJdxv8cxFzmaB0=
+github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
+github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
 github.com/leodido/go-urn v0.0.0-20181204092800-a67a23e1c1af/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -427,6 +488,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
 github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
+github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY=
+github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
 github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
@@ -435,6 +498,9 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
 github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
 github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -442,15 +508,29 @@ github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
 github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
 github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
+github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
+github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
+github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
+github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
+github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
+github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
+github.com/minio/minio-go/v6 v6.0.56/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI=
+github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns=
+github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
 github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4=
 github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -462,17 +542,21 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE=
+github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
 github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
 github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
 github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
 github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
@@ -492,6 +576,7 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
@@ -535,6 +620,10 @@ github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce h1:O/
 github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce/go.mod h1:z4tuFqrAg/423AMSaDamY5LgqeOZ5ETui6iOxDwJ/ag=
 github.com/prometheus-community/prom-label-proxy v0.2.0 h1:2cNKhNjbTmmEDvBTW/6WUsE2x7bh76rBMZVBn4ey6To=
 github.com/prometheus-community/prom-label-proxy v0.2.0/go.mod h1:XdjyZg7LCbCC5FADHtpgNp6kQ0W9beXVGfmcvndMj5Y=
+github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a h1:21yBrtc90hdEhJaL815CHNV0GW0DEdgxDOiL5OYoSHo=
+github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a/go.mod h1:Ah5xJsVlBRNuzNCIf5cfASYMXNJemPoDpXZsncFvqCs=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1 h1:/CZyIylkTNOiVdzTtHwkTHTMOCGJXuLtu3ZLAQrH4u0=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1/go.mod h1:iIz0gzBgsmUvH3POupwMevtm74XmRcEBx8w/tE3sl4k=
 github.com/prometheus/alertmanager v0.20.0 h1:PBMNY7oyIvYMBBIag35/C0hO7xn8+35p4V5rNAph5N8=
 github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
 github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
@@ -543,32 +632,38 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc=
 github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1 h1:Oh/bmW9DXCbMeAZbxMmt2wuY6Q4cD0IIbR6vJP3kdHg=
 github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY=
+github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
+github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -576,8 +671,9 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
 github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/sony/sonyflake v1.0.0 h1:MpU6Ro7tfXwgn2l5eluf9xQvQJDROTBImNCfRXn/YeM=
-github.com/sony/sonyflake v1.0.0/go.mod h1:Jv3cfhf/UFtolOTTRd3q4Nl6ENqM+KfyZ5PseKfZGF4=
+github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 h1:3wBL/e/qjpSYaXacpbIV+Bsj/nwQ4UO1llG/av54zzw=
+github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009/go.mod h1:dVvZuWJd174umvm5g8CmZD6S2GWwHKtpK/0ZPHswuNo=
+github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw=
@@ -586,8 +682,8 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -596,27 +692,36 @@ github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
 github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw=
+github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc=
+github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg=
+github.com/thanos-io/thanos v0.13.1-0.20200910143741-e0b7f7b32e9c/go.mod h1:1IzeMKiS+pvxbG2M6ZJyi8ZHaAQKXNjDbP2gjhPbSXE=
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber/jaeger-client-go v2.23.0+incompatible h1:o2g11IUBdEsSZVzF3k7+bahLmxRP/dbOoW4zQ30UlKE=
 github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
 github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0 h1:Q3Bh5Dwzek5LreV9l86IftyLaexgU1mag9WNntbAW9c=
 github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw=
+github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY=
+github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4=
+github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA=
 github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
 github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
@@ -635,6 +740,11 @@ github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mo
 github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
+go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk=
+go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY=
+go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE=
+go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs=
 go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
@@ -644,6 +754,8 @@ go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU=
+go.uber.org/goleak v1.1.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
@@ -652,7 +764,6 @@ go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4= @@ -698,7 +809,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -709,6 +819,9 @@ gopkg.in/go-playground/validator.v9 v9.27.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWd gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -735,6 +848,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 helm.sh/helm/v3 v3.0.1/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44 h1:mfs4UJtpH8ElVEohFZw39qDGv9gg7TOkYVTwJZGQ5Yc= istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44/go.mod h1:kyq3g5w42zl/AKlbzDGppYpGMQYMYMyZKeq0/eexML8= istio.io/client-go v0.0.0-20201113183938-0734e976e785 h1:dY+HxVBYoR26sl3L54G3NMziPEiUCcW3f7b2U+LvzOM= diff --git a/pkg/api/alerting/v2alpha1/types.go b/pkg/api/alerting/v2alpha1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..b2a895709c890250105eb3c8afc3d88d6192704c --- /dev/null +++ b/pkg/api/alerting/v2alpha1/types.go @@ -0,0 +1,379 @@ +/* +Copyright 2020 KubeSphere Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/emicklei/go-restful" + "github.com/pkg/errors" + prommodel "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql/parser" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +const ( + RuleLevelCluster RuleLevel = "cluster" + RuleLevelNamespace RuleLevel = "namespace" +) + +var ( + ErrThanosRulerNotEnabled = errors.New("The requested operation on the custom alerting rule could not be done because the thanos ruler is not enabled") + ErrAlertingRuleNotFound = errors.New("The alerting rule was not found") + ErrAlertingRuleAlreadyExists = errors.New("The alerting rule already exists") + + // anchored so that a label name must match in full, not merely contain a valid substring + ruleLabelNameMatcher = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`) +) + +type RuleLevel string + +type AlertingRule struct { + Id string `json:"id,omitempty" description:"rule id is only used by built-in alerting rules"` + Name string `json:"name,omitempty" description:"rule name should be unique in one namespace for custom alerting rules"` + + Query string `json:"query,omitempty" description:"prometheus query expression, whose grammar is described at https://prometheus.io/docs/prometheus/latest/querying/basics/"` + Duration string `json:"duration,omitempty" description:"duration after which an alert transitions from Pending to Firing state, which must match ^([0-9]+)(y|w|d|h|m|s|ms)$"` + Labels map[string]string `json:"labels,omitempty" description:"extra labels to attach to the resulting alert sample vectors (the key string has to match [a-zA-Z_][a-zA-Z0-9_]*). e.g. a typical label named severity, whose value may be one of info, warning, error, critical, is usually used to indicate the severity of an alert"` + Annotations map[string]string `json:"annotations,omitempty" description:"non-identifying key/value pairs. summary, message, description are the commonly used annotation names"` +} + +type PostableAlertingRule struct { + AlertingRule `json:",omitempty"` +} + +func (r *PostableAlertingRule) Validate() error { + errs := []error{} + + if r.Name == "" { + errs = append(errs, errors.New("name cannot be empty")) + } + if _, err := parser.ParseExpr(r.Query); err != nil { + errs = append(errs, errors.Wrapf(err, "query is invalid: %s", r.Query)) + } + if r.Duration != "" { + if _, err := prommodel.ParseDuration(r.Duration); err != nil { + errs = append(errs, errors.Wrapf(err, "duration is invalid: %s", r.Duration)) + } + } + + if len(r.Labels) > 0 { + for name := range r.Labels { + if !ruleLabelNameMatcher.MatchString(name) || strings.HasPrefix(name, "__") { + errs = append(errs, errors.Errorf( + "label name (%s) is not valid. 
The name must match [a-zA-Z_][a-zA-Z0-9_]* and must not have the __ prefix (label names with this prefix are reserved for internal use)", name)) + } + } + } + + return utilerrors.NewAggregate(errs) +} + +type GettableAlertingRule struct { + AlertingRule `json:",omitempty"` + + State string `json:"state,omitempty" description:"state of a rule based on its alerts, one of firing, pending, inactive"` + Health string `json:"health,omitempty" description:"health state of a rule based on the last execution, one of ok, err, unknown"` + LastError string `json:"lastError,omitempty" description:"error for the last execution"` + EvaluationDurationSeconds float64 `json:"evaluationTime,omitempty" description:"seconds taken to evaluate the query expression"` + LastEvaluation *time.Time `json:"lastEvaluation,omitempty" description:"time of the last evaluation of the query expression"` + + Alerts []*Alert `json:"alerts,omitempty" description:"alerts"` +} + +type GettableAlertingRuleList struct { + Items []*GettableAlertingRule `json:"items"` + Total int `json:"total"` +} + +type Alert struct { + ActiveAt *time.Time `json:"activeAt,omitempty" description:"time when the alert became active"` + Annotations map[string]string `json:"annotations,omitempty" description:"annotations"` + Labels map[string]string `json:"labels,omitempty" description:"labels"` + State string `json:"state,omitempty" description:"state"` + Value string `json:"value,omitempty" description:"the value at the last evaluation of the query expression"` + + RuleId string `json:"ruleId,omitempty" description:"rule id triggering the alert"` + RuleName string `json:"ruleName,omitempty" description:"rule name triggering the alert"` +} + +type AlertList struct { + Items []*Alert `json:"items"` + Total int `json:"total"` +} + +type AlertingRuleQueryParams struct { + NameContainFilter string + State string + Health string + LabelEqualFilters map[string]string + LabelContainFilters map[string]string + + Offset int + Limit int + SortField string + SortType string +} + +func (q *AlertingRuleQueryParams) Filter(rules []*GettableAlertingRule) []*GettableAlertingRule { + var ret []*GettableAlertingRule + for _, rule := range rules { + if rule == nil { + continue + } + if q == nil || q.matches(rule) { + ret = append(ret, rule) + } + } + return ret +} + +func (q *AlertingRuleQueryParams) matches(rule *GettableAlertingRule) bool { + if q.NameContainFilter != "" && !strings.Contains(rule.Name, q.NameContainFilter) { + return false + } + if q.State != "" && q.State != rule.State { + return false + } + if q.Health != "" && q.Health != rule.Health { + return false + } + if len(rule.Labels) == 0 { + return len(q.LabelEqualFilters) == 0 && len(q.LabelContainFilters) == 0 + } + for k, v := range q.LabelEqualFilters { + if fv, ok := rule.Labels[k]; !ok || fv != v { + return false + } + } + for k, v := range q.LabelContainFilters { + if fv, ok := rule.Labels[k]; !ok || !strings.Contains(fv, v) { + return false + } + } + return true +} + +// AlertingRuleIdCompare defines the default order for the alerting rules. +// For alerting rule lists, it guarantees a stable sort. For custom alerting rules that may have identical names +// and builtin alerting rules that may have identical ids, it guarantees stable get operations. 
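+// A minimal illustrative sketch (the rule ids here are hypothetical, not part of the API): the comparator simply orders ids ascending, e.g. AlertingRuleIdCompare("rule-1", "rule-2") is true, so "rule-1" sorts first; Sort below uses it both as the default order and as the tie-breaker when other sort keys are equal.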
+func AlertingRuleIdCompare(leftId, rightId string) bool { + // default to ascending order of id + return leftId <= rightId +} + +func (q *AlertingRuleQueryParams) Sort(rules []*GettableAlertingRule) { + idCompare := func(left, right *GettableAlertingRule) bool { + return AlertingRuleIdCompare(left.Id, right.Id) + } + var compare = idCompare + if q != nil { + reverse := q.SortType == "desc" + switch q.SortField { + case "name": + compare = func(left, right *GettableAlertingRule) bool { + if c := strings.Compare(left.Name, right.Name); c != 0 { + if reverse { + return c > 0 + } + return c < 0 + } + return idCompare(left, right) + } + case "lastEvaluation": + compare = func(left, right *GettableAlertingRule) bool { + if left.LastEvaluation == nil { + if right.LastEvaluation != nil { + return false + } + } else { + if right.LastEvaluation == nil { + return true + } else if !left.LastEvaluation.Equal(*right.LastEvaluation) { + if reverse { + return left.LastEvaluation.After(*right.LastEvaluation) + } + return left.LastEvaluation.Before(*right.LastEvaluation) + } + } + return idCompare(left, right) + } + case "evaluationTime": + compare = func(left, right *GettableAlertingRule) bool { + if left.EvaluationDurationSeconds != right.EvaluationDurationSeconds { + if reverse { + return left.EvaluationDurationSeconds > right.EvaluationDurationSeconds + } + return left.EvaluationDurationSeconds < right.EvaluationDurationSeconds + } + return idCompare(left, right) + } + } + } + sort.Slice(rules, func(i, j int) bool { + return compare(rules[i], rules[j]) + }) +} + +func (q *AlertingRuleQueryParams) Sub(rules []*GettableAlertingRule) []*GettableAlertingRule { + start, stop := 0, 10 + if q != nil { + start, stop = q.Offset, q.Offset+q.Limit + } + total := len(rules) + if start < total { + if stop > total { + stop = total + } + return rules[start:stop] + } + return nil +} + +type AlertQueryParams struct { + State string + LabelEqualFilters map[string]string + LabelContainFilters map[string]string + + Offset int + Limit int +} + +func (q *AlertQueryParams) Filter(alerts []*Alert) []*Alert { + var ret []*Alert + for _, alert := range alerts { + if alert == nil { + continue + } + if q == nil || q.matches(alert) { + ret = append(ret, alert) + } + } + return ret +} + +func (q *AlertQueryParams) matches(alert *Alert) bool { + if q.State != "" && q.State != alert.State { + return false + } + if len(alert.Labels) == 0 { + return len(q.LabelEqualFilters) == 0 && len(q.LabelContainFilters) == 0 + } + for k, v := range q.LabelEqualFilters { + if fv, ok := alert.Labels[k]; !ok || fv != v { + return false + } + } + for k, v := range q.LabelContainFilters { + if fv, ok := alert.Labels[k]; !ok || !strings.Contains(fv, v) { + return false + } + } + return true +} + +func (q *AlertQueryParams) Sort(alerts []*Alert) { + compare := func(left, right *Alert) bool { + if left.ActiveAt == nil { + if right.ActiveAt != nil { + return false + } + } else { + if right.ActiveAt == nil { + return true + } else if !left.ActiveAt.Equal(*right.ActiveAt) { + return left.ActiveAt.After(*right.ActiveAt) + } + } + return prommodel.LabelsToSignature(left.Labels) <= prommodel.LabelsToSignature(right.Labels) + } + sort.Slice(alerts, func(i, j int) bool { + return compare(alerts[i], alerts[j]) + }) +} + +func (q *AlertQueryParams) Sub(alerts []*Alert) []*Alert { + start, stop := 0, 10 + if q != nil { + start, stop = q.Offset, q.Offset+q.Limit + } + total := len(alerts) + if start < total { + if stop > total { + stop = total + } + return 
alerts[start:stop] + } + return nil +} + +func ParseAlertingRuleQueryParams(req *restful.Request) (*AlertingRuleQueryParams, error) { + var ( + q = &AlertingRuleQueryParams{} + err error + ) + + q.NameContainFilter = req.QueryParameter("name") + q.State = req.QueryParameter("state") + q.Health = req.QueryParameter("health") + q.Offset, _ = strconv.Atoi(req.QueryParameter("offset")) + q.Limit, err = strconv.Atoi(req.QueryParameter("limit")) + if err != nil { + q.Limit = 10 + err = nil + } + q.LabelEqualFilters, q.LabelContainFilters = parseLabelFilters(req) + q.SortField = req.QueryParameter("sort_field") + q.SortType = req.QueryParameter("sort_type") + return q, err +} + +func ParseAlertQueryParams(req *restful.Request) (*AlertQueryParams, error) { + var ( + q = &AlertQueryParams{} + err error + ) + + q.State = req.QueryParameter("state") + q.Offset, _ = strconv.Atoi(req.QueryParameter("offset")) + q.Limit, err = strconv.Atoi(req.QueryParameter("limit")) + if err != nil { + q.Limit = 10 + err = nil + } + q.LabelEqualFilters, q.LabelContainFilters = parseLabelFilters(req) + return q, err +} + +func parseLabelFilters(req *restful.Request) (map[string]string, map[string]string) { + var ( + labelEqualFilters = make(map[string]string) + labelContainFilters = make(map[string]string) + labelFiltersString = req.QueryParameter("label_filters") + ) + for _, filter := range strings.Split(labelFiltersString, ",") { + if i := strings.Index(filter, "="); i > 0 && len(filter) > i+1 { + labelEqualFilters[filter[:i]] = filter[i+1:] + } else if i := strings.Index(filter, "~"); i > 0 && len(filter) > i+1 { + labelContainFilters[filter[:i]] = filter[i+1:] + } + } + return labelEqualFilters, labelContainFilters +} diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 440cacee536ac18e8cccc1d9fb629bb5cfb765d1..20fa0ce9371f6a066cbf3786a816ad09082f078f 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -49,6 +49,7 @@ import ( "kubesphere.io/kubesphere/pkg/apiserver/request" "kubesphere.io/kubesphere/pkg/informers" alertingv1 "kubesphere.io/kubesphere/pkg/kapis/alerting/v1" + alertingv2alpha1 "kubesphere.io/kubesphere/pkg/kapis/alerting/v2alpha1" clusterkapisv1alpha1 "kubesphere.io/kubesphere/pkg/kapis/cluster/v1alpha1" configv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/config/v1alpha2" devopsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2" @@ -72,6 +73,7 @@ import ( "kubesphere.io/kubesphere/pkg/models/iam/im" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/loginrecord" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/user" + "kubesphere.io/kubesphere/pkg/simple/client/alerting" "kubesphere.io/kubesphere/pkg/simple/client/auditing" "kubesphere.io/kubesphere/pkg/simple/client/cache" "kubesphere.io/kubesphere/pkg/simple/client/devops" @@ -146,6 +148,8 @@ type APIServer struct { AuditingClient auditing.Client + AlertingClient alerting.RuleClient + // controller-runtime cache RuntimeCache runtimecache.Cache } @@ -255,6 +259,8 @@ func (s *APIServer) installKubeSphereAPIs() { s.InformerFactory.KubernetesSharedInformerFactory())) urlruntime.Must(notificationv1.AddToContainer(s.container, s.Config.NotificationOptions.Endpoint)) urlruntime.Must(alertingv1.AddToContainer(s.container, s.Config.AlertingOptions.Endpoint)) + urlruntime.Must(alertingv2alpha1.AddToContainer(s.container, s.InformerFactory, + s.KubernetesClient.Prometheus(), s.AlertingClient, s.Config.AlertingOptions)) urlruntime.Must(version.AddToContainer(s.container, 
s.KubernetesClient.Discovery())) } @@ -524,6 +530,26 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error { apiextensionsInformerFactory.Start(stopCh) apiextensionsInformerFactory.WaitForCacheSync(stopCh) + if promFactory := s.InformerFactory.PrometheusSharedInformerFactory(); promFactory != nil { + prometheusGVRs := []schema.GroupVersionResource{ + {Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheuses"}, + {Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"}, + {Group: "monitoring.coreos.com", Version: "v1", Resource: "thanosrulers"}, + } + for _, gvr := range prometheusGVRs { + if isResourceExists(gvr) { + _, err = promFactory.ForResource(gvr) + if err != nil { + return err + } + } else { + klog.Warningf("resource %s does not exist in the cluster", gvr) + } + } + promFactory.Start(stopCh) + promFactory.WaitForCacheSync(stopCh) + } + + // controller runtime cache for resources go s.RuntimeCache.Start(stopCh) s.RuntimeCache.WaitForCacheSync(stopCh) diff --git a/pkg/apiserver/auditing/types_test.go b/pkg/apiserver/auditing/types_test.go index 8dd74d209c723a0375e44426dc56ed40678d5969..d56affd19841337915aa5e76d484f6ef6edb28c4 100644 --- a/pkg/apiserver/auditing/types_test.go +++ b/pkg/apiserver/auditing/types_test.go @@ -56,7 +56,7 @@ func TestGetAuditLevel(t *testing.T) { ksClient := fake.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) a := auditing{ webhookLister: fakeInformerFactory.KubeSphereSharedInformerFactory().Auditing().V1alpha1().Webhooks().Lister(), @@ -85,7 +85,7 @@ func TestAuditing_Enabled(t *testing.T) { ksClient := fake.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) a := auditing{ webhookLister: fakeInformerFactory.KubeSphereSharedInformerFactory().Auditing().V1alpha1().Webhooks().Lister(), @@ -115,7 +115,7 @@ func TestAuditing_K8sAuditingEnabled(t *testing.T) { ksClient := fake.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) a := auditing{ webhookLister: fakeInformerFactory.KubeSphereSharedInformerFactory().Auditing().V1alpha1().Webhooks().Lister(), @@ -145,7 +145,7 @@ func TestAuditing_LogRequestObject(t *testing.T) { ksClient := fake.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) a := auditing{ webhookLister: fakeInformerFactory.KubeSphereSharedInformerFactory().Auditing().V1alpha1().Webhooks().Lister(), @@ -236,7 +236,7 @@ func TestAuditing_LogResponseObject(t *testing.T) { ksClient := fake.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) a := auditing{ webhookLister: 
fakeInformerFactory.KubeSphereSharedInformerFactory().Auditing().V1alpha1().Webhooks().Lister(), diff --git a/pkg/apiserver/authorization/rbac/rbac_test.go b/pkg/apiserver/authorization/rbac/rbac_test.go index 419527606edec1dfe0993eddf8c439103671935f..0b811b6684e0f019f9782109db57035488d80eb1 100644 --- a/pkg/apiserver/authorization/rbac/rbac_test.go +++ b/pkg/apiserver/authorization/rbac/rbac_test.go @@ -857,7 +857,7 @@ func newMockRBACAuthorizer(staticRoles *StaticRoles) (*RBACAuthorizer, error) { ksClient := fakeks.NewSimpleClientset() k8sClient := fakek8s.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil) k8sInformerFactory := fakeInformerFactory.KubernetesSharedInformerFactory() ksInformerFactory := fakeInformerFactory.KubeSphereSharedInformerFactory() diff --git a/pkg/apiserver/config/config.go b/pkg/apiserver/config/config.go index fc1aafe4cc07a9b6c522b038cb7263b3bada312e..9cf1a7a511123ad11a4aa0afae2e600ad257b5ad 100644 --- a/pkg/apiserver/config/config.go +++ b/pkg/apiserver/config/config.go @@ -247,7 +247,8 @@ func (conf *Config) stripEmptyOptions() { conf.S3Options = nil } - if conf.AlertingOptions != nil && conf.AlertingOptions.Endpoint == "" { + if conf.AlertingOptions != nil && conf.AlertingOptions.Endpoint == "" && + conf.AlertingOptions.PrometheusEndpoint == "" && conf.AlertingOptions.ThanosRulerEndpoint == "" { conf.AlertingOptions = nil } diff --git a/pkg/apiserver/config/config_test.go b/pkg/apiserver/config/config_test.go index 4d6c7e1d52892ab022c5d8386575465326fb9302..93f77b5aab9830a8837a6c5d47b5cf6d4cca5535 100644 --- a/pkg/apiserver/config/config_test.go +++ b/pkg/apiserver/config/config_test.go @@ -119,6 +119,10 @@ func newTestConfig() (*Config, error) { }, AlertingOptions: &alerting.Options{ Endpoint: "http://alerting-client-server.kubesphere-alerting-system.svc:9200/api", + + PrometheusEndpoint: "http://prometheus-operated.kubesphere-monitoring-system.svc", + ThanosRulerEndpoint: "http://thanos-ruler-operated.kubesphere-monitoring-system.svc", + ThanosRuleResourceLabels: "thanosruler=thanos-ruler,role=thanos-alerting-rules", }, NotificationOptions: ¬ification.Options{ Endpoint: "http://notification.kubesphere-alerting-system.svc:9200", diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 5599a8192413aa09bdd3d8b7d30233c8e163089d..e759ee98948e6cccbedbcca4d4a48b7179b6a56b 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -104,6 +104,8 @@ const ( LogQueryTag = "Log Query" EventsQueryTag = "Events Query" AuditingQueryTag = "Auditing Query" + + AlertingTag = "Alerting" ) var ( diff --git a/pkg/informers/informers.go b/pkg/informers/informers.go index 242f8bf07dbcf4a50893cc5e1afad79df55ed624..c9b6df4b38fd0130d425ae12e2026ad26fbe68b9 100644 --- a/pkg/informers/informers.go +++ b/pkg/informers/informers.go @@ -19,6 +19,8 @@ package informers import ( snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v3/clientset/versioned" snapshotinformer "github.com/kubernetes-csi/external-snapshotter/client/v3/informers/externalversions" + prominformers "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" istioclient "istio.io/client-go/pkg/clientset/versioned" istioinformers "istio.io/client-go/pkg/informers/externalversions" 
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -41,6 +43,7 @@ type InformerFactory interface { IstioSharedInformerFactory() istioinformers.SharedInformerFactory SnapshotSharedInformerFactory() snapshotinformer.SharedInformerFactory ApiExtensionSharedInformerFactory() apiextensionsinformers.SharedInformerFactory + PrometheusSharedInformerFactory() prominformers.SharedInformerFactory // Start shared informer factory one by one if they are not nil Start(stopCh <-chan struct{}) @@ -52,10 +55,12 @@ type informerFactories struct { istioInformerFactory istioinformers.SharedInformerFactory snapshotInformerFactory snapshotinformer.SharedInformerFactory apiextensionsInformerFactory apiextensionsinformers.SharedInformerFactory + prometheusInformerFactory prominformers.SharedInformerFactory } func NewInformerFactories(client kubernetes.Interface, ksClient versioned.Interface, istioClient istioclient.Interface, - snapshotClient snapshotclient.Interface, apiextensionsClient apiextensionsclient.Interface) InformerFactory { + snapshotClient snapshotclient.Interface, apiextensionsClient apiextensionsclient.Interface, + prometheusClient promresourcesclient.Interface) InformerFactory { factory := &informerFactories{} if client != nil { @@ -78,6 +83,10 @@ func NewInformerFactories(client kubernetes.Interface, ksClient versioned.Interf factory.apiextensionsInformerFactory = apiextensionsinformers.NewSharedInformerFactory(apiextensionsClient, defaultResync) } + if prometheusClient != nil { + factory.prometheusInformerFactory = prominformers.NewSharedInformerFactory(prometheusClient, defaultResync) + } + return factory } @@ -101,6 +110,10 @@ func (f *informerFactories) ApiExtensionSharedInformerFactory() apiextensionsinf return f.apiextensionsInformerFactory } +func (f *informerFactories) PrometheusSharedInformerFactory() prominformers.SharedInformerFactory { + return f.prometheusInformerFactory +} + func (f *informerFactories) Start(stopCh <-chan struct{}) { if f.informerFactory != nil { f.informerFactory.Start(stopCh) @@ -121,4 +134,8 @@ func (f *informerFactories) Start(stopCh <-chan struct{}) { if f.apiextensionsInformerFactory != nil { f.apiextensionsInformerFactory.Start(stopCh) } + + if f.prometheusInformerFactory != nil { + f.prometheusInformerFactory.Start(stopCh) + } } diff --git a/pkg/informers/null_informers.go b/pkg/informers/null_informers.go index 6a5aa710671bc61b64c4ff1a8f511269cd48bd6d..08f8f424b7370b1b90fb772ac31d831d34a5bc1c 100644 --- a/pkg/informers/null_informers.go +++ b/pkg/informers/null_informers.go @@ -17,19 +17,23 @@ limitations under the License. 
package informers import ( + "time" + snapshotinformer "github.com/kubernetes-csi/external-snapshotter/client/v3/informers/externalversions" + prominformers "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions" + promfake "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" istioinformers "istio.io/client-go/pkg/informers/externalversions" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake" ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" - "time" ) type nullInformerFactory struct { fakeK8sInformerFactory informers.SharedInformerFactory fakeKsInformerFactory ksinformers.SharedInformerFactory + fakePrometheusFactory prominformers.SharedInformerFactory } func NewNullInformerFactory() InformerFactory { @@ -39,9 +43,13 @@ func NewNullInformerFactory() InformerFactory { fakeKsClient := ksfake.NewSimpleClientset() fakeKsInformerFactory := ksinformers.NewSharedInformerFactory(fakeKsClient, time.Minute*10) + fakePrometheusClient := promfake.NewSimpleClientset() + fakePrometheusFactory := prominformers.NewSharedInformerFactory(fakePrometheusClient, time.Minute*10) + return &nullInformerFactory{ fakeK8sInformerFactory: fakeInformerFactory, fakeKsInformerFactory: fakeKsInformerFactory, + fakePrometheusFactory: fakePrometheusFactory, } } @@ -65,5 +73,9 @@ func (n nullInformerFactory) ApiExtensionSharedInformerFactory() apiextensionsin return nil } +func (n *nullInformerFactory) PrometheusSharedInformerFactory() prominformers.SharedInformerFactory { + return n.fakePrometheusFactory +} + func (n nullInformerFactory) Start(stopCh <-chan struct{}) { } diff --git a/pkg/kapis/alerting/v2alpha1/handler.go b/pkg/kapis/alerting/v2alpha1/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..94df298c97c6559daae3e2e31373b8646d45dbdf --- /dev/null +++ b/pkg/kapis/alerting/v2alpha1/handler.go @@ -0,0 +1,285 @@ +/* +Copyright 2020 KubeSphere Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + "github.com/emicklei/go-restful" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + "k8s.io/klog" + ksapi "kubesphere.io/kubesphere/pkg/api" + "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1" + "kubesphere.io/kubesphere/pkg/informers" + alertingmodels "kubesphere.io/kubesphere/pkg/models/alerting" + "kubesphere.io/kubesphere/pkg/simple/client/alerting" +) + +type handler struct { + operator alertingmodels.Operator +} + +func newHandler(informers informers.InformerFactory, + promResourceClient promresourcesclient.Interface, ruleClient alerting.RuleClient, + option *alerting.Options) *handler { + return &handler{ + operator: alertingmodels.NewOperator( + informers, promResourceClient, ruleClient, option), + } +} + +func (h *handler) handleListCustomAlertingRules(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + query, err := v2alpha1.ParseAlertingRuleQueryParams(req) + if err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + rules, err := h.operator.ListCustomAlertingRules(req.Request.Context(), namespace, query) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + resp.WriteEntity(rules) +} + +func (h *handler) handleListCustomRulesAlerts(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + query, err := v2alpha1.ParseAlertQueryParams(req) + if err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + alerts, err := h.operator.ListCustomRulesAlerts(req.Request.Context(), namespace, query) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + resp.WriteEntity(alerts) +} + +func (h *handler) handleGetCustomAlertingRule(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + ruleName := req.PathParameter("rule_name") + + rule, err := h.operator.GetCustomAlertingRule(req.Request.Context(), namespace, ruleName) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + if rule == nil { + ksapi.HandleNotFound(resp, nil, v2alpha1.ErrAlertingRuleNotFound) + return + } + resp.WriteEntity(rule) +} + +func (h *handler) handleListCustomRuleAlerts(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + ruleName := req.PathParameter("rule_name") + + alerts, err := h.operator.ListCustomRuleAlerts(req.Request.Context(), namespace, ruleName) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + resp.WriteEntity(alerts) +} + +func (h *handler) handleCreateCustomAlertingRule(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + + var rule v2alpha1.PostableAlertingRule + if err := 
req.ReadEntity(&rule); err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + if err := rule.Validate(); err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + err := h.operator.CreateCustomAlertingRule(req.Request.Context(), namespace, &rule) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + case err == v2alpha1.ErrAlertingRuleAlreadyExists: + ksapi.HandleConflict(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } +} + +func (h *handler) handleUpdateCustomAlertingRule(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + ruleName := req.PathParameter("rule_name") + + var rule v2alpha1.PostableAlertingRule + if err := req.ReadEntity(&rule); err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + if err := rule.Validate(); err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + err := h.operator.UpdateCustomAlertingRule(req.Request.Context(), namespace, ruleName, &rule) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } +} + +func (h *handler) handleDeleteCustomAlertingRule(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + name := req.PathParameter("rule_name") + + err := h.operator.DeleteCustomAlertingRule(req.Request.Context(), namespace, name) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrThanosRulerNotEnabled: + ksapi.HandleBadRequest(resp, nil, err) + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } +} + +func (h *handler) handleListBuiltinAlertingRules(req *restful.Request, resp *restful.Response) { + query, err := v2alpha1.ParseAlertingRuleQueryParams(req) + if err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + rules, err := h.operator.ListBuiltinAlertingRules(req.Request.Context(), query) + if err != nil { + klog.Error(err) + ksapi.HandleInternalError(resp, nil, err) + return + } + resp.WriteEntity(rules) +} + +func (h *handler) handleListBuiltinRulesAlerts(req *restful.Request, resp *restful.Response) { + query, err := v2alpha1.ParseAlertQueryParams(req) + if err != nil { + klog.Error(err) + ksapi.HandleBadRequest(resp, nil, err) + return + } + + alerts, err := h.operator.ListBuiltinRulesAlerts(req.Request.Context(), query) + if err != nil { + klog.Error(err) + ksapi.HandleInternalError(resp, nil, err) + return + } + resp.WriteEntity(alerts) +} + +func (h *handler) handleGetBuiltinAlertingRule(req *restful.Request, resp *restful.Response) { + ruleId := req.PathParameter("rule_id") + + rule, err := h.operator.GetBuiltinAlertingRule(req.Request.Context(), ruleId) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + if rule == nil { + ksapi.HandleNotFound(resp, nil, v2alpha1.ErrAlertingRuleNotFound) + return + } + + resp.WriteEntity(rule) +} + +func (h *handler) handleListBuiltinRuleAlerts(req *restful.Request, 
resp *restful.Response) { + ruleId := req.PathParameter("rule_id") + + alerts, err := h.operator.ListBuiltinRuleAlerts(req.Request.Context(), ruleId) + if err != nil { + klog.Error(err) + switch { + case err == v2alpha1.ErrAlertingRuleNotFound: + ksapi.HandleNotFound(resp, nil, err) + default: + ksapi.HandleInternalError(resp, nil, err) + } + return + } + + resp.WriteEntity(alerts) +} diff --git a/pkg/kapis/alerting/v2alpha1/register.go b/pkg/kapis/alerting/v2alpha1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..1e32d3fb6d2ae1b8bd4449422f4ffd5442bcfe7b --- /dev/null +++ b/pkg/kapis/alerting/v2alpha1/register.go @@ -0,0 +1,199 @@ +/* +Copyright 2020 KubeSphere Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "net/http" + + "github.com/emicklei/go-restful" + restfulspec "github.com/emicklei/go-restful-openapi" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + "k8s.io/apimachinery/pkg/runtime/schema" + ksapi "kubesphere.io/kubesphere/pkg/api" + alertingv2alpha1 "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/simple/client/alerting" +) + +const ( + groupName = "alerting.kubesphere.io" +) + +var GroupVersion = schema.GroupVersion{Group: groupName, Version: "v2alpha1"} + +func AddToContainer(container *restful.Container, informers informers.InformerFactory, + promResourceClient promresourcesclient.Interface, ruleClient alerting.RuleClient, + option *alerting.Options) error { + + handler := newHandler(informers, promResourceClient, ruleClient, option) + + ws := runtime.NewWebService(GroupVersion) + + ws.Route(ws.GET("/rules"). + To(handler.handleListCustomAlertingRules). + Doc("list the cluster-level custom alerting rules"). + Param(ws.QueryParameter("name", "rule name")). + Param(ws.QueryParameter("state", "state of a rule based on its alerts, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("health", "health state of a rule based on the last execution, one of `ok`, `err`, `unknown`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("sort_field", "sort field, one of `name`, `lastEvaluation`, `evaluationTime`")). + Param(ws.QueryParameter("sort_type", "sort type, one of `asc`, `desc`")). + Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRuleList{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/alerts"). 
+ To(handler.handleListCustomRulesAlerts). + Doc("list the alerts of the cluster-level custom alerting rules"). + Param(ws.QueryParameter("state", "state, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.AlertList{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/rules/{rule_name}"). + To(handler.handleGetCustomAlertingRule). + Doc("get the cluster-level custom alerting rule with the specified name"). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRule{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/rules/{rule_name}/alerts"). + To(handler.handleListCustomRuleAlerts). + Doc("list the alerts of the cluster-level custom alerting rule with the specified name"). + Returns(http.StatusOK, ksapi.StatusOK, []alertingv2alpha1.Alert{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.POST("/rules"). + To(handler.handleCreateCustomAlertingRule). + Doc("create a cluster-level custom alerting rule"). + Reads(alertingv2alpha1.PostableAlertingRule{}). + Returns(http.StatusOK, ksapi.StatusOK, nil). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.PUT("/rules/{rule_name}"). + To(handler.handleUpdateCustomAlertingRule). + Doc("update the cluster-level custom alerting rule with the specified name"). + Reads(alertingv2alpha1.PostableAlertingRule{}). + Returns(http.StatusOK, ksapi.StatusOK, nil). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.DELETE("/rules/{rule_name}"). + To(handler.handleDeleteCustomAlertingRule). + Doc("delete the cluster-level custom alerting rule with the specified name"). + Returns(http.StatusOK, ksapi.StatusOK, nil). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/namespaces/{namespace}/rules"). + To(handler.handleListCustomAlertingRules). + Doc("list the custom alerting rules in the specified namespace"). + Param(ws.QueryParameter("name", "rule name")). + Param(ws.QueryParameter("state", "state of a rule based on its alerts, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("health", "health state of a rule based on the last execution, one of `ok`, `err`, `unknown`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("sort_field", "sort field, one of `name`, `lastEvaluation`, `evaluationTime`")). + Param(ws.QueryParameter("sort_type", "sort type, one of `asc`, `desc`")). + Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRuleList{}). 
+ Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/namespaces/{namespace}/alerts"). + To(handler.handleListCustomRulesAlerts). + Doc("list the alerts of the custom alerting rules in the specified namespace."). + Param(ws.QueryParameter("state", "state, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.AlertList{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/namespaces/{namespace}/rules/{rule_name}"). + To(handler.handleGetCustomAlertingRule). + Doc("get the custom alerting rule with the specified name in the specified namespace"). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRule{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/namespaces/{namespace}/rules/{rule_name}/alerts"). + To(handler.handleListCustomRuleAlerts). + Doc("get the alerts of the custom alerting rule with the specified name in the specified namespace"). + Returns(http.StatusOK, ksapi.StatusOK, []alertingv2alpha1.Alert{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.POST("/namespaces/{namespace}/rules"). + To(handler.handleCreateCustomAlertingRule). + Doc("create a custom alerting rule in the specified namespace"). + Reads(alertingv2alpha1.PostableAlertingRule{}). + Returns(http.StatusOK, ksapi.StatusOK, ""). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.PUT("/namespaces/{namespace}/rules/{rule_name}"). + To(handler.handleUpdateCustomAlertingRule). + Doc("update the custom alerting rule with the specified name in the specified namespace"). + Reads(alertingv2alpha1.PostableAlertingRule{}). + Returns(http.StatusOK, ksapi.StatusOK, ""). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.DELETE("/namespaces/{namespace}/rules/{rule_name}"). + To(handler.handleDeleteCustomAlertingRule). + Doc("delete the custom alerting rule with the specified rule name in the specified namespace"). + Returns(http.StatusOK, ksapi.StatusOK, nil). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/builtin/rules"). + To(handler.handleListBuiltinAlertingRules). + Doc("list the builtin(non-custom) alerting rules"). + Param(ws.QueryParameter("name", "rule name")). + Param(ws.QueryParameter("state", "state of a rule based on its alerts, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("health", "health state of a rule based on the last execution, one of `ok`, `err`, `unknown`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("sort_field", "sort field, one of `name`, `lastEvaluation`, `evaluationTime`")). + Param(ws.QueryParameter("sort_type", "sort type, one of `asc`, `desc`")). 
+ Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRuleList{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/builtin/alerts"). + To(handler.handleListBuiltinRulesAlerts). + Doc("list the alerts of the builtin(non-custom) alerting rules"). + Param(ws.QueryParameter("state", "state, one of `firing`, `pending`, `inactive`")). + Param(ws.QueryParameter("label_filters", "label filters, concatenating multiple filters with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a").DataFormat("key=%s,key~%s")). + Param(ws.QueryParameter("offset", "offset of the result set").DataType("integer").DefaultValue("0")). + Param(ws.QueryParameter("limit", "limit size of the result set").DataType("integer").DefaultValue("10")). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.AlertList{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/builtin/rules/{rule_id}"). + To(handler.handleGetBuiltinAlertingRule). + Doc("get the builtin(non-custom) alerting rule with the specified id"). + Returns(http.StatusOK, ksapi.StatusOK, alertingv2alpha1.GettableAlertingRule{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + ws.Route(ws.GET("/builtin/rules/{rule_id}/alerts"). + To(handler.handleListBuiltinRuleAlerts). + Doc("list the alerts of the builtin(non-custom) alerting rule with the specified id"). + Returns(http.StatusOK, ksapi.StatusOK, []alertingv2alpha1.Alert{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.AlertingTag})) + + container.Add(ws) + + return nil +} diff --git a/pkg/kapis/cluster/v1alpha1/handler_test.go b/pkg/kapis/cluster/v1alpha1/handler_test.go index 55fa22cbaa4adc91b9bcf99131010051091a1ca0..d5c22f99f2cc001541bd6d95f2881a65d392e4fd 100644 --- a/pkg/kapis/cluster/v1alpha1/handler_test.go +++ b/pkg/kapis/cluster/v1alpha1/handler_test.go @@ -125,7 +125,7 @@ func TestGeranteAgentDeployment(t *testing.T) { k8sclient := k8sfake.NewSimpleClientset(service) ksclient := fake.NewSimpleClientset(cluster) - informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil) + informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil) informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service) informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster) @@ -233,7 +233,7 @@ func TestValidateKubeConfig(t *testing.T) { k8sclient := k8sfake.NewSimpleClientset(service) ksclient := fake.NewSimpleClientset(cluster) - informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil) + informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil) informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service) informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster) diff --git a/pkg/kapis/monitoring/v1alpha3/helper_test.go b/pkg/kapis/monitoring/v1alpha3/helper_test.go index 8b84c305de8d69d9e3977a4b4e4e3771f85ec25b..99ef67fd894ee474ba3eafde9ce518be9f461dd3 100644 --- a/pkg/kapis/monitoring/v1alpha3/helper_test.go +++ 
diff --git a/pkg/kapis/cluster/v1alpha1/handler_test.go b/pkg/kapis/cluster/v1alpha1/handler_test.go
index 55fa22cbaa4adc91b9bcf99131010051091a1ca0..d5c22f99f2cc001541bd6d95f2881a65d392e4fd 100644
--- a/pkg/kapis/cluster/v1alpha1/handler_test.go
+++ b/pkg/kapis/cluster/v1alpha1/handler_test.go
@@ -125,7 +125,7 @@ func TestGeranteAgentDeployment(t *testing.T) {
     k8sclient := k8sfake.NewSimpleClientset(service)
     ksclient := fake.NewSimpleClientset(cluster)
 
-    informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil)
+    informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil)
 
     informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service)
     informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster)
@@ -233,7 +233,7 @@ func TestValidateKubeConfig(t *testing.T) {
     k8sclient := k8sfake.NewSimpleClientset(service)
     ksclient := fake.NewSimpleClientset(cluster)
 
-    informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil)
+    informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil)
 
     informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service)
     informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster)
diff --git a/pkg/kapis/monitoring/v1alpha3/helper_test.go b/pkg/kapis/monitoring/v1alpha3/helper_test.go
index 8b84c305de8d69d9e3977a4b4e4e3771f85ec25b..99ef67fd894ee474ba3eafde9ce518be9f461dd3 100644
--- a/pkg/kapis/monitoring/v1alpha3/helper_test.go
+++ b/pkg/kapis/monitoring/v1alpha3/helper_test.go
@@ -216,7 +216,7 @@ func TestParseRequestParams(t *testing.T) {
     for i, tt := range tests {
         t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
             client := fake.NewSimpleClientset(&tt.namespace)
-            fakeInformerFactory := informers.NewInformerFactories(client, nil, nil, nil, nil)
+            fakeInformerFactory := informers.NewInformerFactories(client, nil, nil, nil, nil, nil)
 
             handler := newHandler(client, nil, fakeInformerFactory, nil)
             result, err := handler.makeQueryOptions(tt.params, tt.lvl)
diff --git a/pkg/kapis/resources/v1alpha3/handler_test.go b/pkg/kapis/resources/v1alpha3/handler_test.go
index 6ec5c57edd2fb40a16294db12de284d3e2613f94..be59b1bca415003eca158595c27e0fef3e1435a4 100644
--- a/pkg/kapis/resources/v1alpha3/handler_test.go
+++ b/pkg/kapis/resources/v1alpha3/handler_test.go
@@ -186,7 +186,7 @@ func prepare() (informers.InformerFactory, error) {
     snapshotClient := fakesnapshot.NewSimpleClientset()
     apiextensionsClient := fakeapiextensions.NewSimpleClientset()
 
-    fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, apiextensionsClient)
+    fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, apiextensionsClient, nil)
 
     k8sInformerFactory := fakeInformerFactory.KubernetesSharedInformerFactory()
diff --git a/pkg/models/alerting/alerting.go b/pkg/models/alerting/alerting.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c40ed01c980ef49f7a5873687dba4598434a46f
--- /dev/null
+++ b/pkg/models/alerting/alerting.go
@@ -0,0 +1,629 @@
+package alerting
+
+import (
+    "context"
+    "sort"
+    "strings"
+
+    "github.com/pkg/errors"
+    promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+    prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
+    promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/util/intstr"
+    coreinformersv1 "k8s.io/client-go/informers/core/v1"
+    "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
+    "kubesphere.io/kubesphere/pkg/constants"
+    "kubesphere.io/kubesphere/pkg/informers"
+    "kubesphere.io/kubesphere/pkg/models/alerting/rules"
+    "kubesphere.io/kubesphere/pkg/simple/client/alerting"
+)
+
+const (
+    rulerNamespace                  = constants.KubeSphereMonitoringNamespace
+    customRuleGroupDefault          = "alerting.custom.defaults"
+    customRuleResourceLabelKeyLevel = "custom-alerting-rule-level"
+)
+
+var (
+    maxSecretSize        = corev1.MaxSecretSize
+    maxConfigMapDataSize = int(float64(maxSecretSize) * 0.45)
+)
+
+// Operator contains all operations on alerting rules. The operations may involve
+// manipulating the prometheusrule custom resources in which the rules are persisted,
+// and querying rule state from the prometheus and thanos ruler endpoints.
+// For the following APIs, an empty namespace means the operation targets
+// cluster-level alerting rules; otherwise it targets only rules in the specified namespace.
+// All custom rules are configured for thanos ruler, so operations on custom alerting
+// rules cannot be performed when thanos ruler is not enabled.
+type Operator interface {
+    // ListCustomAlertingRules lists the custom alerting rules.
+    ListCustomAlertingRules(ctx context.Context, namespace string,
+        queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error)
+    // ListCustomRulesAlerts lists the alerts of the custom alerting rules.
+    ListCustomRulesAlerts(ctx context.Context, namespace string,
+        queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error)
+    // GetCustomAlertingRule gets the custom alerting rule with the given name.
+    GetCustomAlertingRule(ctx context.Context, namespace, ruleName string) (*v2alpha1.GettableAlertingRule, error)
+    // ListCustomRuleAlerts lists the alerts of the custom alerting rule with the given name.
+    ListCustomRuleAlerts(ctx context.Context, namespace, ruleName string) ([]*v2alpha1.Alert, error)
+    // CreateCustomAlertingRule creates a custom alerting rule.
+    CreateCustomAlertingRule(ctx context.Context, namespace string, rule *v2alpha1.PostableAlertingRule) error
+    // UpdateCustomAlertingRule updates the custom alerting rule with the given name.
+    UpdateCustomAlertingRule(ctx context.Context, namespace, ruleName string, rule *v2alpha1.PostableAlertingRule) error
+    // DeleteCustomAlertingRule deletes the custom alerting rule with the given name.
+    DeleteCustomAlertingRule(ctx context.Context, namespace, ruleName string) error
+
+    // ListBuiltinAlertingRules lists the builtin (non-custom) alerting rules.
+    ListBuiltinAlertingRules(ctx context.Context,
+        queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error)
+    // ListBuiltinRulesAlerts lists the alerts of the builtin (non-custom) alerting rules.
+    ListBuiltinRulesAlerts(ctx context.Context,
+        queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error)
+    // GetBuiltinAlertingRule gets the builtin (non-custom) alerting rule with the given id.
+    GetBuiltinAlertingRule(ctx context.Context, ruleId string) (*v2alpha1.GettableAlertingRule, error)
+    // ListBuiltinRuleAlerts lists the alerts of the builtin (non-custom) alerting rule with the given id.
+    ListBuiltinRuleAlerts(ctx context.Context, ruleId string) ([]*v2alpha1.Alert, error)
+}
+
+func NewOperator(informers informers.InformerFactory,
+    promResourceClient promresourcesclient.Interface, ruleClient alerting.RuleClient,
+    option *alerting.Options) Operator {
+    o := operator{
+        namespaceInformer: informers.KubernetesSharedInformerFactory().Core().V1().Namespaces(),
+
+        promResourceClient: promResourceClient,
+
+        prometheusInformer:   informers.PrometheusSharedInformerFactory().Monitoring().V1().Prometheuses(),
+        thanosRulerInformer:  informers.PrometheusSharedInformerFactory().Monitoring().V1().ThanosRulers(),
+        ruleResourceInformer: informers.PrometheusSharedInformerFactory().Monitoring().V1().PrometheusRules(),
+
+        ruleClient: ruleClient,
+
+        thanosRuleResourceLabels: make(map[string]string),
+    }
+
+    o.resourceRuleCache = rules.NewRuleCache(o.ruleResourceInformer)
+
+    // ThanosRuleResourceLabels has the form "key1=value1,key2=value2".
+    if option != nil && len(option.ThanosRuleResourceLabels) != 0 {
+        lblStrings := strings.Split(option.ThanosRuleResourceLabels, ",")
+        for _, lblString := range lblStrings {
+            lbl := strings.Split(strings.TrimSpace(lblString), "=")
+            if len(lbl) == 2 {
+                o.thanosRuleResourceLabels[lbl[0]] = lbl[1]
+            }
+        }
+    }
+
+    return &o
+}
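To see how the pieces fit together, here is a minimal wiring sketch. The constructors are the ones introduced by this PR; the endpoint values, the label string, and the `alertingmodels` import alias for pkg/models/alerting are illustrative placeholders, not defaults shipped by this change:

opts := &alerting.Options{
    PrometheusEndpoint:       "http://prometheus-operated.kubesphere-monitoring-system.svc:9090",
    ThanosRulerEndpoint:      "http://thanos-ruler-operated.kubesphere-monitoring-system.svc:10902",
    ThanosRuleResourceLabels: "thanosruler=thanos-ruler,role=thanos-alerting-rules",
}
ruleClient, err := alerting.NewRuleClient(opts) // pkg/simple/client/alerting
if err != nil {
    return err
}
operator := alertingmodels.NewOperator(informerFactory, promResourceClient, ruleClient, opts)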
+
+type operator struct {
+    ruleClient alerting.RuleClient
+
+    promResourceClient promresourcesclient.Interface
+
+    prometheusInformer   prominformersv1.PrometheusInformer
+    thanosRulerInformer  prominformersv1.ThanosRulerInformer
+    ruleResourceInformer prominformersv1.PrometheusRuleInformer
+
+    namespaceInformer coreinformersv1.NamespaceInformer
+
+    resourceRuleCache *rules.RuleCache
+
+    thanosRuleResourceLabels map[string]string
+}
+
+func (o *operator) ListCustomAlertingRules(ctx context.Context, namespace string,
+    queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error) {
+
+    var level v2alpha1.RuleLevel
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+    }
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, level)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlertingRules(alertingRules, queryParams), nil
+}
+
+func (o *operator) ListCustomRulesAlerts(ctx context.Context, namespace string,
+    queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error) {
+
+    var level v2alpha1.RuleLevel
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+    }
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, level)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlerts(alertingRules, queryParams), nil
+}
+
+func (o *operator) GetCustomAlertingRule(ctx context.Context, namespace, ruleName string) (
+    *v2alpha1.GettableAlertingRule, error) {
+
+    var level v2alpha1.RuleLevel
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+    }
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    return o.getCustomAlertingRule(ctx, ruleNamespace, ruleName, level)
+}
+
+func (o *operator) ListCustomRuleAlerts(ctx context.Context, namespace, ruleName string) (
+    []*v2alpha1.Alert, error) {
+
+    rule, err := o.GetCustomAlertingRule(ctx, namespace, ruleName)
+    if err != nil {
+        return nil, err
+    }
+    if rule == nil {
+        return nil, v2alpha1.ErrAlertingRuleNotFound
+    }
+    return rule.Alerts, nil
+}
+
+func (o *operator) ListBuiltinAlertingRules(ctx context.Context,
+    queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error) {
+
+    alertingRules, err := o.listBuiltinAlertingRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlertingRules(alertingRules, queryParams), nil
+}
+
+func (o *operator) ListBuiltinRulesAlerts(ctx context.Context,
+    queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error) {
+    alertingRules, err := o.listBuiltinAlertingRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlerts(alertingRules, queryParams), nil
+}
+
+func (o *operator) GetBuiltinAlertingRule(ctx context.Context, ruleId string) (
+    *v2alpha1.GettableAlertingRule, error) {
+
+    return o.getBuiltinAlertingRule(ctx, ruleId)
+}
+
+func (o *operator) ListBuiltinRuleAlerts(ctx context.Context, ruleId string) ([]*v2alpha1.Alert, error) {
+    rule, err := o.getBuiltinAlertingRule(ctx, ruleId)
+    if err != nil {
+        return nil, err
+    }
+    if rule == nil {
+        return nil, v2alpha1.ErrAlertingRuleNotFound
+    }
+    return rule.Alerts, nil
+}
+
+func (o *operator) ListClusterAlertingRules(ctx context.Context, customFlag string,
+    queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error) {
+
+    namespace := rulerNamespace
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, v2alpha1.RuleLevelCluster)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlertingRules(alertingRules, queryParams), nil
+}
+
+func (o *operator) ListClusterRulesAlerts(ctx context.Context,
+    queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error) {
+
+    namespace := rulerNamespace
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, v2alpha1.RuleLevelCluster)
+    if err != nil {
+        return nil, err
+    }
+
+    return pageAlerts(alertingRules, queryParams), nil
+}
+
+func (o *operator) listCustomAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
+    level v2alpha1.RuleLevel) ([]*v2alpha1.GettableAlertingRule, error) {
+
+    ruler, err := o.getThanosRuler()
+    if err != nil {
+        return nil, err
+    }
+    if ruler == nil {
+        return nil, v2alpha1.ErrThanosRulerNotEnabled
+    }
+
+    resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace,
+        labels.SelectorFromSet(labels.Set{customRuleResourceLabelKeyLevel: string(level)}))
+    if err != nil {
+        return nil, err
+    }
+
+    ruleGroups, err := o.ruleClient.ThanosRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    return rules.GetAlertingRulesStatus(ruleNamespace.Name, &rules.ResourceRuleChunk{
+        ResourceRulesMap: resourceRulesMap,
+        Custom:           true,
+        Level:            level,
+    }, ruleGroups, ruler.ExternalLabels())
+}
+
+func (o *operator) getCustomAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace,
+    ruleName string, level v2alpha1.RuleLevel) (*v2alpha1.GettableAlertingRule, error) {
+
+    ruler, err := o.getThanosRuler()
+    if err != nil {
+        return nil, err
+    }
+    if ruler == nil {
+        return nil, v2alpha1.ErrThanosRulerNotEnabled
+    }
+
+    resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace,
+        labels.SelectorFromSet(labels.Set{customRuleResourceLabelKeyLevel: string(level)}), ruleName)
+    if err != nil {
+        return nil, err
+    }
+    if resourceRule == nil {
+        return nil, v2alpha1.ErrAlertingRuleNotFound
+    }
+
+    ruleGroups, err := o.ruleClient.ThanosRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    return rules.GetAlertingRuleStatus(ruleNamespace.Name, &rules.ResourceRule{
+        ResourceRuleItem: *resourceRule,
+        Custom:           true,
+        Level:            level,
+    }, ruleGroups, ruler.ExternalLabels())
+}
+
+func (o *operator) listBuiltinAlertingRules(ctx context.Context) (
+    []*v2alpha1.GettableAlertingRule, error) {
+
+    ruler, err := o.getPrometheusRuler()
+    if err != nil {
+        return nil, err
+    }
+
+    ruleGroups, err := o.ruleClient.PrometheusRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    if ruler == nil {
+        // for out-of-cluster prometheus, parse the rules from the endpoint only
+        return rules.ParseAlertingRules(ruleGroups, false, v2alpha1.RuleLevelCluster,
+            func(group, id string, rule *alerting.AlertingRule) bool {
+                return true
+            })
+    }
+
+    namespace := rulerNamespace
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    return rules.GetAlertingRulesStatus(ruleNamespace.Name, &rules.ResourceRuleChunk{
+        ResourceRulesMap: resourceRulesMap,
+        Custom:           false,
+        Level:            v2alpha1.RuleLevelCluster,
+    }, ruleGroups, ruler.ExternalLabels())
+}
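Two conventions above are easy to miss: an empty namespace addresses cluster-level rules stored in the monitoring namespace, and namespace-level rules get a namespace matcher injected into their query on create/update. Illustrative calls only (`operator` and `params` are placeholders):

// cluster level: resolved against rulerNamespace internally
clusterRules, _ := operator.ListCustomAlertingRules(ctx, "", params)
// namespace level: rule queries are enforced with {namespace="test"} on write
nsRules, _ := operator.ListCustomAlertingRules(ctx, "test", params)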
+
+func (o *operator) getBuiltinAlertingRule(ctx context.Context, ruleId string) (*v2alpha1.GettableAlertingRule, error) {
+
+    ruler, err := o.getPrometheusRuler()
+    if err != nil {
+        return nil, err
+    }
+
+    ruleGroups, err := o.ruleClient.PrometheusRules(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    if ruler == nil {
+        // for out-of-cluster prometheus, parse the rules from the endpoint only
+        alertingRules, err := rules.ParseAlertingRules(ruleGroups, false, v2alpha1.RuleLevelCluster,
+            func(group, id string, rule *alerting.AlertingRule) bool {
+                return ruleId == id
+            })
+        if err != nil {
+            return nil, err
+        }
+        if len(alertingRules) == 0 {
+            return nil, v2alpha1.ErrAlertingRuleNotFound
+        }
+        sort.Slice(alertingRules, func(i, j int) bool {
+            return v2alpha1.AlertingRuleIdCompare(alertingRules[i].Id, alertingRules[j].Id)
+        })
+        return alertingRules[0], nil
+    }
+
+    namespace := rulerNamespace
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return nil, err
+    }
+
+    resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, nil, ruleId)
+    if err != nil {
+        return nil, err
+    }
+
+    if resourceRule == nil {
+        return nil, v2alpha1.ErrAlertingRuleNotFound
+    }
+
+    return rules.GetAlertingRuleStatus(ruleNamespace.Name, &rules.ResourceRule{
+        ResourceRuleItem: *resourceRule,
+        Custom:           false,
+        Level:            v2alpha1.RuleLevelCluster,
+    }, ruleGroups, ruler.ExternalLabels())
+}
+
+func (o *operator) CreateCustomAlertingRule(ctx context.Context, namespace string,
+    rule *v2alpha1.PostableAlertingRule) error {
+    ruler, err := o.getThanosRuler()
+    if err != nil {
+        return err
+    }
+    if ruler == nil {
+        return v2alpha1.ErrThanosRulerNotEnabled
+    }
+
+    var (
+        level              v2alpha1.RuleLevel
+        ruleResourceLabels = make(map[string]string)
+    )
+    for k, v := range o.thanosRuleResourceLabels {
+        ruleResourceLabels[k] = v
+    }
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+        expr, err := rules.InjectExprNamespaceLabel(rule.Query, namespace)
+        if err != nil {
+            return err
+        }
+        rule.Query = expr
+    }
+    ruleResourceLabels[customRuleResourceLabelKeyLevel] = string(level)
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return err
+    }
+
+    extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{customRuleResourceLabelKeyLevel: string(level)})
+    resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, extraRuleResourceSelector, rule.Name)
+    if err != nil {
+        return err
+    }
+    if resourceRule != nil {
+        return v2alpha1.ErrAlertingRuleAlreadyExists
+    }
+
+    return ruler.AddAlertingRule(ctx, ruleNamespace, extraRuleResourceSelector,
+        customRuleGroupDefault, parseToPrometheusRule(rule), ruleResourceLabels)
+}
+
+func (o *operator) UpdateCustomAlertingRule(ctx context.Context, namespace, name string,
+    rule *v2alpha1.PostableAlertingRule) error {
+
+    rule.Name = name
+
+    ruler, err := o.getThanosRuler()
+    if err != nil {
+        return err
+    }
+    if ruler == nil {
+        return v2alpha1.ErrThanosRulerNotEnabled
+    }
+
+    var (
+        level              v2alpha1.RuleLevel
+        ruleResourceLabels = make(map[string]string)
+    )
+    for k, v := range o.thanosRuleResourceLabels {
+        ruleResourceLabels[k] = v
+    }
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+        expr, err := rules.InjectExprNamespaceLabel(rule.Query, namespace)
+        if err != nil {
+            return err
+        }
+        rule.Query = expr
+    }
+    ruleResourceLabels[customRuleResourceLabelKeyLevel] = string(level)
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return err
+    }
+
+    extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{customRuleResourceLabelKeyLevel: string(level)})
+    resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, extraRuleResourceSelector, rule.Name)
+    if err != nil {
+        return err
+    }
+    if resourceRule == nil {
+        return v2alpha1.ErrAlertingRuleNotFound
+    }
+
+    return ruler.UpdateAlertingRule(ctx, ruleNamespace, extraRuleResourceSelector,
+        resourceRule.Group, parseToPrometheusRule(rule), ruleResourceLabels)
+}
+
+func (o *operator) DeleteCustomAlertingRule(ctx context.Context, namespace, name string) error {
+    ruler, err := o.getThanosRuler()
+    if err != nil {
+        return err
+    }
+    if ruler == nil {
+        return v2alpha1.ErrThanosRulerNotEnabled
+    }
+
+    var level v2alpha1.RuleLevel
+    if namespace == "" {
+        namespace = rulerNamespace
+        level = v2alpha1.RuleLevelCluster
+    } else {
+        level = v2alpha1.RuleLevelNamespace
+    }
+
+    ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
+    if err != nil {
+        return err
+    }
+
+    extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{customRuleResourceLabelKeyLevel: string(level)})
+    resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, extraRuleResourceSelector, name)
+    if err != nil {
+        return err
+    }
+    if resourceRule == nil {
+        return v2alpha1.ErrAlertingRuleNotFound
+    }
+
+    return ruler.DeleteAlertingRule(ctx, ruleNamespace, extraRuleResourceSelector, resourceRule.Group, name)
+}
+
+// getPrometheusRuler gets the in-cluster prometheus
+func (o *operator) getPrometheusRuler() (rules.Ruler, error) {
+    prometheuses, err := o.prometheusInformer.Lister().Prometheuses(rulerNamespace).List(labels.Everything())
+    if err != nil {
+        return nil, errors.Wrap(err, "error listing prometheuses")
+    }
+    if len(prometheuses) > 1 {
+        // multiple prometheus instances in the monitoring namespace are not supported for now
+        return nil, errors.Errorf(
+            "there is more than one prometheus custom resource in %s", rulerNamespace)
+    }
+    if len(prometheuses) == 0 {
+        return nil, nil
+    }
+
+    return rules.NewPrometheusRuler(prometheuses[0], o.ruleResourceInformer, o.promResourceClient), nil
+}
+
+func (o *operator) getThanosRuler() (rules.Ruler, error) {
+    thanosrulers, err := o.thanosRulerInformer.Lister().ThanosRulers(rulerNamespace).List(labels.Everything())
+    if err != nil {
+        return nil, errors.Wrap(err, "error listing thanosrulers")
+    }
+    if len(thanosrulers) > 1 {
+        // multiple thanosruler instances in the monitoring namespace are not supported for now
+        return nil, errors.Errorf(
+            "there is more than one thanosruler custom resource in %s", rulerNamespace)
+    }
+    if len(thanosrulers) == 0 {
+        // if there is no thanos ruler, custom rules will not be supported
+        return nil, nil
+    }
+
+    return rules.NewThanosRuler(thanosrulers[0], o.ruleResourceInformer, o.promResourceClient), nil
+}
+
+func parseToPrometheusRule(rule *v2alpha1.PostableAlertingRule) *promresourcesv1.Rule {
+    return &promresourcesv1.Rule{
+        Alert:       rule.Name,
+        Expr:        intstr.FromString(rule.Query),
+        For:         rule.Duration,
+        Labels:      rule.Labels,
+        Annotations: rule.Annotations,
+    }
+}
+
+func pageAlertingRules(alertingRules []*v2alpha1.GettableAlertingRule,
+    queryParams *v2alpha1.AlertingRuleQueryParams) *v2alpha1.GettableAlertingRuleList {
+
+    alertingRules = queryParams.Filter(alertingRules)
+    queryParams.Sort(alertingRules)
+
+    return &v2alpha1.GettableAlertingRuleList{
+        Total: len(alertingRules),
+        Items: queryParams.Sub(alertingRules),
+    }
+}
+
+func pageAlerts(alertingRules []*v2alpha1.GettableAlertingRule,
+    queryParams *v2alpha1.AlertQueryParams) *v2alpha1.AlertList {
+
+    var alerts []*v2alpha1.Alert
+    for _, rule := range alertingRules {
+        alerts = append(alerts, queryParams.Filter(rule.Alerts)...)
+    }
+    queryParams.Sort(alerts)
+
+    return &v2alpha1.AlertList{
+        Total: len(alerts),
+        Items: queryParams.Sub(alerts),
+    }
+}
diff --git a/pkg/models/alerting/rules/cache.go b/pkg/models/alerting/rules/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed94a5217d9907af650c5dca98f1d7ab5687d6b5
--- /dev/null
+++ b/pkg/models/alerting/rules/cache.go
@@ -0,0 +1,247 @@
+package rules
+
+import (
+    "sort"
+    "sync"
+
+    promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+    prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/client-go/tools/cache"
+    "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
+    "kubesphere.io/kubesphere/pkg/server/errors"
+)
+
+// RuleCache caches all rules from the prometheusrule custom resources
+type RuleCache struct {
+    lock       sync.RWMutex
+    namespaces map[string]*namespaceRuleCache
+}
+
+func NewRuleCache(ruleResourceInformer prominformersv1.PrometheusRuleInformer) *RuleCache {
+    rc := RuleCache{
+        namespaces: make(map[string]*namespaceRuleCache),
+    }
+
+    ruleResourceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: rc.addCache,
+        UpdateFunc: func(oldObj, newObj interface{}) {
+            rc.addCache(newObj)
+        },
+        DeleteFunc: rc.deleteCache,
+    })
+    return &rc
+}
+
+func (c *RuleCache) addCache(referObj interface{}) {
+    pr, ok := referObj.(*promresourcesv1.PrometheusRule)
+    if !ok {
+        return
+    }
+    cr := parseRuleResource(pr)
+
+    c.lock.Lock()
+    defer c.lock.Unlock()
+
+    cn, ok := c.namespaces[pr.Namespace]
+    if !ok || cn == nil {
+        cn = &namespaceRuleCache{
+            namespace: pr.Namespace,
+            resources: make(map[string]*resourceRuleCache),
+        }
+        c.namespaces[pr.Namespace] = cn
+    }
+    cn.resources[pr.Name] = cr
+}
+
+func (c *RuleCache) deleteCache(referObj interface{}) {
+    pr, ok := referObj.(*promresourcesv1.PrometheusRule)
+    if !ok {
+        return
+    }
+
+    c.lock.Lock()
+    defer c.lock.Unlock()
+
+    cn, ok := c.namespaces[pr.Namespace]
+    if !ok {
+        return
+    }
+    delete(cn.resources, pr.Name)
+    if len(cn.resources) == 0 {
+        delete(c.namespaces, pr.Namespace)
+    }
+}
+
+func (c *RuleCache) getResourceRuleCaches(ruler Ruler, ruleNamespace *corev1.Namespace,
+    extraRuleResourceSelector labels.Selector) (map[string]*resourceRuleCache, error) {
+
+    selected, err := ruleNamespaceSelected(ruler, ruleNamespace)
+    if err != nil {
+        return nil, err
+    }
+    if !selected {
+        return nil, nil
+    }
+    rSelector, err := ruler.RuleResourceSelector(extraRuleResourceSelector)
+    if err != nil {
+        return nil, err
+    }
+    var m = make(map[string]*resourceRuleCache)
+
+    c.lock.RLock()
+    defer c.lock.RUnlock()
+
+    cn, ok := c.namespaces[ruleNamespace.Name]
+    if ok && cn != nil {
+        for _, cr := range cn.resources {
+            if rSelector.Matches(labels.Set(cr.Labels)) {
+                m[cr.Name] = cr
+            }
+        }
+    }
+    return m, nil
+}
+
+func (c *RuleCache) GetRule(ruler Ruler, ruleNamespace *corev1.Namespace,
+    extraRuleResourceSelector labels.Selector, idOrName string) (*ResourceRuleItem, error) {
+
+    caches, err := c.getResourceRuleCaches(ruler, ruleNamespace, extraRuleResourceSelector)
+    if err != nil {
+        return nil, err
+    }
+    if len(caches) == 0 {
+        return nil, nil
+    }
+
+    var rules []*ResourceRuleItem
+    switch ruler.(type) {
+    case *PrometheusRuler:
+        for rn, rc := range caches {
+            if rule, ok := rc.IdRules[idOrName]; ok {
+                rules = append(rules, &ResourceRuleItem{
+                    Group:        rule.Group,
+                    Id:           rule.Id,
+                    Rule:         rule.Rule.DeepCopy(),
+                    ResourceName: rn,
+                })
+            }
+        }
+    case *ThanosRuler:
+        for rn, rc := range caches {
+            if nrules, ok := rc.NameRules[idOrName]; ok {
+                for _, nrule := range nrules {
+                    rules = append(rules, &ResourceRuleItem{
+                        Group:        nrule.Group,
+                        Id:           nrule.Id,
+                        Rule:         nrule.Rule.DeepCopy(),
+                        ResourceName: rn,
+                    })
+                }
+            }
+        }
+    default:
+        return nil, errors.New("unsupported ruler type")
+    }
+
+    if l := len(rules); l == 0 {
+        return nil, nil
+    } else if l > 1 {
+        // guarantees the stability of the get operations
+        sort.Slice(rules, func(i, j int) bool {
+            return v2alpha1.AlertingRuleIdCompare(rules[i].Id, rules[j].Id)
+        })
+    }
+    return rules[0], nil
+}
+
+func (c *RuleCache) ListRules(ruler Ruler, ruleNamespace *corev1.Namespace,
+    extraRuleResourceSelector labels.Selector) (map[string]*ResourceRuleCollection, error) {
+
+    caches, err := c.getResourceRuleCaches(ruler, ruleNamespace, extraRuleResourceSelector)
+    if err != nil {
+        return nil, err
+    }
+    if len(caches) == 0 {
+        return nil, nil
+    }
+
+    ret := make(map[string]*ResourceRuleCollection)
+    for rn, rc := range caches {
+        rrs := &ResourceRuleCollection{
+            GroupSet:  make(map[string]struct{}),
+            IdRules:   make(map[string]*ResourceRuleItem),
+            NameRules: make(map[string][]*ResourceRuleItem),
+        }
+        for name, rules := range rc.NameRules {
+            for _, rule := range rules {
+                rrs.GroupSet[rule.Group] = struct{}{}
+                rr := &ResourceRuleItem{
+                    Group:        rule.Group,
+                    Id:           rule.Id,
+                    Rule:         rule.Rule.DeepCopy(),
+                    ResourceName: rn,
+                }
+                rrs.IdRules[rr.Id] = rr
+                rrs.NameRules[name] = append(rrs.NameRules[name], rr)
+            }
+        }
+        if len(rrs.IdRules) > 0 {
+            ret[rn] = rrs
+        }
+    }
+
+    return ret, nil
+}
+
+type namespaceRuleCache struct {
+    namespace string
+    resources map[string]*resourceRuleCache
+}
+
+type resourceRuleCache struct {
+    Name      string
+    Labels    map[string]string
+    GroupSet  map[string]struct{}
+    IdRules   map[string]*cacheRule
+    NameRules map[string][]*cacheRule
+}
+
+type cacheRule struct {
+    Group string
+    Id    string
+    Rule  *promresourcesv1.Rule
+}
+
+func parseRuleResource(pr *promresourcesv1.PrometheusRule) *resourceRuleCache {
+    var (
+        groupSet  = make(map[string]struct{})
+        idRules   = make(map[string]*cacheRule)
+        nameRules = make(map[string][]*cacheRule)
+    )
+    for i := 0; i < len(pr.Spec.Groups); i++ {
+        g := pr.Spec.Groups[i]
+        for j := 0; j < len(g.Rules); j++ {
+            gr := g.Rules[j]
+            if gr.Alert == "" {
+                continue
+            }
+            groupSet[g.Name] = struct{}{}
+            cr := &cacheRule{
+                Group: g.Name,
+                Id:    GenResourceRuleIdIgnoreFormat(g.Name, &gr),
+                Rule:  &gr,
+            }
+            nameRules[cr.Rule.Alert] = append(nameRules[cr.Rule.Alert], cr)
+            idRules[cr.Id] = cr
+        }
+    }
+    return &resourceRuleCache{
+        Name:      pr.Name,
+        Labels:    pr.Labels,
+        GroupSet:  groupSet,
+        IdRules:   idRules,
+        NameRules: nameRules,
+    }
+}
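The cache above gives the operator in-memory reads of every PrometheusRule once the informer has synced. A usage sketch; the factory accessor and constructor are from this PR, the ruler, namespace, and lookup key are placeholders:

ruleInformer := factory.PrometheusSharedInformerFactory().Monitoring().V1().PrometheusRules()
ruleCache := rules.NewRuleCache(ruleInformer)
// Builtin rules resolve by fingerprint id, custom rules by alert name
// (see the type switch in GetRule above).
item, err := ruleCache.GetRule(ruler, ns, labels.Everything(), "TestCPUUsageHigh")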
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1" +) + +type ResourceRuleCollection struct { + GroupSet map[string]struct{} + IdRules map[string]*ResourceRuleItem + NameRules map[string][]*ResourceRuleItem +} + +type ResourceRuleItem struct { + ResourceName string + Group string + Id string + Rule *promresourcesv1.Rule +} + +type ResourceRule struct { + Level v2alpha1.RuleLevel + Custom bool + ResourceRuleItem +} + +type ResourceRuleChunk struct { + Level v2alpha1.RuleLevel + Custom bool + ResourceRulesMap map[string]*ResourceRuleCollection +} diff --git a/pkg/models/alerting/rules/ruler.go b/pkg/models/alerting/rules/ruler.go new file mode 100644 index 0000000000000000000000000000000000000000..887a6e899faedb1956bce7bf0dad7886c6da31f3 --- /dev/null +++ b/pkg/models/alerting/rules/ruler.go @@ -0,0 +1,501 @@ +package rules + +import ( + "context" + "fmt" + "sort" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1" +) + +const ( + customAlertingRuleResourcePrefix = "custom-alerting-rule-" +) + +var ( + maxSecretSize = corev1.MaxSecretSize + maxConfigMapDataSize = int(float64(maxSecretSize) * 0.45) + + errOutOfConfigMapSize = errors.New("out of config map size") +) + +type Ruler interface { + Namespace() string + RuleResourceNamespaceSelector() (labels.Selector, error) + RuleResourceSelector(extraRuleResourceSelector labels.Selector) (labels.Selector, error) + ExternalLabels() func() map[string]string + + ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector) ( + []*promresourcesv1.PrometheusRule, error) + + AddAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error + UpdateAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error + DeleteAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector, + group string, name string) error +} + +type ruleResource promresourcesv1.PrometheusRule + +// deleteAlertingRule deletes the rules with the given name. +// If the rule is deleted, return true to indicate the resource should be updated. +func (r *ruleResource) deleteAlertingRule(name string) (bool, error) { + var ( + nGroups []promresourcesv1.RuleGroup + ok bool + ) + + for _, g := range r.Spec.Groups { + var rules []promresourcesv1.Rule + for _, gr := range g.Rules { + if gr.Alert != "" && gr.Alert == name { + ok = true + continue + } + rules = append(rules, gr) + } + if len(rules) > 0 { + nGroups = append(nGroups, promresourcesv1.RuleGroup{ + Name: g.Name, + Interval: g.Interval, + PartialResponseStrategy: g.PartialResponseStrategy, + Rules: rules, + }) + } + } + + if ok { + r.Spec.Groups = nGroups + } + return ok, nil +} + +// updateAlertingRule updates the rule with the given group. 
+// If the rule is updated, return true to indicate the resource should be updated. +func (r *ruleResource) updateAlertingRule(groupName string, rule *promresourcesv1.Rule) (bool, error) { + var ( + ok bool + pr = (promresourcesv1.PrometheusRule)(*r) + npr = pr.DeepCopy() + groupMap = make(map[string]*promresourcesv1.RuleGroup) + ) + + for _, g := range npr.Spec.Groups { + var rules []promresourcesv1.Rule + for i, gr := range g.Rules { + if gr.Alert != "" && gr.Alert == rule.Alert { + ok = true + continue + } + rules = append(rules, g.Rules[i]) + } + if len(rules) > 0 { + groupMap[g.Name] = &promresourcesv1.RuleGroup{ + Name: g.Name, + Interval: g.Interval, + PartialResponseStrategy: g.PartialResponseStrategy, + Rules: rules, + } + } + } + + if ok { + if g, exist := groupMap[groupName]; exist { + g.Rules = append(g.Rules, *rule) + } else { + groupMap[groupName] = &promresourcesv1.RuleGroup{ + Name: groupName, + Rules: []promresourcesv1.Rule{*rule}, + } + } + + var groups []promresourcesv1.RuleGroup + for _, g := range groupMap { + groups = append(groups, *g) + } + + npr.Spec.Groups = groups + content, err := yaml.Marshal(npr.Spec) + if err != nil { + return false, errors.Wrap(err, "failed to unmarshal content") + } + + if len(string(content)) < maxConfigMapDataSize { // check size limit + r.Spec.Groups = groups + return true, nil + } + return false, errOutOfConfigMapSize + } + return false, nil +} + +func (r *ruleResource) addAlertingRule(group string, rule *promresourcesv1.Rule) (bool, error) { + var ( + err error + pr = (promresourcesv1.PrometheusRule)(*r) + npr = pr.DeepCopy() + ok bool + ) + + for i := 0; i < len(npr.Spec.Groups); i++ { + if npr.Spec.Groups[i].Name == group { + npr.Spec.Groups[i].Rules = append(npr.Spec.Groups[i].Rules, *rule) + ok = true + break + } + } + if !ok { // add a group when there is no group with the specified group name + npr.Spec.Groups = append(npr.Spec.Groups, promresourcesv1.RuleGroup{ + Name: group, + Rules: []promresourcesv1.Rule{*rule}, + }) + } + + content, err := yaml.Marshal(npr.Spec) + if err != nil { + return false, errors.Wrap(err, "failed to unmarshal content") + } + + if len(string(content)) < maxConfigMapDataSize { // check size limit + r.Spec.Groups = npr.Spec.Groups + return true, nil + } else { + return false, errOutOfConfigMapSize + } +} + +func (r *ruleResource) commit(ctx context.Context, prometheusResourceClient promresourcesclient.Interface) error { + var pr = (promresourcesv1.PrometheusRule)(*r) + if len(pr.Spec.Groups) == 0 { + return prometheusResourceClient.MonitoringV1().PrometheusRules(r.Namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}) + } + newPr, err := prometheusResourceClient.MonitoringV1().PrometheusRules(r.Namespace).Update(ctx, &pr, metav1.UpdateOptions{}) + if err != nil { + return err + } + newPr.DeepCopyInto(&pr) + return nil +} + +type PrometheusRuler struct { + resource *promresourcesv1.Prometheus + informer prominformersv1.PrometheusRuleInformer + client promresourcesclient.Interface +} + +func NewPrometheusRuler(resource *promresourcesv1.Prometheus, informer prominformersv1.PrometheusRuleInformer, + client promresourcesclient.Interface) Ruler { + return &PrometheusRuler{ + resource: resource, + informer: informer, + client: client, + } +} + +func (r *PrometheusRuler) Namespace() string { + return r.resource.Namespace +} + +func (r *PrometheusRuler) RuleResourceNamespaceSelector() (labels.Selector, error) { + if r.resource.Spec.RuleNamespaceSelector == nil { + return nil, nil + } + return 
metav1.LabelSelectorAsSelector(r.resource.Spec.RuleNamespaceSelector) +} + +func (r *PrometheusRuler) RuleResourceSelector(extraRuleResourceSelector labels.Selector) (labels.Selector, error) { + rSelector, err := metav1.LabelSelectorAsSelector(r.resource.Spec.RuleSelector) + if err != nil { + return nil, err + } + if extraRuleResourceSelector != nil { + if requirements, ok := extraRuleResourceSelector.Requirements(); ok { + rSelector = rSelector.Add(requirements...) + } + } + return rSelector, nil +} + +func (r *PrometheusRuler) ExternalLabels() func() map[string]string { + // ignoring the external labels because rules gotten from prometheus endpoint do not include them + return nil +} + +func (r *PrometheusRuler) ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector) ( + []*promresourcesv1.PrometheusRule, error) { + selected, err := ruleNamespaceSelected(r, ruleNamespace) + if err != nil { + return nil, err + } + if !selected { + return nil, nil + } + rSelector, err := r.RuleResourceSelector(extraRuleResourceSelector) + if err != nil { + return nil, err + } + return r.informer.Lister().PrometheusRules(ruleNamespace.Name).List(rSelector) +} + +func (r *PrometheusRuler) AddAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error { + return errors.New("not supported to add rules for prometheus") +} + +func (r *PrometheusRuler) UpdateAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error { + return errors.New("not supported to update rules for prometheus") +} + +func (r *PrometheusRuler) DeleteAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, + group string, name string) error { + return errors.New("not supported to update rules for prometheus") +} + +type ThanosRuler struct { + resource *promresourcesv1.ThanosRuler + informer prominformersv1.PrometheusRuleInformer + client promresourcesclient.Interface +} + +func NewThanosRuler(resource *promresourcesv1.ThanosRuler, informer prominformersv1.PrometheusRuleInformer, + client promresourcesclient.Interface) Ruler { + return &ThanosRuler{ + resource: resource, + informer: informer, + client: client, + } +} + +func (r *ThanosRuler) Namespace() string { + return r.resource.Namespace +} + +func (r *ThanosRuler) RuleResourceNamespaceSelector() (labels.Selector, error) { + if r.resource.Spec.RuleNamespaceSelector == nil { + return nil, nil + } + return metav1.LabelSelectorAsSelector(r.resource.Spec.RuleNamespaceSelector) +} + +func (r *ThanosRuler) RuleResourceSelector(extraRuleSelector labels.Selector) (labels.Selector, error) { + rSelector, err := metav1.LabelSelectorAsSelector(r.resource.Spec.RuleSelector) + if err != nil { + return nil, err + } + if extraRuleSelector != nil { + if requirements, ok := extraRuleSelector.Requirements(); ok { + rSelector = rSelector.Add(requirements...) 
+ } + } + return rSelector, nil +} + +func (r *ThanosRuler) ExternalLabels() func() map[string]string { + // rules gotten from thanos ruler endpoint include the labels + lbls := make(map[string]string) + if ls := r.resource.Spec.Labels; ls != nil { + for k, v := range ls { + lbls[k] = v + } + } + return func() map[string]string { + return lbls + } +} + +func (r *ThanosRuler) ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleSelector labels.Selector) ( + []*promresourcesv1.PrometheusRule, error) { + selected, err := ruleNamespaceSelected(r, ruleNamespace) + if err != nil { + return nil, err + } + if !selected { + return nil, nil + } + rSelector, err := r.RuleResourceSelector(extraRuleSelector) + if err != nil { + return nil, err + } + return r.informer.Lister().PrometheusRules(ruleNamespace.Name).List(rSelector) +} + +func (r *ThanosRuler) AddAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error { + + prometheusRules, err := r.ListRuleResources(ruleNamespace, extraRuleResourceSelector) + if err != nil { + return err + } + + return r.addAlertingRule(ctx, ruleNamespace, prometheusRules, nil, group, rule, ruleResourceLabels) +} + +func (r *ThanosRuler) addAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + prometheusRules []*promresourcesv1.PrometheusRule, excludeRuleResources map[string]*ruleResource, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error { + + sort.Slice(prometheusRules, func(i, j int) bool { + return len(fmt.Sprint(prometheusRules[i])) <= len(fmt.Sprint(prometheusRules[j])) + }) + + for _, prometheusRule := range prometheusRules { + if len(excludeRuleResources) > 0 { + if _, ok := excludeRuleResources[prometheusRule.Name]; ok { + continue + } + } + resource := ruleResource(*prometheusRule) + if ok, err := resource.addAlertingRule(group, rule); err != nil { + if err == errOutOfConfigMapSize { + break + } + return err + } else if ok { + if err = resource.commit(ctx, r.client); err != nil { + return err + } + return nil + } + } + // create a new rule resource and add rule into it when all existing rule resources are full. + newPromRule := promresourcesv1.PrometheusRule{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ruleNamespace.Name, + GenerateName: customAlertingRuleResourcePrefix, + Labels: ruleResourceLabels, + }, + Spec: promresourcesv1.PrometheusRuleSpec{ + Groups: []promresourcesv1.RuleGroup{{ + Name: group, + Rules: []promresourcesv1.Rule{*rule}, + }}, + }, + } + if _, err := r.client.MonitoringV1(). 
+ PrometheusRules(ruleNamespace.Name).Create(ctx, &newPromRule, metav1.CreateOptions{}); err != nil { + return errors.Wrapf(err, "error creating a prometheusrule resource %s/%s", + newPromRule.Namespace, newPromRule.Name) + } + return nil +} + +func (r *ThanosRuler) UpdateAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, + group string, rule *promresourcesv1.Rule, ruleResourceLabels map[string]string) error { + + prometheusRules, err := r.ListRuleResources(ruleNamespace, extraRuleResourceSelector) + if err != nil { + return err + } + + var ( + found bool + success bool + resourcesToDelRule = make(map[string]*ruleResource) + ) + for _, prometheusRule := range prometheusRules { + resource := ruleResource(*prometheusRule) + if success { // If the update has been successful, delete the possible same rule in other resources + if ok, err := resource.deleteAlertingRule(rule.Alert); err != nil { + return err + } else if ok { + if err = resource.commit(ctx, r.client); err != nil { + return err + } + } + continue + } + if ok, err := resource.updateAlertingRule(group, rule); err != nil { + if err == errOutOfConfigMapSize { + // updating the rule in the resource will oversize the size limit, + // so delete it and then add the new rule to a new resource. + resourcesToDelRule[resource.Name] = &resource + found = true + } else { + return err + } + } else if ok { + if err = resource.commit(ctx, r.client); err != nil { + return err + } + found = true + success = true + } + } + + if !found { + return v2alpha1.ErrAlertingRuleNotFound + } + + if !success { + err := r.addAlertingRule(ctx, ruleNamespace, prometheusRules, resourcesToDelRule, group, rule, ruleResourceLabels) + if err != nil { + return err + } + } + for _, resource := range resourcesToDelRule { + if ok, err := resource.deleteAlertingRule(rule.Alert); err != nil { + return err + } else if ok { + if err = resource.commit(ctx, r.client); err != nil { + return err + } + } + } + return nil +} + +func (r *ThanosRuler) DeleteAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace, + extraRuleResourceSelector labels.Selector, group string, name string) error { + prometheusRules, err := r.ListRuleResources(ruleNamespace, extraRuleResourceSelector) + if err != nil { + return err + } + var success bool + for _, prometheusRule := range prometheusRules { + resource := ruleResource(*prometheusRule) + if ok, err := resource.deleteAlertingRule(name); err != nil { + return err + } else if ok { + if err = resource.commit(ctx, r.client); err != nil { + return err + } + success = true + } + } + if !success { + return v2alpha1.ErrAlertingRuleNotFound + } + return nil +} + +func ruleNamespaceSelected(r Ruler, ruleNamespace *corev1.Namespace) (bool, error) { + rnSelector, err := r.RuleResourceNamespaceSelector() + if err != nil { + return false, err + } + if rnSelector == nil { // refer to the comment of Prometheus.Spec.RuleResourceNamespaceSelector + if r.Namespace() != ruleNamespace.Name { + return false, nil + } + } else { + if !rnSelector.Matches(labels.Set(ruleNamespace.Labels)) { + return false, nil + } + } + return true, nil +} diff --git a/pkg/models/alerting/rules/utils.go b/pkg/models/alerting/rules/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..b22f1ca9a781e4cf90bb40f73fede00000bc5f1d --- /dev/null +++ b/pkg/models/alerting/rules/utils.go @@ -0,0 +1,381 @@ +package rules + +import ( + "kubesphere.io/kubesphere/pkg/simple/client/alerting" + "path/filepath" + 
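ruleNamespaceSelected mirrors the prometheus-operator convention for Spec.RuleNamespaceSelector, which is worth spelling out. A sketch using only apimachinery (the label key/value is illustrative):

// nil selector   -> rule resources are discovered only in the ruler's own namespace
// empty selector -> labels.Everything(), i.e. any namespace matches
sel, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{}) // empty, not nil
fmt.Println(sel.Matches(labels.Set{"kubesphere.io/workspace": "system-workspace"})) // true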
"sort" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/prometheus-community/prom-label-proxy/injectproxy" + promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + prommodel "github.com/prometheus/common/model" + promlabels "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/rules" + "k8s.io/klog" + "kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1" +) + +const ( + ErrGenRuleId = "error generating rule id" + + LabelKeyInternalRuleGroup = "__rule_group__" + LabelKeyInternalRuleName = "__rule_name__" + LabelKeyInternalRuleQuery = "__rule_query__" + LabelKeyInternalRuleDuration = "__rule_duration__" +) + +func FormatExpr(expr string) (string, error) { + parsedExpr, err := parser.ParseExpr(expr) + if err == nil { + return parsedExpr.String(), nil + } + return "", errors.Wrapf(err, "failed to parse expr: %s", expr) +} + +// InjectExprNamespaceLabel injects an label, whose key is "namespace" and whose value is the given namespace, +// into the prometheus query expression, which will limit the query scope. +func InjectExprNamespaceLabel(expr, namespace string) (string, error) { + parsedExpr, err := parser.ParseExpr(expr) + if err != nil { + return "", err + } + if err = injectproxy.NewEnforcer(&promlabels.Matcher{ + Type: promlabels.MatchEqual, + Name: "namespace", + Value: namespace, + }).EnforceNode(parsedExpr); err == nil { + return parsedExpr.String(), nil + } + return "", err +} + +func FormatDuration(for_ string) (string, error) { + var duration prommodel.Duration + var err error + if for_ != "" { + duration, err = prommodel.ParseDuration(for_) + if err != nil { + return "", errors.Wrapf(err, "failed to parse Duration string(\"%s\") to time.Duration", for_) + } + } + return duration.String(), nil +} + +func parseDurationSeconds(durationSeconds float64) string { + return prommodel.Duration(int64(durationSeconds * float64(time.Second))).String() +} + +func GenResourceRuleIdIgnoreFormat(group string, rule *promresourcesv1.Rule) string { + query, err := FormatExpr(rule.Expr.String()) + if err != nil { + klog.Warning(errors.Wrapf(err, "invalid alerting rule(%s)", rule.Alert)) + query = rule.Expr.String() + } + duration, err := FormatDuration(rule.For) + if err != nil { + klog.Warning(errors.Wrapf(err, "invalid alerting rule(%s)", rule.Alert)) + duration = rule.For + } + + lbls := make(map[string]string) + for k, v := range rule.Labels { + lbls[k] = v + } + lbls[LabelKeyInternalRuleGroup] = group + lbls[LabelKeyInternalRuleName] = rule.Alert + lbls[LabelKeyInternalRuleQuery] = query + lbls[LabelKeyInternalRuleDuration] = duration + + return prommodel.Fingerprint(prommodel.LabelsToSignature(lbls)).String() +} + +func GenEndpointRuleId(group string, epRule *alerting.AlertingRule, + externalLabels func() map[string]string) (string, error) { + query, err := FormatExpr(epRule.Query) + if err != nil { + return "", err + } + duration := parseDurationSeconds(epRule.Duration) + + var labelsMap map[string]string + if externalLabels == nil { + labelsMap = epRule.Labels + } else { + labelsMap = make(map[string]string) + extLabels := externalLabels() + for key, value := range epRule.Labels { + if v, ok := extLabels[key]; !(ok && value == v) { + labelsMap[key] = value + } + } + } + + lbls := make(map[string]string) + for k, v := range labelsMap { + lbls[k] = v + } + lbls[LabelKeyInternalRuleGroup] = group + lbls[LabelKeyInternalRuleName] = epRule.Name + 
+
+func GenEndpointRuleId(group string, epRule *alerting.AlertingRule,
+    externalLabels func() map[string]string) (string, error) {
+    query, err := FormatExpr(epRule.Query)
+    if err != nil {
+        return "", err
+    }
+    duration := parseDurationSeconds(epRule.Duration)
+
+    var labelsMap map[string]string
+    if externalLabels == nil {
+        labelsMap = epRule.Labels
+    } else {
+        // strip the external labels attached by the ruler, so that the id matches
+        // the one generated from the rule custom resource
+        labelsMap = make(map[string]string)
+        extLabels := externalLabels()
+        for key, value := range epRule.Labels {
+            if v, ok := extLabels[key]; !(ok && value == v) {
+                labelsMap[key] = value
+            }
+        }
+    }
+
+    lbls := make(map[string]string)
+    for k, v := range labelsMap {
+        lbls[k] = v
+    }
+    lbls[LabelKeyInternalRuleGroup] = group
+    lbls[LabelKeyInternalRuleName] = epRule.Name
+    lbls[LabelKeyInternalRuleQuery] = query
+    lbls[LabelKeyInternalRuleDuration] = duration
+
+    return prommodel.Fingerprint(prommodel.LabelsToSignature(lbls)).String(), nil
+}
+
+// GetAlertingRulesStatus mixes the rules from the prometheusrule custom resources with the rules
+// from the ruler endpoints, using the rules from the custom resources as the primary reference.
+func GetAlertingRulesStatus(ruleNamespace string, ruleChunk *ResourceRuleChunk, epRuleGroups []*alerting.RuleGroup,
+    extLabels func() map[string]string) ([]*v2alpha1.GettableAlertingRule, error) {
+
+    var (
+        idEpRules = make(map[string]*alerting.AlertingRule)
+        nameIds   = make(map[string][]string)
+        ret       []*v2alpha1.GettableAlertingRule
+    )
+    for _, group := range epRuleGroups {
+        fileShort := strings.TrimSuffix(filepath.Base(group.File), filepath.Ext(group.File))
+        if !strings.HasPrefix(fileShort, ruleNamespace+"-") {
+            continue
+        }
+        resourceRules, ok := ruleChunk.ResourceRulesMap[strings.TrimPrefix(fileShort, ruleNamespace+"-")]
+        if !ok {
+            continue
+        }
+        if _, ok := resourceRules.GroupSet[group.Name]; !ok {
+            continue
+        }
+
+        for _, epRule := range group.Rules {
+            if eid, err := GenEndpointRuleId(group.Name, epRule, extLabels); err != nil {
+                return nil, errors.Wrap(err, ErrGenRuleId)
+            } else {
+                idEpRules[eid] = epRule
+                nameIds[epRule.Name] = append(nameIds[epRule.Name], eid)
+            }
+        }
+    }
+
+    if ruleChunk.Custom {
+        // guarantee that the names of the custom alerting rules are not repeated
+        var m = make(map[string][]*ResourceRuleItem)
+        for _, resourceRules := range ruleChunk.ResourceRulesMap {
+            for name, rrArr := range resourceRules.NameRules {
+                m[name] = append(m[name], rrArr...)
+            }
+        }
+        for _, rrArr := range m {
+            if l := len(rrArr); l > 0 {
+                if l > 1 {
+                    sort.Slice(rrArr, func(i, j int) bool {
+                        return v2alpha1.AlertingRuleIdCompare(rrArr[i].Id, rrArr[j].Id)
+                    })
+                }
+                resRule := rrArr[0]
+                epRule := idEpRules[resRule.Id]
+                if r := getAlertingRuleStatus(resRule, epRule, ruleChunk.Custom, ruleChunk.Level); r != nil {
+                    ret = append(ret, r)
+                }
+            }
+        }
+    } else {
+        // guarantee that the ids of the builtin alerting rules are not repeated
+        var m = make(map[string]*v2alpha1.GettableAlertingRule)
+        for _, resourceRules := range ruleChunk.ResourceRulesMap {
+            for id, rule := range resourceRules.IdRules {
+                if r := getAlertingRuleStatus(rule, idEpRules[id], ruleChunk.Custom, ruleChunk.Level); r != nil {
+                    m[id] = r
+                }
+            }
+        }
+        for _, r := range m {
+            ret = append(ret, r)
+        }
+    }
+
+    return ret, nil
+}
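The file matching above relies on how prometheus-operator names generated rule files: each PrometheusRule is written out as "<namespace>-<name>.yaml". For example, with the fixture used in this PR's tests:

file := "/etc/thanos/rules/thanos-ruler-thanos-ruler-rulefiles-0/test-custom-alerting-rule-jqbgn.yaml"
fileShort := strings.TrimSuffix(filepath.Base(file), filepath.Ext(file))
// fileShort == "test-custom-alerting-rule-jqbgn"
resourceName := strings.TrimPrefix(fileShort, "test"+"-")
// resourceName == "custom-alerting-rule-jqbgn" -> the key into ResourceRulesMap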
+
+func GetAlertingRuleStatus(ruleNamespace string, rule *ResourceRule, epRuleGroups []*alerting.RuleGroup,
+    extLabels func() map[string]string) (*v2alpha1.GettableAlertingRule, error) {
+
+    if rule == nil || rule.Rule == nil {
+        return nil, nil
+    }
+
+    var epRules = make(map[string]*alerting.AlertingRule)
+    for _, group := range epRuleGroups {
+        fileShort := strings.TrimSuffix(filepath.Base(group.File), filepath.Ext(group.File))
+        if !strings.HasPrefix(fileShort, ruleNamespace+"-") {
+            continue
+        }
+        if strings.TrimPrefix(fileShort, ruleNamespace+"-") != rule.ResourceName {
+            continue
+        }
+
+        for _, epRule := range group.Rules {
+            if eid, err := GenEndpointRuleId(group.Name, epRule, extLabels); err != nil {
+                return nil, errors.Wrap(err, ErrGenRuleId)
+            } else {
+                if rule.Rule.Alert == epRule.Name {
+                    epRules[eid] = epRule
+                }
+            }
+        }
+    }
+
+    var epRule *alerting.AlertingRule
+    if rule.Custom {
+        // guarantees the stability of the get operations
+        var ids []string
+        for k := range epRules {
+            ids = append(ids, k)
+        }
+        if l := len(ids); l > 0 {
+            if l > 1 {
+                sort.Slice(ids, func(i, j int) bool {
+                    return v2alpha1.AlertingRuleIdCompare(ids[i], ids[j])
+                })
+            }
+            epRule = epRules[ids[0]]
+        }
+    } else {
+        epRule = epRules[rule.Id]
+    }
+
+    return getAlertingRuleStatus(&rule.ResourceRuleItem, epRule, rule.Custom, rule.Level), nil
+}
+
+func getAlertingRuleStatus(resRule *ResourceRuleItem, epRule *alerting.AlertingRule,
+    custom bool, level v2alpha1.RuleLevel) *v2alpha1.GettableAlertingRule {
+
+    if resRule == nil || resRule.Rule == nil {
+        return nil
+    }
+
+    rule := v2alpha1.GettableAlertingRule{
+        AlertingRule: v2alpha1.AlertingRule{
+            Id:          resRule.Id,
+            Name:        resRule.Rule.Alert,
+            Query:       resRule.Rule.Expr.String(),
+            Duration:    resRule.Rule.For,
+            Labels:      resRule.Rule.Labels,
+            Annotations: resRule.Rule.Annotations,
+        },
+        State:  stateInactiveString,
+        Health: string(rules.HealthUnknown),
+    }
+
+    if epRule != nil {
+        // the state information and the alerts associated with the rule come from the endpoint side
+        if epRule.Health != "" {
+            rule.Health = epRule.Health
+        }
+        rule.LastError = epRule.LastError
+        rule.LastEvaluation = epRule.LastEvaluation
+        rule.EvaluationDurationSeconds = epRule.EvaluationTime
+
+        rState := strings.ToLower(epRule.State)
+        cliRuleStateEmpty := rState == ""
+        if !cliRuleStateEmpty {
+            rule.State = rState
+        }
+        for _, a := range epRule.Alerts {
+            aState := strings.ToLower(a.State)
+            if cliRuleStateEmpty {
+                // rules fetched from a lower version of prometheus or thanos ruler may not contain
+                // the state property, so derive the rule state from the states of its alerts
+                if alertState(rule.State) < alertState(aState) {
+                    rule.State = aState
+                }
+            }
+            rule.Alerts = append(rule.Alerts, &v2alpha1.Alert{
+                ActiveAt:    a.ActiveAt,
+                Labels:      a.Labels,
+                Annotations: a.Annotations,
+                State:       aState,
+                Value:       a.Value,
+
+                RuleId:   rule.Id,
+                RuleName: rule.Name,
+            })
+        }
+    }
+    return &rule
+}
+
+func ParseAlertingRules(epRuleGroups []*alerting.RuleGroup, custom bool, level v2alpha1.RuleLevel,
+    filterFunc func(group, ruleId string, rule *alerting.AlertingRule) bool) ([]*v2alpha1.GettableAlertingRule, error) {
+
+    var ret []*v2alpha1.GettableAlertingRule
+    for _, g := range epRuleGroups {
+        for _, r := range g.Rules {
+            id, err := GenEndpointRuleId(g.Name, r, nil)
+            if err != nil {
+                return nil, err
+            }
+            if filterFunc(g.Name, id, r) {
+                rule := &v2alpha1.GettableAlertingRule{
+                    AlertingRule: v2alpha1.AlertingRule{
+                        Id:          id,
+                        Name:        r.Name,
+                        Query:       r.Query,
+                        Duration:    parseDurationSeconds(r.Duration),
+                        Labels:      r.Labels,
+                        Annotations: r.Annotations,
+                    },
+                    State:                     r.State,
+                    Health:                    string(r.Health),
+                    LastError:                 r.LastError,
+                    LastEvaluation:            r.LastEvaluation,
+                    EvaluationDurationSeconds: r.EvaluationTime,
+                }
+                if rule.Health == "" {
+                    rule.Health = string(rules.HealthUnknown)
+                }
+                ruleStateEmpty := rule.State == ""
+                if ruleStateEmpty {
+                    rule.State = stateInactiveString
+                }
+                for _, a := range r.Alerts {
+                    aState := strings.ToLower(a.State)
+                    if ruleStateEmpty {
+                        // rules fetched from a lower version of prometheus or thanos ruler may not contain
+                        // the state property, so derive the rule state from the states of its alerts
+                        if alertState(rule.State) < alertState(aState) {
+                            rule.State = aState
+                        }
+                    }
+                    rule.Alerts = append(rule.Alerts, &v2alpha1.Alert{
+                        ActiveAt:    a.ActiveAt,
+                        Labels:      a.Labels,
+                        Annotations: a.Annotations,
+                        State:       aState,
+                        Value:       a.Value,
+
+                        RuleId:   rule.Id,
+                        RuleName: rule.Name,
+                    })
+                }
+
+                ret = append(ret, rule)
+            }
+        }
+    }
+    return ret, nil
+}
+
+var (
+    statePendingString  = rules.StatePending.String()
+    stateFiringString   = rules.StateFiring.String()
+    stateInactiveString = rules.StateInactive.String()
+)
+
+func alertState(state string) rules.AlertState {
+    switch state {
+    case statePendingString:
+        return rules.StatePending
+    case stateFiringString:
+        return rules.StateFiring
+    case stateInactiveString:
+        return rules.StateInactive
+    }
+    return rules.StateInactive
+}
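When an older prometheus or thanos ruler omits the per-rule state, the code above derives it from the alerts. A compressed illustration of that escalation, using the helpers defined in this file:

ruleState := stateInactiveString
for _, aState := range []string{"pending", "firing"} {
    if alertState(ruleState) < alertState(aState) {
        ruleState = aState
    }
}
// ruleState == "firing": the most severe alert state wins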
test.expected, diff) + } + }) + } +} diff --git a/pkg/models/resources/v1alpha2/resource/resource_test.go b/pkg/models/resources/v1alpha2/resource/resource_test.go index a9ab102d0b24219261b7908f49dc76d7f5b12d11..6fc19b930433de080a2e765a69173121861d29ee 100644 --- a/pkg/models/resources/v1alpha2/resource/resource_test.go +++ b/pkg/models/resources/v1alpha2/resource/resource_test.go @@ -213,7 +213,7 @@ func prepare() (informers.InformerFactory, error) { k8sClient := fakek8s.NewSimpleClientset() istioClient := fakeistio.NewSimpleClientset() snapshotClient := fakesnapshot.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, nil, nil) k8sInformerFactory := fakeInformerFactory.KubernetesSharedInformerFactory() diff --git a/pkg/models/resources/v1alpha3/resource/resource_test.go b/pkg/models/resources/v1alpha3/resource/resource_test.go index 8502ce92d695b69eb638ee4575912107fbfa590c..ac63337440bc61ee78e516149d9d16eaf5831112 100644 --- a/pkg/models/resources/v1alpha3/resource/resource_test.go +++ b/pkg/models/resources/v1alpha3/resource/resource_test.go @@ -108,7 +108,7 @@ func prepare() *ResourceGetter { istioClient := fakeistio.NewSimpleClientset() snapshotClient := fakesnapshot.NewSimpleClientset() apiextensionsClient := fakeapiextensions.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, apiextensionsClient) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, snapshotClient, apiextensionsClient, nil) for _, namespace := range namespaces { fakeInformerFactory.KubernetesSharedInformerFactory().Core().V1(). diff --git a/pkg/models/tenant/tenent_test.go b/pkg/models/tenant/tenent_test.go index b99bdadbb4b8fb0c2d78ca07510fcc03e667a71b..d2a6311af1135bf2d02d404fcbf3b6cad7f10a08 100644 --- a/pkg/models/tenant/tenent_test.go +++ b/pkg/models/tenant/tenent_test.go @@ -491,7 +491,7 @@ func prepare() Interface { ksClient := fakeks.NewSimpleClientset([]runtime.Object{testWorkspace, systemWorkspace}...) k8sClient := fakek8s.NewSimpleClientset([]runtime.Object{testNamespace, kubesphereSystem}...) istioClient := fakeistio.NewSimpleClientset() - fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, nil, nil) + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, nil, nil, nil) for _, workspace := range workspaces { fakeInformerFactory.KubeSphereSharedInformerFactory().Tenant().V1alpha1(). diff --git a/pkg/simple/client/alerting/options.go b/pkg/simple/client/alerting/options.go index 2ab11da2d133e6b9a9c1e55362f15ee3a373e555..c8a419e920cacf0355c278c4e9886018ce7125cf 100644 --- a/pkg/simple/client/alerting/options.go +++ b/pkg/simple/client/alerting/options.go @@ -16,8 +16,21 @@ limitations under the License. 
 package alerting
 
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"kubesphere.io/kubesphere/pkg/utils/reflectutils"
+)
+
 type Options struct {
 	Endpoint string `json:"endpoint" yaml:"endpoint"`
+
+	// The following options apply to alerting v2alpha1 and higher versions.
+	PrometheusEndpoint       string `json:"prometheusEndpoint" yaml:"prometheusEndpoint"`
+	ThanosRulerEndpoint      string `json:"thanosRulerEndpoint" yaml:"thanosRulerEndpoint"`
+	ThanosRuleResourceLabels string `json:"thanosRuleResourceLabels" yaml:"thanosRuleResourceLabels"`
 }
 
 func NewAlertingOptions() *Options {
@@ -26,13 +39,37 @@ func NewAlertingOptions() *Options {
 	}
 }
 
-func (s *Options) ApplyTo(options *Options) {
-	if options == nil {
-		options = s
-		return
-	}
+func (o *Options) ApplyTo(options *Options) {
+	reflectutils.Override(options, o)
+}
 
-	if s.Endpoint != "" {
-		options.Endpoint = s.Endpoint
+func (o *Options) Validate() []error {
+	errs := []error{}
+
+	if len(o.ThanosRuleResourceLabels) > 0 {
+		lblStrings := strings.Split(o.ThanosRuleResourceLabels, ",")
+		for _, lblString := range lblStrings {
+			if len(lblString) > 0 {
+				lbl := strings.Split(lblString, "=")
+				if len(lbl) != 2 {
+					errs = append(errs, fmt.Errorf("invalid alerting-thanos-rule-resource-labels arg: %s", o.ThanosRuleResourceLabels))
+					break
+				}
+			}
+		}
 	}
+
+	return errs
+}
+
+func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
+	fs.StringVar(&o.Endpoint, "alerting-server-endpoint", c.Endpoint,
+		"Alerting server endpoint for alerting v1.")
+
+	fs.StringVar(&o.PrometheusEndpoint, "alerting-prometheus-endpoint", c.PrometheusEndpoint,
+		"Prometheus service endpoint from which built-in alerting rules are fetched (alerting v2alpha1 or higher required)")
+	fs.StringVar(&o.ThanosRulerEndpoint, "alerting-thanos-ruler-endpoint", c.ThanosRulerEndpoint,
+		"Thanos Ruler service endpoint from which custom alerting rules are fetched (alerting v2alpha1 or higher required)")
+	fs.StringVar(&o.ThanosRuleResourceLabels, "alerting-thanos-rule-resource-labels", c.ThanosRuleResourceLabels,
+		"Labels used by Thanos Ruler to select PrometheusRule custom resources, e.g. thanosruler=thanos-ruler,role=custom-alerting-rules (alerting v2alpha1 or higher required)")
 }
diff --git a/pkg/simple/client/alerting/rule_client.go b/pkg/simple/client/alerting/rule_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..e0d82b1b42b875ff938a38a30aedbab9fbcfe111
--- /dev/null
+++ b/pkg/simple/client/alerting/rule_client.go
@@ -0,0 +1,173 @@
+package alerting
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/api"
+)
+
+const (
+	apiPrefix      = "/api/v1"
+	epRules        = apiPrefix + "/rules"
+	statusAPIError = 422
+
+	statusSuccess status = "success"
+	statusError   status = "error"
+
+	ErrBadData     ErrorType = "bad_data"
+	ErrTimeout     ErrorType = "timeout"
+	ErrCanceled    ErrorType = "canceled"
+	ErrExec        ErrorType = "execution"
+	ErrBadResponse ErrorType = "bad_response"
+	ErrServer      ErrorType = "server_error"
+	ErrClient      ErrorType = "client_error"
+)
+
+type status string
+
+type ErrorType string
+
+type Error struct {
+	Type   ErrorType
+	Msg    string
+	Detail string
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Type, e.Msg)
+}
+
+type response struct {
+	Status    status          `json:"status"`
+	Data      json.RawMessage `json:"data,omitempty"`
+	ErrorType ErrorType       `json:"errorType,omitempty"`
+	Error     string          `json:"error,omitempty"`
+	Warnings  []string        `json:"warnings,omitempty"`
+}
+
+type RuleClient interface {
+	PrometheusRules(ctx context.Context) ([]*RuleGroup, error)
+	ThanosRules(ctx context.Context) ([]*RuleGroup, error)
+}
+
+type ruleClient struct {
+	prometheus  api.Client
+	thanosruler api.Client
+}
+
+func (c *ruleClient) PrometheusRules(ctx context.Context) ([]*RuleGroup, error) {
+	if c.prometheus != nil {
+		return c.rules(c.prometheus, ctx)
+	}
+	return nil, nil
+}
+
+func (c *ruleClient) ThanosRules(ctx context.Context) ([]*RuleGroup, error) {
+	if c.thanosruler != nil {
+		return c.rules(c.thanosruler, ctx)
+	}
+	return nil, nil
+}
+
+func (c *ruleClient) rules(client api.Client, ctx context.Context) ([]*RuleGroup, error) {
+	u := client.URL(epRules, nil)
+	q := u.Query()
+	q.Add("type", "alert")
+	u.RawQuery = q.Encode()
+
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating request")
+	}
+
+	_, body, _, err := c.do(client, ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "error doing request")
+	}
+
+	var result struct {
+		Groups []*RuleGroup
+	}
+	err = json.Unmarshal(body, &result)
+	if err != nil {
+		return nil, errors.Wrap(err, "error unmarshaling rule groups")
+	}
+	return result.Groups, nil
+}
+
+func (c *ruleClient) do(client api.Client, ctx context.Context, req *http.Request) (*http.Response, []byte, []string, error) {
+	resp, body, e := client.Do(ctx, req)
+	if e != nil {
+		return resp, body, nil, e
+	}
+
+	code := resp.StatusCode
+
+	if code/100 != 2 && !apiError(code) {
+		errorType, errorMsg := errorTypeAndMsgFor(resp)
+		return resp, body, nil, &Error{
+			Type:   errorType,
+			Msg:    errorMsg,
+			Detail: string(body),
+		}
+	}
+
+	var result response
+	if http.StatusNoContent != code {
+		if jsonErr := json.Unmarshal(body, &result); jsonErr != nil {
+			return resp, body, nil, &Error{
+				Type: ErrBadResponse,
+				Msg:  jsonErr.Error(),
+			}
+		}
+	}
+
+	var err error
+	if apiError(code) && result.Status == statusSuccess {
+		err = &Error{
+			Type: ErrBadResponse,
+			Msg:  "inconsistent body for response code",
+		}
+	}
+	if result.Status == statusError {
+		err = &Error{
+			Type: result.ErrorType,
+			Msg:  result.Error,
+		}
+	}
+
+	return resp, []byte(result.Data), result.Warnings, err
+}
+
+func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) {
+	switch resp.StatusCode / 100 {
+	case 4:
+		return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode)
+	case 5:
+		return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode)
+	}
+	return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode)
+}
+
+func apiError(code int) bool {
+	// These are the codes that the rule server sends when it returns an error.
+	return code == statusAPIError || code == http.StatusBadRequest ||
+		code == http.StatusServiceUnavailable || code == http.StatusInternalServerError
+}
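+
+// NewRuleClient builds a RuleClient from the given Options. A minimal usage
+// sketch (illustrative only; the endpoint address below is an assumed example,
+// not a value shipped with this change):
+//
+//	client, err := NewRuleClient(&Options{PrometheusEndpoint: "http://localhost:9090"})
+//	if err != nil {
+//		// handle the error
+//	}
+//	groups, err := client.PrometheusRules(context.TODO())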
+func NewRuleClient(options *Options) (RuleClient, error) {
+	var c ruleClient
+	if options.PrometheusEndpoint != "" {
+		prometheus, err := api.NewClient(api.Config{Address: options.PrometheusEndpoint})
+		if err != nil {
+			return nil, err
+		}
+		c.prometheus = prometheus
+	}
+	if options.ThanosRulerEndpoint != "" {
+		thanosruler, err := api.NewClient(api.Config{Address: options.ThanosRulerEndpoint})
+		if err != nil {
+			return nil, err
+		}
+		c.thanosruler = thanosruler
+	}
+	return &c, nil
+}
diff --git a/pkg/simple/client/alerting/rule_client_test.go b/pkg/simple/client/alerting/rule_client_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d3f800ea720997f25504e6ae9328424afd22d78
--- /dev/null
+++ b/pkg/simple/client/alerting/rule_client_test.go
@@ -0,0 +1,100 @@
+package alerting
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+func TestListRules(t *testing.T) {
+	var tests = []struct {
+		description string
+		fakeCode    int
+		fakeResp    string
+		expectError bool
+	}{{
+		description: "list alerting rules from prometheus endpoint",
+		expectError: false,
+		fakeCode:    200,
+		fakeResp: `
+{
+    "status": "success",
+    "data": {
+        "groups": [
+            {
+                "name": "kubernetes-resources",
+                "file": "/etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubesphere-monitoring-system-prometheus-k8s-rules.yaml",
+                "rules": [
+                    {
+                        "state": "firing",
+                        "name": "KubeCPUOvercommit",
+                        "query": "sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum) / sum(kube_node_status_allocatable_cpu_cores) > (count(kube_node_status_allocatable_cpu_cores) - 1) / count(kube_node_status_allocatable_cpu_cores)",
+                        "duration": 300,
+                        "labels": {
+                            "severity": "warning"
+                        },
+                        "annotations": {
+                            "message": "Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.",
+                            "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit"
+                        },
+                        "alerts": [
+                            {
+                                "labels": {
+                                    "alertname": "KubeCPUOvercommit",
+                                    "severity": "warning"
+                                },
+                                "annotations": {
+                                    "message": "Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.",
+                                    "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit"
+                                },
+                                "state": "firing",
+                                "activeAt": "2020-09-22T06:18:47.55260138Z",
+                                "value": "4.405e-01"
+                            }
+                        ],
+                        "health": "ok",
+                        "evaluationTime": 0.000894038,
+                        "lastEvaluation": "2020-09-22T08:57:17.566233983Z",
+                        "type": "alerting"
+                    }
+                ]
+            }
+        ]
+    }
+}
+`,
+	}}
+
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			mock := MockService(epRules, test.fakeCode, test.fakeResp)
+			defer mock.Close()
+			c, e := NewRuleClient(&Options{PrometheusEndpoint: mock.URL})
+			if e != nil {
+				t.Fatal(e)
+			}
+			rgs, e := c.PrometheusRules(context.TODO())
+			if test.expectError {
+				if e == nil {
+					t.Fatal("expected an error but got none")
+				}
+				return
+			}
+			if e != nil {
+				t.Fatal(e)
+			}
+			if len(rgs) != 1 || len(rgs[0].Rules) != 1 {
+				t.Fatalf("expected 1 group with 1 rule but got %d group(s)", len(rgs))
+			}
+		})
+	}
+}
+
+func MockService(pattern string, fakeCode int, fakeResp string) *httptest.Server {
+	mux := http.NewServeMux()
+	mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
+		res.WriteHeader(fakeCode)
+		res.Write([]byte(fakeResp))
+	})
+	return httptest.NewServer(mux)
+}
diff --git a/pkg/simple/client/alerting/types.go b/pkg/simple/client/alerting/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..d45d1a8748a772e43a6e30bb3ce748b1be767fb4
--- /dev/null
+++ b/pkg/simple/client/alerting/types.go
@@ -0,0 +1,40 @@
+package alerting
+
+import (
+	"time"
+)
+
+type RuleGroup struct {
+	Name           string          `json:"name"`
+	File           string          `json:"file"`
+	Rules          []*AlertingRule `json:"rules"`
+	Interval       float64         `json:"interval"`
+	EvaluationTime float64         `json:"evaluationTime"`
+	LastEvaluation *time.Time      `json:"lastEvaluation"`
+}
+
+type AlertingRule struct {
+	// State can be "pending", "firing" or "inactive".
+	State       string            `json:"state"`
+	Name        string            `json:"name"`
+	Query       string            `json:"query"`
+	Duration    float64           `json:"duration"`
+	Labels      map[string]string `json:"labels"`
+	Annotations map[string]string `json:"annotations"`
+	Alerts      []*Alert          `json:"alerts"`
+	// Health can be "ok", "err" or "unknown".
+	Health         string     `json:"health"`
+	LastError      string     `json:"lastError,omitempty"`
+	EvaluationTime float64    `json:"evaluationTime"`
+	LastEvaluation *time.Time `json:"lastEvaluation"`
+	// The type of an alerting rule is always "alerting".
+	Type string `json:"type"`
+}
+
+type Alert struct {
+	Labels      map[string]string `json:"labels"`
+	Annotations map[string]string `json:"annotations"`
+	State       string            `json:"state"`
+	ActiveAt    *time.Time        `json:"activeAt,omitempty"`
+	Value       string            `json:"value"`
+}
diff --git a/pkg/simple/client/k8s/fake_client.go b/pkg/simple/client/k8s/fake_client.go
index c5a0c552d0004de4c6f8138b0d610973a554efe2..8b611502f0629e59d1815e038c5a48a55d678a1b 100644
--- a/pkg/simple/client/k8s/fake_client.go
+++ b/pkg/simple/client/k8s/fake_client.go
@@ -18,6 +18,7 @@ package k8s
 
 import (
 	snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v3/clientset/versioned"
+	promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
 	istioclient "istio.io/client-go/pkg/clientset/versioned"
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/client-go/discovery"
@@ -42,6 +43,8 @@ type FakeClient struct {
 
 	ApiExtensionClient apiextensionsclient.Interface
 
+	prometheusClient promresourcesclient.Interface
+
 	MasterURL string
 
 	KubeConfig *rest.Config
@@ -50,7 +53,8 @@ type FakeClient struct {
 
 func NewFakeClientSets(k8sClient kubernetes.Interface, discoveryClient *discovery.DiscoveryClient,
 	kubeSphereClient kubesphere.Interface, istioClient istioclient.Interface, snapshotClient snapshotclient.Interface,
-	apiextensionsclient apiextensionsclient.Interface, masterURL string, kubeConfig *rest.Config) Client {
+	apiextensionsclient apiextensionsclient.Interface, prometheusClient promresourcesclient.Interface,
+	masterURL string, kubeConfig *rest.Config) Client {
 	return &FakeClient{
 		K8sClient:          k8sClient,
 		DiscoveryClient:    discoveryClient,
@@ -58,6 +62,7 @@ func NewFakeClientSets(k8sClient kubernetes.Interface, discoveryClient *discover
 		IstioClient:        istioClient,
 		SnapshotClient:     snapshotClient,
 		ApiExtensionClient: apiextensionsclient,
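+		// The fake Prometheus-operator clientset backs the Prometheus()
+		// accessor below; tests that do not need it pass nil here.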
+ prometheusClient: prometheusClient, MasterURL: masterURL, KubeConfig: kubeConfig, } @@ -87,6 +92,10 @@ func (n *FakeClient) Discovery() discovery.DiscoveryInterface { return n.DiscoveryClient } +func (n *FakeClient) Prometheus() promresourcesclient.Interface { + return n.prometheusClient +} + func (n *FakeClient) Master() string { return n.MasterURL } diff --git a/pkg/simple/client/k8s/kubernetes.go b/pkg/simple/client/k8s/kubernetes.go index 3c7d7b6a7e375507e1d104fe4eb58777c559c5ec..8301a29ffa1dbc0df0f14753ec970b0c477daac8 100644 --- a/pkg/simple/client/k8s/kubernetes.go +++ b/pkg/simple/client/k8s/kubernetes.go @@ -17,7 +17,10 @@ limitations under the License. package k8s import ( + "strings" + snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v3/clientset/versioned" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" istioclient "istio.io/client-go/pkg/clientset/versioned" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/discovery" @@ -25,7 +28,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" - "strings" ) type Client interface { @@ -35,6 +37,7 @@ type Client interface { Snapshot() snapshotclient.Interface ApiExtensions() apiextensionsclient.Interface Discovery() discovery.DiscoveryInterface + Prometheus() promresourcesclient.Interface Master() string Config() *rest.Config } @@ -55,6 +58,8 @@ type kubernetesClient struct { apiextensions apiextensionsclient.Interface + prometheus promresourcesclient.Interface + master string config *rest.Config @@ -77,6 +82,7 @@ func NewKubernetesClientOrDie(options *KubernetesOptions) Client { istio: istioclient.NewForConfigOrDie(config), snapshot: snapshotclient.NewForConfigOrDie(config), apiextensions: apiextensionsclient.NewForConfigOrDie(config), + prometheus: promresourcesclient.NewForConfigOrDie(config), master: config.Host, config: config, } @@ -135,6 +141,11 @@ func NewKubernetesClient(options *KubernetesOptions) (Client, error) { return nil, err } + k.prometheus, err = promresourcesclient.NewForConfig(config) + if err != nil { + return nil, err + } + k.master = options.Master k.config = config @@ -165,6 +176,10 @@ func (k *kubernetesClient) ApiExtensions() apiextensionsclient.Interface { return k.apiextensions } +func (k *kubernetesClient) Prometheus() promresourcesclient.Interface { + return k.prometheus +} + // master address used to generate kubeconfig for downloading func (k *kubernetesClient) Master() string { return k.master diff --git a/pkg/simple/client/k8s/null_client.go b/pkg/simple/client/k8s/null_client.go index fa5c2efd1894608b0f8c3f918297fb15e341bece..b98eb217847bc1c84516c78b13d73bd063de500f 100644 --- a/pkg/simple/client/k8s/null_client.go +++ b/pkg/simple/client/k8s/null_client.go @@ -18,6 +18,7 @@ package k8s import ( snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v3/clientset/versioned" + promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" istio "istio.io/client-go/pkg/clientset/versioned" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/discovery" @@ -57,6 +58,10 @@ func (n nullClient) Discovery() discovery.DiscoveryInterface { return nil } +func (n *nullClient) Prometheus() promresourcesclient.Interface { + return nil +} + func (n nullClient) Master() string { return "" } diff --git 
a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index b27a7ead8cd9be0e9ab927706c9f68d2b47e2883..78e4e5cd5619e7ca920c2571e9d8bd566407ae68 100644 --- a/tools/cmd/doc-gen/main.go +++ b/tools/cmd/doc-gen/main.go @@ -37,6 +37,7 @@ import ( "kubesphere.io/kubesphere/pkg/apiserver/runtime" "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/informers" + alertingv2alpha1 "kubesphere.io/kubesphere/pkg/kapis/alerting/v2alpha1" clusterkapisv1alpha1 "kubesphere.io/kubesphere/pkg/kapis/cluster/v1alpha1" devopsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2" devopsv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha3" @@ -128,6 +129,7 @@ func generateSwaggerJson() []byte { urlruntime.Must(terminalv1alpha2.AddToContainer(container, clientsets.Kubernetes(), nil)) urlruntime.Must(metricsv1alpha2.AddToContainer(container)) urlruntime.Must(networkv1alpha2.AddToContainer(container, "")) + urlruntime.Must(alertingv2alpha1.AddToContainer(container, informerFactory, nil, nil, nil)) config := restfulspec.Config{ WebServices: container.RegisteredWebServices(), diff --git a/vendor/github.com/edsrzf/mmap-go/.gitignore b/vendor/github.com/edsrzf/mmap-go/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9aa02c1ed3d0a949373dca3c1a72572bae5e285d --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/.gitignore @@ -0,0 +1,8 @@ +*.out +*.5 +*.6 +*.8 +*.swp +_obj +_test +testdata diff --git a/vendor/github.com/edsrzf/mmap-go/LICENSE b/vendor/github.com/edsrzf/mmap-go/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8f05f338ac5a0543e5cef2898cf5c06208b761aa --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2011, Evan Shaw +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4cc2bfe1c8cea8f7e39b9970c1af3e5a1ccc8b16 --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/README.md @@ -0,0 +1,12 @@ +mmap-go +======= + +mmap-go is a portable mmap package for the [Go programming language](http://golang.org). +It has been tested on Linux (386, amd64), OS X, and Windows (386). It should also +work on other Unix-like platforms, but hasn't been tested with them. I'm interested +to hear about the results. + +I haven't been able to add more features without adding significant complexity, +so mmap-go doesn't support mprotect, mincore, and maybe a few other things. +If you're running on a Unix-like platform and need some of these features, +I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap). diff --git a/vendor/github.com/edsrzf/mmap-go/mmap.go b/vendor/github.com/edsrzf/mmap-go/mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..29655bd222ca6348c4f0a960715082265dd64f87 --- /dev/null +++ b/vendor/github.com/edsrzf/mmap-go/mmap.go @@ -0,0 +1,117 @@ +// Copyright 2011 Evan Shaw. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file defines the common package interface and contains a little bit of +// factored out logic. + +// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface, +// but doesn't go out of its way to abstract away every little platform detail. +// This specifically means: +// * forked processes may or may not inherit mappings +// * a file's timestamp may or may not be updated by writes through mappings +// * specifying a size larger than the file's actual size can increase the file's size +// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms +package mmap + +import ( + "errors" + "os" + "reflect" + "unsafe" +) + +const ( + // RDONLY maps the memory read-only. + // Attempts to write to the MMap object will result in undefined behavior. + RDONLY = 0 + // RDWR maps the memory as read-write. Writes to the MMap object will update the + // underlying file. + RDWR = 1 << iota + // COPY maps the memory as copy-on-write. Writes to the MMap object will affect + // memory, but the underlying file will remain unchanged. + COPY + // If EXEC is set, the mapped memory is marked as executable. + EXEC +) + +const ( + // If the ANON flag is set, the mapped memory will not be backed by a file. + ANON = 1 << iota +) + +// MMap represents a file mapped into memory. +type MMap []byte + +// Map maps an entire file into memory. +// If ANON is set in flags, f is ignored. +func Map(f *os.File, prot, flags int) (MMap, error) { + return MapRegion(f, -1, prot, flags, 0) +} + +// MapRegion maps part of a file into memory. +// The offset parameter must be a multiple of the system's page size. +// If length < 0, the entire file will be mapped. +// If ANON is set in flags, f is ignored. 
+func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
+	if offset%int64(os.Getpagesize()) != 0 {
+		return nil, errors.New("offset parameter must be a multiple of the system's page size")
+	}
+
+	var fd uintptr
+	if flags&ANON == 0 {
+		fd = uintptr(f.Fd())
+		if length < 0 {
+			fi, err := f.Stat()
+			if err != nil {
+				return nil, err
+			}
+			length = int(fi.Size())
+		}
+	} else {
+		if length <= 0 {
+			return nil, errors.New("anonymous mapping requires non-zero length")
+		}
+		fd = ^uintptr(0)
+	}
+	return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
+}
+
+func (m *MMap) header() *reflect.SliceHeader {
+	return (*reflect.SliceHeader)(unsafe.Pointer(m))
+}
+
+func (m *MMap) addrLen() (uintptr, uintptr) {
+	header := m.header()
+	return header.Data, uintptr(header.Len)
+}
+
+// Lock keeps the mapped region in physical memory, ensuring that it will not be
+// swapped out.
+func (m MMap) Lock() error {
+	return m.lock()
+}
+
+// Unlock reverses the effect of Lock, allowing the mapped region to potentially
+// be swapped out.
+// If m is already unlocked, an error will result.
+func (m MMap) Unlock() error {
+	return m.unlock()
+}
+
+// Flush synchronizes the mapping's contents to the file's contents on disk.
+func (m MMap) Flush() error {
+	return m.flush()
+}
+
+// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
+// m to nil.
+// Trying to read or write any remaining references to m after Unmap is called will
+// result in undefined behavior.
+// Unmap should only be called on the slice value that was originally returned from
+// a call to Map. Calling Unmap on a derived slice may cause errors.
+func (m *MMap) Unmap() error {
+	err := m.unmap()
+	*m = nil
+	return err
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..25b13e51fdffde79e77c4ee12c0031e02b869caa
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux openbsd solaris netbsd
+
+package mmap
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+	flags := unix.MAP_SHARED
+	prot := unix.PROT_READ
+	switch {
+	case inprot&COPY != 0:
+		prot |= unix.PROT_WRITE
+		flags = unix.MAP_PRIVATE
+	case inprot&RDWR != 0:
+		prot |= unix.PROT_WRITE
+	}
+	if inprot&EXEC != 0 {
+		prot |= unix.PROT_EXEC
+	}
+	if inflags&ANON != 0 {
+		flags |= unix.MAP_ANON
+	}
+
+	b, err := unix.Mmap(int(fd), off, len, prot, flags)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func (m MMap) flush() error {
+	return unix.Msync([]byte(m), unix.MS_SYNC)
+}
+
+func (m MMap) lock() error {
+	return unix.Mlock([]byte(m))
+}
+
+func (m MMap) unlock() error {
+	return unix.Munlock([]byte(m))
+}
+
+func (m MMap) unmap() error {
+	return unix.Munmap([]byte(m))
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..7910da257703ae4a2f31e57a415474f115f0289b
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
@@ -0,0 +1,143 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import (
+	"errors"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/windows"
+)
+
+// mmap on Windows is a two-step process.
+// First, we call CreateFileMapping to get a handle.
+// Then, we call MapViewOfFile to get an actual pointer into memory.
+// Because we want to emulate a POSIX-style mmap, we don't want to expose
+// the handle -- only the pointer. We also want to return only a byte slice,
+// not a struct, so it's convenient to manipulate.
+
+// We keep this map so that we can get back the original handle from the memory address.
+
+type addrinfo struct {
+	file    windows.Handle
+	mapview windows.Handle
+}
+
+var handleLock sync.Mutex
+var handleMap = map[uintptr]*addrinfo{}
+
+func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
+	flProtect := uint32(windows.PAGE_READONLY)
+	dwDesiredAccess := uint32(windows.FILE_MAP_READ)
+	switch {
+	case prot&COPY != 0:
+		flProtect = windows.PAGE_WRITECOPY
+		dwDesiredAccess = windows.FILE_MAP_COPY
+	case prot&RDWR != 0:
+		flProtect = windows.PAGE_READWRITE
+		dwDesiredAccess = windows.FILE_MAP_WRITE
+	}
+	if prot&EXEC != 0 {
+		flProtect <<= 4
+		dwDesiredAccess |= windows.FILE_MAP_EXECUTE
+	}
+
+	// The maximum size is the area of the file, starting from 0,
+	// that we wish to allow to be mappable. It is the sum of
+	// the length the user requested, plus the offset where that length
+	// is starting from. This does not map the data into memory.
+	maxSizeHigh := uint32((off + int64(len)) >> 32)
+	maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
+	// TODO: Do we need to set some security attributes? It might help portability.
+	h, errno := windows.CreateFileMapping(windows.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
+	if h == 0 {
+		return nil, os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	// Actually map a view of the data into memory. The view's size
+	// is the length the user requested.
+	fileOffsetHigh := uint32(off >> 32)
+	fileOffsetLow := uint32(off & 0xFFFFFFFF)
+	addr, errno := windows.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
+	if addr == 0 {
+		return nil, os.NewSyscallError("MapViewOfFile", errno)
+	}
+	handleLock.Lock()
+	handleMap[addr] = &addrinfo{
+		file:    windows.Handle(hfile),
+		mapview: h,
+	}
+	handleLock.Unlock()
+
+	m := MMap{}
+	dh := m.header()
+	dh.Data = addr
+	dh.Len = len
+	dh.Cap = dh.Len
+
+	return m, nil
+}
+
+func (m MMap) flush() error {
+	addr, len := m.addrLen()
+	errno := windows.FlushViewOfFile(addr, len)
+	if errno != nil {
+		return os.NewSyscallError("FlushViewOfFile", errno)
+	}
+
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	handle, ok := handleMap[addr]
+	if !ok {
+		// should be impossible; we would've errored above
+		return errors.New("unknown base address")
+	}
+
+	errno = windows.FlushFileBuffers(handle.file)
+	return os.NewSyscallError("FlushFileBuffers", errno)
+}
+
+func (m MMap) lock() error {
+	addr, len := m.addrLen()
+	errno := windows.VirtualLock(addr, len)
+	return os.NewSyscallError("VirtualLock", errno)
+}
+
+func (m MMap) unlock() error {
+	addr, len := m.addrLen()
+	errno := windows.VirtualUnlock(addr, len)
+	return os.NewSyscallError("VirtualUnlock", errno)
+}
+
+func (m MMap) unmap() error {
+	err := m.flush()
+	if err != nil {
+		return err
+	}
+
+	addr := m.header().Data
+	// Lock the UnmapViewOfFile along with the handleMap deletion.
+ // As soon as we unmap the view, the OS is free to give the + // same addr to another new map. We don't want another goroutine + // to insert and remove the same addr into handleMap while + // we're trying to remove our old addr/handle pair. + handleLock.Lock() + defer handleLock.Unlock() + err = windows.UnmapViewOfFile(addr) + if err != nil { + return err + } + + handle, ok := handleMap[addr] + if !ok { + // should be impossible; we would've errored above + return errors.New("unknown base address") + } + delete(handleMap, addr) + + e := windows.CloseHandle(windows.Handle(handle.mapview)) + return os.NewSyscallError("CloseHandle", e) +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..042091d9b3b0d93b7070e05e11a35b4131c826f7 --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..bcfa19520af9be47bf00b12b35e3e65d2435428c --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 0000000000000000000000000000000000000000..931ae31606f8c09ea5487f6ac4b419d7844ce25e --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6050c10f4c8b4c22f50c83715f44f12419f763be --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 0000000000000000000000000000000000000000..cea12879a0eae937f6ecdb6243f64591c5217fef --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000000000000000000000000000000000000..72efb0353ddfc02dc509b67b1332c1d3595ccb6a --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..fcd192b849eda1c391d5460b24ce4e263f3a6b90 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..e6179f65e3511d6da76e25c749c6d781c5e337a7 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//	goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//	goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ verySlowForwardCopy
+	JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c9f2049bc7be00b86e7237a7c91206c9dfabd02
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000000000000000000000000000000000000..8d393e904bb3126decbc1bec4fb2b9ffee02f1d3 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. 
On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..150d91bc8be57d3cc659ceefbd11f08932106459 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..adfd979fe277aa548dc545ab9940a9ad0118fe2d --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
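+//
+// For example (this is the same byte-for-byte encoding reused below in
+// encodeBlock, not a new one): the instruction
+// MOVWQZX table-32768(SP)(R11*2), R15 is written as the sequence
+// BYTE $0x4e, $0x0f, $0xb7, $0x7c, $0x5c, $0x78, which "objdump -d" shows
+// as "movzwq 0x78(%rsp,%r11,2),%r15".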
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000000000000000000000000000000000000..dbcae905e6e047ba3c00f68057f5bf8541e981fa --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se.
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..f6406bb2c76d74ee343d238ee3ca9e2c7ad0fcaa --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000000000000000000000000000000000000..ece692ea4610ab717f74b1b4a416d1452d3673dc --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. 
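+
+A worked example: the two-byte chunk 0x05 0x0a has l = 0x05 & 0x03 = 1 and
+m = 0x05 >> 2 = 1, so it is a copy tag with length = 4 + (low 3 bits of m) = 5
+and offset = (high 3 bits of m)<<8 | 0x0a = 10, i.e. copy 5 bytes starting 10
+bytes back. Similarly, the chunk 0x10 'h' 'e' 'l' 'l' 'o' has l = 0 and
+m = 0x10 >> 2 = 4 < 60, so the next 1 + m = 5 bytes ("hello") are literal
+bytes.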
+*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c92c4d56084b267d40d8413179eb4540517f06c8 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.gitignore @@ -0,0 +1,29 @@ +#### joe made this: http://goel.io/joe + +#####=== Go ===##### + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..43eb762fa349d505b535da13a433a384662299b8 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.10.x +install: + - go get -v github.com/golang/lint/golint + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get -d -t -v ./... + - go build -v ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... 
+ - go test -v -covermode=count -coverprofile=cov.out + - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md new file mode 100644 index 0000000000000000000000000000000000000000..95581c78b06215524665af88248656d2a4593c0c --- /dev/null +++ b/vendor/github.com/oklog/ulid/AUTHORS.md @@ -0,0 +1,2 @@ +- Peter Bourgon (@peterbourgon) +- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..8da38c6b00d3e622bb1bbfe431f7b611d3a63c41 --- /dev/null +++ b/vendor/github.com/oklog/ulid/CHANGELOG.md @@ -0,0 +1,33 @@ +## 1.3.1 / 2018-10-02 + +* Use underlying entropy source for random increments in Monotonic (#32) + +## 1.3.0 / 2018-09-29 + +* Monotonic entropy support (#31) + +## 1.2.0 / 2018-09-09 + +* Add a function to convert Unix time in milliseconds back to time.Time (#30) + +## 1.1.0 / 2018-08-15 + +* Ensure random part is always read from the entropy reader in full (#28) + +## 1.0.0 / 2018-07-29 + +* Add ParseStrict and MustParseStrict functions (#26) +* Enforce overflow checking when parsing (#20) + +## 0.3.0 / 2017-01-03 + +* Implement ULID.Compare method + +## 0.2.0 / 2016-12-13 + +* Remove year 2262 Timestamp bug. (#1) +* Gracefully handle invalid encodings when parsing. + +## 0.1.0 / 2016-12-06 + +* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..68f03f26eba01fdd16d56fe71b3a5552aa4f4710 --- /dev/null +++ b/vendor/github.com/oklog/ulid/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +We use GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first propose your ideas + in a Github issue. This will avoid unnecessary work and surely give + you and us a good deal of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock new file mode 100644 index 0000000000000000000000000000000000000000..349b449a6ea6707b4d17b01a9c6ad9ceb41a4349 --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.lock @@ -0,0 +1,15 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "master" + name = "github.com/pborman/getopt" + packages = ["v2"] + revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml new file mode 100644 index 0000000000000000000000000000000000000000..624a7a019c7026910af39362306f5441b029da1a --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/vendor/github.com/oklog/ulid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0a3d2f82b255d4b03ae8ab4d2aa3f9d4eed73216
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/README.md
@@ -0,0 +1,150 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg)
+[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid)
+[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid)
+[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master)
+[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid)
+[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character-efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUID/GUIDs
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+```shell
+go get github.com/oklog/ulid
+```
+
+## Usage
+
+An ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
+This design allows for greater flexibility in choosing your trade-offs.
+
+Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
+Instantiate one per long-living goroutine, or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source`, as it's been frequently observed in the package-level functions.
+
+
+```go
+func ExampleULID() {
+	t := time.Unix(1000000, 0)
+	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
+	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
+	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
+}
+
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10895 AD
+
+**Entropy**
+- 80 bits
+- User-defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+ +``` +0123456789ABCDEFGHJKMNPQRSTVWXYZ +``` + +### Binary Layout and Byte Order + +The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). + +``` +0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_time_high | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 16_bit_uint_time_low | 16_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +``` + +### String Representation + +``` + 01AN4Z07BY 79KA1307SR9X4MV3 +|----------| |----------------| + Timestamp Entropy + 10 chars 16 chars + 48bits 80bits + base32 base32 +``` + +## Test + +```shell +go test ./... +``` + +## Benchmarks + +On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 + +``` +BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op +BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op +BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op +BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op +BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op +BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op +BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op +BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op +BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op +BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op +BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op +``` + +## Prior Art + +- [alizain/ulid](https://github.com/alizain/ulid) +- [RobThree/NUlid](https://github.com/RobThree/NUlid) +- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go new file mode 100644 index 0000000000000000000000000000000000000000..c5d0d66fd2a42cd3943aa832141f7da5542cc0e6 --- /dev/null +++ b/vendor/github.com/oklog/ulid/ulid.go @@ -0,0 +1,614 @@ +// Copyright 2016 The Oklog Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ulid + +import ( + "bufio" + "bytes" + "database/sql/driver" + "encoding/binary" + "errors" + "io" + "math" + "math/bits" + "math/rand" + "time" +) + +/* +An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier + + The components are encoded as 16 octets. + Each component is encoded with the MSB first (network byte order). + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 16_bit_uint_time_low | 16_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ +type ULID [16]byte + +var ( + // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong + // data size. + ErrDataSize = errors.New("ulid: bad data size when unmarshaling") + + // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with + // invalid Base32 encodings. + ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") + + // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient + // size. + ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") + + // ErrBigTime is returned when constructing an ULID with a time that is larger + // than MaxTime. + ErrBigTime = errors.New("ulid: time too big") + + // ErrOverflow is returned when unmarshaling a ULID whose first character is + // larger than 7, thereby exceeding the valid bit depth of 128. + ErrOverflow = errors.New("ulid: overflow when unmarshaling") + + // ErrMonotonicOverflow is returned by a Monotonic entropy source when + // incrementing the previous ULID's entropy bytes would result in overflow. + ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") + + // ErrScanValue is returned when the value passed to scan cannot be unmarshaled + // into the ULID. + ErrScanValue = errors.New("ulid: source value must be a string or byte slice") +) + +// New returns an ULID with the given Unix milliseconds timestamp and an +// optional entropy source. Use the Timestamp function to convert +// a time.Time to Unix milliseconds. +// +// ErrBigTime is returned when passing a timestamp bigger than MaxTime. +// Reading from the entropy source may also return an error. +func New(ms uint64, entropy io.Reader) (id ULID, err error) { + if err = id.SetTime(ms); err != nil { + return id, err + } + + switch e := entropy.(type) { + case nil: + return id, err + case *monotonic: + err = e.MonotonicRead(ms, id[6:]) + default: + _, err = io.ReadFull(e, id[6:]) + } + + return id, err +} + +// MustNew is a convenience function equivalent to New that panics on failure +// instead of returning an error. 
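+//
+// A minimal usage sketch (illustrative only, not part of the upstream file),
+// assuming a math/rand-backed entropy source:
+//
+//	entropy := ulid.Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0)
+//	id := ulid.MustNew(ulid.Timestamp(time.Now()), entropy)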
+func MustNew(ms uint64, entropy io.Reader) ULID { + id, err := New(ms, entropy) + if err != nil { + panic(err) + } + return id +} + +// Parse parses an encoded ULID, returning an error in case of failure. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. For a version that +// returns an error instead, see ParseStrict. +func Parse(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), false, &id) +} + +// ParseStrict parses an encoded ULID, returning an error in case of failure. +// +// It is like Parse, but additionally validates that the parsed ULID consists +// only of valid base32 characters. It is slightly slower than Parse. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings return ErrInvalidCharacters. +func ParseStrict(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), true, &id) +} + +func parse(v []byte, strict bool, id *ULID) error { + // Check if a base32 encoded ULID is the right length. + if len(v) != EncodedSize { + return ErrDataSize + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if strict && + (dec[v[0]] == 0xFF || + dec[v[1]] == 0xFF || + dec[v[2]] == 0xFF || + dec[v[3]] == 0xFF || + dec[v[4]] == 0xFF || + dec[v[5]] == 0xFF || + dec[v[6]] == 0xFF || + dec[v[7]] == 0xFF || + dec[v[8]] == 0xFF || + dec[v[9]] == 0xFF || + dec[v[10]] == 0xFF || + dec[v[11]] == 0xFF || + dec[v[12]] == 0xFF || + dec[v[13]] == 0xFF || + dec[v[14]] == 0xFF || + dec[v[15]] == 0xFF || + dec[v[16]] == 0xFF || + dec[v[17]] == 0xFF || + dec[v[18]] == 0xFF || + dec[v[19]] == 0xFF || + dec[v[20]] == 0xFF || + dec[v[21]] == 0xFF || + dec[v[22]] == 0xFF || + dec[v[23]] == 0xFF || + dec[v[24]] == 0xFF || + dec[v[25]] == 0xFF) { + return ErrInvalidCharacters + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if v[0] > '7' { + return ErrOverflow + } + + // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) + // to decode a base32 ULID. + + // 6 bytes timestamp (48 bits) + (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) + (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) + (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) + (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) + (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) + (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) + + // 10 bytes of entropy (80 bits) + (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) + (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) + (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) + (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) + (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) + (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) + (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) + (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) + (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) + (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) + + return nil +} + +// MustParse is a convenience function equivalent to Parse that panics on failure +// instead of returning an error. 
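+//
+// For example (illustrative only, not part of the upstream file):
+//
+//	id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")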
+func MustParse(ulid string) ULID { + id, err := Parse(ulid) + if err != nil { + panic(err) + } + return id +} + +// MustParseStrict is a convenience function equivalent to ParseStrict that +// panics on failure instead of returning an error. +func MustParseStrict(ulid string) ULID { + id, err := ParseStrict(ulid) + if err != nil { + panic(err) + } + return id +} + +// String returns a lexicographically sortable string encoded ULID +// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy +func (id ULID) String() string { + ulid := make([]byte, EncodedSize) + _ = id.MarshalTextTo(ulid) + return string(ulid) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface by +// returning the ULID as a byte slice. +func (id ULID) MarshalBinary() ([]byte, error) { + ulid := make([]byte, len(id)) + return ulid, id.MarshalBinaryTo(ulid) +} + +// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. +// ErrBufferSize is returned when the len(dst) != 16. +func (id ULID) MarshalBinaryTo(dst []byte) error { + if len(dst) != len(id) { + return ErrBufferSize + } + + copy(dst, id[:]) + return nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by +// copying the passed data and converting it to an ULID. ErrDataSize is +// returned if the data length is different from ULID length. +func (id *ULID) UnmarshalBinary(data []byte) error { + if len(data) != len(*id) { + return ErrDataSize + } + + copy((*id)[:], data) + return nil +} + +// Encoding is the base 32 encoding alphabet used in ULID strings. +const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" + +// MarshalText implements the encoding.TextMarshaler interface by +// returning the string encoded ULID. +func (id ULID) MarshalText() ([]byte, error) { + ulid := make([]byte, EncodedSize) + return ulid, id.MarshalTextTo(ulid) +} + +// MarshalTextTo writes the ULID as a string to the given buffer. +// ErrBufferSize is returned when the len(dst) != 26. +func (id ULID) MarshalTextTo(dst []byte) error { + // Optimized unrolled loop ahead. + // From https://github.com/RobThree/NUlid + + if len(dst) != EncodedSize { + return ErrBufferSize + } + + // 10 byte timestamp + dst[0] = Encoding[(id[0]&224)>>5] + dst[1] = Encoding[id[0]&31] + dst[2] = Encoding[(id[1]&248)>>3] + dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] + dst[4] = Encoding[(id[2]&62)>>1] + dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] + dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] + dst[7] = Encoding[(id[4]&124)>>2] + dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] + dst[9] = Encoding[id[5]&31] + + // 16 bytes of entropy + dst[10] = Encoding[(id[6]&248)>>3] + dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] + dst[12] = Encoding[(id[7]&62)>>1] + dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] + dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] + dst[15] = Encoding[(id[9]&124)>>2] + dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] + dst[17] = Encoding[id[10]&31] + dst[18] = Encoding[(id[11]&248)>>3] + dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] + dst[20] = Encoding[(id[12]&62)>>1] + dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] + dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] + dst[23] = Encoding[(id[14]&124)>>2] + dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] + dst[25] = Encoding[id[15]&31] + + return nil +} + +// Byte to index table for O(1) lookups when unmarshaling. 
+// We use 0xFF as sentinel value for invalid indexes. +var dec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const EncodedSize = 26 + +// UnmarshalText implements the encoding.TextUnmarshaler interface by +// parsing the data as string encoded ULID. +// +// ErrDataSize is returned if the len(v) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. +func (id *ULID) UnmarshalText(v []byte) error { + return parse(v, false, id) +} + +// Time returns the Unix time in milliseconds encoded in the ULID. +// Use the top level Time function to convert the returned value to +// a time.Time. +func (id ULID) Time() uint64 { + return uint64(id[5]) | uint64(id[4])<<8 | + uint64(id[3])<<16 | uint64(id[2])<<24 | + uint64(id[1])<<32 | uint64(id[0])<<40 +} + +// maxTime is the maximum Unix time in milliseconds that can be +// represented in an ULID. +var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() + +// MaxTime returns the maximum Unix time in milliseconds that +// can be encoded in an ULID. +func MaxTime() uint64 { return maxTime } + +// Now is a convenience function that returns the current +// UTC time in Unix milliseconds. Equivalent to: +// Timestamp(time.Now().UTC()) +func Now() uint64 { return Timestamp(time.Now().UTC()) } + +// Timestamp converts a time.Time to Unix milliseconds. +// +// Because of the way ULID stores time, times from the year +// 10889 produces undefined results. +func Timestamp(t time.Time) uint64 { + return uint64(t.Unix())*1000 + + uint64(t.Nanosecond()/int(time.Millisecond)) +} + +// Time converts Unix milliseconds in the format +// returned by the Timestamp function to a time.Time. +func Time(ms uint64) time.Time { + s := int64(ms / 1e3) + ns := int64((ms % 1e3) * 1e6) + return time.Unix(s, ns) +} + +// SetTime sets the time component of the ULID to the given Unix time +// in milliseconds. 
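+//
+// A usage sketch (illustrative only, not part of the upstream file):
+//
+//	var id ulid.ULID
+//	if err := id.SetTime(ulid.Timestamp(time.Now())); err != nil {
+//		// err is ErrBigTime for times beyond MaxTime().
+//	}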
+func (id *ULID) SetTime(ms uint64) error { + if ms > maxTime { + return ErrBigTime + } + + (*id)[0] = byte(ms >> 40) + (*id)[1] = byte(ms >> 32) + (*id)[2] = byte(ms >> 24) + (*id)[3] = byte(ms >> 16) + (*id)[4] = byte(ms >> 8) + (*id)[5] = byte(ms) + + return nil +} + +// Entropy returns the entropy from the ULID. +func (id ULID) Entropy() []byte { + e := make([]byte, 10) + copy(e, id[6:]) + return e +} + +// SetEntropy sets the ULID entropy to the passed byte slice. +// ErrDataSize is returned if len(e) != 10. +func (id *ULID) SetEntropy(e []byte) error { + if len(e) != 10 { + return ErrDataSize + } + + copy((*id)[6:], e) + return nil +} + +// Compare returns an integer comparing id and other lexicographically. +// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. +func (id ULID) Compare(other ULID) int { + return bytes.Compare(id[:], other[:]) +} + +// Scan implements the sql.Scanner interface. It supports scanning +// a string or byte slice. +func (id *ULID) Scan(src interface{}) error { + switch x := src.(type) { + case nil: + return nil + case string: + return id.UnmarshalText([]byte(x)) + case []byte: + return id.UnmarshalBinary(x) + } + + return ErrScanValue +} + +// Value implements the sql/driver.Valuer interface. This returns the value +// represented as a byte slice. If instead a string is desirable, a wrapper +// type can be created that calls String(). +// +// // stringValuer wraps a ULID as a string-based driver.Valuer. +// type stringValuer ULID +// +// func (id stringValuer) Value() (driver.Value, error) { +// return ULID(id).String(), nil +// } +// +// // Example usage. +// db.Exec("...", stringValuer(id)) +func (id ULID) Value() (driver.Value, error) { + return id.MarshalBinary() +} + +// Monotonic returns an entropy source that is guaranteed to yield +// strictly increasing entropy bytes for the same ULID timestamp. +// On conflicts, the previous ULID entropy is incremented with a +// random number between 1 and `inc` (inclusive). +// +// The provided entropy source must actually yield random bytes or else +// monotonic reads are not guaranteed to terminate, since there isn't +// enough randomness to compute an increment number. +// +// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. +// The lower the value of `inc`, the easier the next ULID within the +// same millisecond is to guess. If your code depends on ULIDs having +// secure entropy bytes, then don't go under this default unless you know +// what you're doing. +// +// The returned io.Reader isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) io.Reader { + m := monotonic{ + Reader: bufio.NewReader(entropy), + inc: inc, + } + + if m.inc == 0 { + m.inc = math.MaxUint32 + } + + if rng, ok := entropy.(*rand.Rand); ok { + m.rng = rng + } + + return &m +} + +type monotonic struct { + io.Reader + ms uint64 + inc uint64 + entropy uint80 + rand [8]byte + rng *rand.Rand +} + +func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { + if !m.entropy.IsZero() && m.ms == ms { + err = m.increment() + m.entropy.AppendTo(entropy) + } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { + m.ms = ms + m.entropy.SetBytes(entropy) + } + return err +} + +// increment the previous entropy number with a random number +// of up to m.inc (inclusive). 
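+// In other words, within a single millisecond each conflicting ULID is
+// strictly greater than the previous one by a random step in [1, m.inc].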
+func (m *monotonic) increment() error {
+	if inc, err := m.random(); err != nil {
+		return err
+	} else if m.entropy.Add(inc) {
+		return ErrMonotonicOverflow
+	}
+	return nil
+}
+
+// random returns a uniform random value in [1, m.inc), reading entropy
+// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
+// Adapted from: https://golang.org/pkg/crypto/rand/#Int
+func (m *monotonic) random() (inc uint64, err error) {
+	if m.inc <= 1 {
+		return 1, nil
+	}
+
+	// Fast path for using an underlying rand.Rand directly.
+	if m.rng != nil {
+		// Range: [1, m.inc)
+		return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
+	}
+
+	// bitLen is the maximum bit length needed to encode a value < m.inc.
+	bitLen := bits.Len64(m.inc)
+
+	// byteLen is the maximum byte length needed to encode a value < m.inc.
+	byteLen := uint(bitLen+7) / 8
+
+	// msbitLen is the number of bits in the most significant byte of m.inc-1.
+	msbitLen := uint(bitLen % 8)
+	if msbitLen == 0 {
+		msbitLen = 8
+	}
+
+	for inc == 0 || inc >= m.inc {
+		if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
+			return 0, err
+		}
+
+		// Clear bits in the first byte to increase the probability
+		// that the candidate is < m.inc.
+		m.rand[0] &= uint8(int(1<<msbitLen) - 1)
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201)
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191)
+- Update license to Apache 2.0 (#181)
+- Replace 'golang.org/x/net/context' with 'context' (#176)
+- Port of Python opentracing/harness/api_check.py to Go (#146)
+- Fix race condition in MockSpan.Context() (#170)
+- Add PeerHostIPv4.SetString() (#155)
+- Add a Noop log field type to log to allow for optional fields (#150)
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139)
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments
+- Address race in nextMockID() (#123)
+- log: avoid panic marshaling nil error (#131)
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128)
+- Drop Go 1.5 that fails in Travis (#129)
+- Add convenience methods Key() and Value() to log.Field
+- Add convenience methods to log.Field (2 years, 6 months ago)
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f0027349e8302d3631136c94fbead02cdaa27386
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..62abb63f58de6d895e99e170bfe4d02ccba4a528 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,20 @@ +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover -race ./... + +.PHONY: cover +cover: + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6ef1d7c9d27492b6b70e8563832e968663428d5e --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -0,0 +1,171 @@ +[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) + +# OpenTracing API for Go + +This package is a Go platform API for OpenTracing. + +## Required Reading + +In order to understand the Go platform API, one must first be familiar with the +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. + +## API overview for those adding instrumentation + +Everyday consumers of this `opentracing` package really only need to worry +about a couple of key abstractions: the `StartSpan` function, the `Span` +interface, and binding a `Tracer` at `main()`-time. Here are code snippets +demonstrating some important use cases. + +#### Singleton initialization + +The simplest starting point is `./default_tracer.go`. As early as possible, call + +```go + import "github.com/opentracing/opentracing-go" + import ".../some_tracing_impl" + + func main() { + opentracing.SetGlobalTracer( + // tracing impl specific: + some_tracing_impl.New(...), + ) + ... + } +``` + +#### Non-Singleton initialization + +If you prefer direct control to singletons, manage ownership of the +`opentracing.Tracer` implementation explicitly. + +#### Creating a Span given an existing Go `context.Context` + +If you use `context.Context` in your application, OpenTracing's Go library will +happily rely on it for `Span` propagation. To start a new (blocking child) +`Span`, you can use `StartSpanFromContext`. + +```go + func xyz(ctx context.Context, ...) { + ... + span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") + defer span.Finish() + span.LogFields( + log.String("event", "soft error"), + log.String("type", "cache timeout"), + log.Int("waited.millis", 1500)) + ... + } +``` + +#### Starting an empty trace by creating a "root span" + +It's always possible to create a "root" `Span` with no parent or other causal +reference. + +```go + func xyz() { + ... 
+ sp := opentracing.StartSpan("operation_name") + defer sp.Finish() + ... + } +``` + +#### Creating a (child) Span given an existing (parent) Span + +```go + func xyz(parentSpan opentracing.Span, ...) { + ... + sp := opentracing.StartSpan( + "operation_name", + opentracing.ChildOf(parentSpan.Context())) + defer sp.Finish() + ... + } +``` + +#### Serializing to the wire + +```go + func makeSomeRequest(ctx context.Context) ... { + if span := opentracing.SpanFromContext(ctx); span != nil { + httpClient := &http.Client{} + httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) + + // Transmit the span's TraceContext as HTTP headers on our + // outbound request. + opentracing.GlobalTracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(httpReq.Header)) + + resp, err := httpClient.Do(httpReq) + ... + } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... + wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. + serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go new file mode 100644 index 0000000000000000000000000000000000000000..52e889582a85947c2da6d7fbb1f2538574ae361d --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -0,0 +1,210 @@ +package ext + +import "github.com/opentracing/opentracing-go" + +// These constants define common tag names recommended for better portability across +// tracing systems and languages/platforms. 
+// +// The tag names are defined as typed strings, so that in addition to the usual use +// +// span.setTag(TagName, value) +// +// they also support value type validation via this additional syntax: +// +// TagName.Set(span, value) +// +var ( + ////////////////////////////////////////////////////////////////////// + // SpanKind (client/server or producer/consumer) + ////////////////////////////////////////////////////////////////////// + + // SpanKind hints at relationship between spans, e.g. client/server + SpanKind = spanKindTagName("span.kind") + + // SpanKindRPCClient marks a span representing the client-side of an RPC + // or other remote call + SpanKindRPCClientEnum = SpanKindEnum("client") + SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} + + // SpanKindRPCServer marks a span representing the server-side of an RPC + // or other remote call + SpanKindRPCServerEnum = SpanKindEnum("server") + SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} + + // SpanKindProducer marks a span representing the producer-side of a + // message bus + SpanKindProducerEnum = SpanKindEnum("producer") + SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} + + // SpanKindConsumer marks a span representing the consumer-side of a + // message bus + SpanKindConsumerEnum = SpanKindEnum("consumer") + SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} + + ////////////////////////////////////////////////////////////////////// + // Component name + ////////////////////////////////////////////////////////////////////// + + // Component is a low-cardinality identifier of the module, library, + // or package that is generating a span. + Component = stringTagName("component") + + ////////////////////////////////////////////////////////////////////// + // Sampling hint + ////////////////////////////////////////////////////////////////////// + + // SamplingPriority determines the priority of sampling this Span. + SamplingPriority = uint16TagName("sampling.priority") + + ////////////////////////////////////////////////////////////////////// + // Peer tags. These tags can be emitted by either client-side of + // server-side to describe the other side/service in a peer-to-peer + // communications, like an RPC call. + ////////////////////////////////////////////////////////////////////// + + // PeerService records the service name of the peer. + PeerService = stringTagName("peer.service") + + // PeerAddress records the address name of the peer. This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = stringTagName("peer.address") + + // PeerHostname records the host name of the peer + PeerHostname = stringTagName("peer.hostname") + + // PeerHostIPv4 records IP v4 host address of the peer + PeerHostIPv4 = ipv4Tag("peer.ipv4") + + // PeerHostIPv6 records IP v6 host address of the peer + PeerHostIPv6 = stringTagName("peer.ipv6") + + // PeerPort records port number of the peer + PeerPort = uint16TagName("peer.port") + + ////////////////////////////////////////////////////////////////////// + // HTTP Tags + ////////////////////////////////////////////////////////////////////// + + // HTTPUrl should be the URL of the request being handled in this segment + // of the trace, in standard URI format. The protocol is optional. 
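+	// For example (illustrative): "https://example.com/orders?id=42".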
+ HTTPUrl = stringTagName("http.url") + + // HTTPMethod is the HTTP method of the request, and is case-insensitive. + HTTPMethod = stringTagName("http.method") + + // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the + // HTTP response. + HTTPStatusCode = uint16TagName("http.status_code") + + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = stringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = stringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = stringTagName("db.type") + + // DBUser is a username for accessing database. + DBUser = stringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = stringTagName("message_bus.destination") + + ////////////////////////////////////////////////////////////////////// + // Error Tag + ////////////////////////////////////////////////////////////////////// + + // Error indicates that operation represented by the span resulted in an error. + Error = boolTagName("error") +) + +// --- + +// SpanKindEnum represents common span types +type SpanKindEnum string + +type spanKindTagName string + +// Set adds a string tag to the `span` +func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { + span.SetTag(string(tag), value) +} + +type rpcServerOption struct { + clientContext opentracing.SpanContext +} + +func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { + if r.clientContext != nil { + opentracing.ChildOf(r.clientContext).Apply(o) + } + SpanKindRPCServer.Apply(o) +} + +// RPCServerOption returns a StartSpanOption appropriate for an RPC server span +// with `client` representing the metadata for the remote peer Span if available. +// In case client == nil, due to the client not being instrumented, this RPC +// server span will be a root span. 
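+//
+// A server-side sketch (illustrative only, not part of the upstream file;
+// assumes an inbound *http.Request named req):
+//
+//	wireCtx, _ := opentracing.GlobalTracer().Extract(
+//		opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header))
+//	span := opentracing.StartSpan("handle_request", ext.RPCServerOption(wireCtx))
+//	defer span.Finish()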
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { + return rpcServerOption{client} +} + +// --- + +type stringTagName string + +// Set adds a string tag to the `span` +func (tag stringTagName) Set(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} + +// --- + +type uint32TagName string + +// Set adds a uint32 tag to the `span` +func (tag uint32TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// --- + +type uint16TagName string + +// Set adds a uint16 tag to the `span` +func (tag uint16TagName) Set(span opentracing.Span, value uint16) { + span.SetTag(string(tag), value) +} + +// --- + +type boolTagName string + +// Add adds a bool tag to the `span` +func (tag boolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} + +type ipv4Tag string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag ipv4Tag) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 0000000000000000000000000000000000000000..4f7066a925cd9e2e80e2430c0a2e1fb8361f6f9b --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,42 @@ +package opentracing + +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + +var ( + globalTracer = registeredTracer{NoopTracer{}, false} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = registeredTracer{tracer, true} +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer.tracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.tracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. 
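+// The migration is mechanical (illustrative): replace InitGlobalTracer(t)
+// with SetGlobalTracer(t); the body below simply delegates.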
+func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 0000000000000000000000000000000000000000..08c00c04e82ae304b41704c7d0074cb879034792 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,60 @@ +package opentracing + +import "context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// It's behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. +func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + } + span := tracer.StartSpan(operationName, opts...) + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 0000000000000000000000000000000000000000..50feea341a732865402478bdd1f741bf3df8a4b6 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,269 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType + noopType +) + +// Field instances are constructed via LogBool, LogString, and so on. 
+// Tracing implementations may then handle them via the Field.Marshal +// method. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. 
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environments:
+//
+//     func customerField(order *Order) log.Field {
+//         if os.Getenv("ENVIRONMENT") == "dev" {
+//             return log.String("customer", order.Customer.ID)
+//         }
+//         return log.Noop()
+//     }
+//
+//     span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+	return Field{
+		fieldType: noopType,
+	}
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+	EmitString(key, value string)
+	EmitBool(key string, value bool)
+	EmitInt(key string, value int)
+	EmitInt32(key string, value int32)
+	EmitInt64(key string, value int64)
+	EmitUint32(key string, value uint32)
+	EmitUint64(key string, value uint64)
+	EmitFloat32(key string, value float32)
+	EmitFloat64(key string, value float64)
+	EmitObject(key string, value interface{})
+	EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+	switch lf.fieldType {
+	case stringType:
+		visitor.EmitString(lf.key, lf.stringVal)
+	case boolType:
+		visitor.EmitBool(lf.key, lf.numericVal != 0)
+	case intType:
+		visitor.EmitInt(lf.key, int(lf.numericVal))
+	case int32Type:
+		visitor.EmitInt32(lf.key, int32(lf.numericVal))
+	case int64Type:
+		visitor.EmitInt64(lf.key, int64(lf.numericVal))
+	case uint32Type:
+		visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+	case uint64Type:
+		visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+	case float32Type:
+		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+	case float64Type:
+		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+	case errorType:
+		if err, ok := lf.interfaceVal.(error); ok {
+			visitor.EmitString(lf.key, err.Error())
+		} else {
+			visitor.EmitString(lf.key, "<nil>")
+		}
+	case objectType:
+		visitor.EmitObject(lf.key, lf.interfaceVal)
+	case lazyLoggerType:
+		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+	case noopType:
+		// intentionally left blank
+	}
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+	return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+	switch lf.fieldType {
+	case stringType:
+		return lf.stringVal
+	case boolType:
+		return lf.numericVal != 0
+	case intType:
+		return int(lf.numericVal)
+	case int32Type:
+		return int32(lf.numericVal)
+	case int64Type:
+		return int64(lf.numericVal)
+	case uint32Type:
+		return uint32(lf.numericVal)
+	case uint64Type:
+		return uint64(lf.numericVal)
+	case float32Type:
+		return math.Float32frombits(uint32(lf.numericVal))
+	case float64Type:
+		return math.Float64frombits(uint64(lf.numericVal))
+	case errorType, objectType, lazyLoggerType:
+		return lf.interfaceVal
+	case noopType:
+		return nil
+	default:
+		return nil
+	}
+}
+
+// String returns a string representation of the key and value.
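+// For instance, log.Int("retries", 2).String() evaluates to "retries:2"
+// (an illustrative aside, derived from Key and Value above).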
+func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 0000000000000000000000000000000000000000..3832feb5ceb297f01e6fda2e2b13224dcea0aedc --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import "fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 0000000000000000000000000000000000000000..0d32f692c4104649aa3f5b9b40b0d59974fb2a95 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. 
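+//
+// A brief sketch of that library-default pattern (the `Client` type here is
+// hypothetical):
+//
+//     type Client struct {
+//         tracer opentracing.Tracer
+//     }
+//
+//     func NewClient() *Client {
+//         // tracing is off until the caller installs a real Tracer
+//         return &Client{tracer: opentracing.NoopTracer{}}
+//     }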
+type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 0000000000000000000000000000000000000000..b0c275eb05e4d5fb457f9fc723d83e502f5d0979 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. 
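+	//
+	// Callers often treat these Extract errors uniformly, falling back to a
+	// fresh root span (a sketch; ChildOf(nil) is a no-op, so a nil parent
+	// yields a root span):
+	//
+	//     sc, _ := tracer.Extract(opentracing.HTTPHeaders, carrier)
+	//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))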
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. 
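+	//
+	// As an illustrative sketch (the "mycorp-" key prefix is hypothetical),
+	// an Extract() implementation might skip unrelated entries like so:
+	//
+	//     err := carrier.ForeachKey(func(key, val string) error {
+	//         if !strings.HasPrefix(key, "mycorp-") {
+	//             return nil // not a tracing key; ignore
+	//         }
+	//         // ... decode key/val into the SpanContext ...
+	//         return nil
+	//     })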
+	//
+	// The "foreach" callback pattern reduces unnecessary copying in some cases
+	// and also allows implementations to hold locks while the map is read.
+	ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, v := range c {
+		if err := handler(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+	c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     err := tracer.Inject(
+//         span.Context(),
+//         opentracing.HTTPHeaders,
+//         carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+	h := http.Header(c)
+	h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, vals := range c {
+		for _, v := range vals {
+			if err := handler(k, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d3fb53418382d1c05b27ed16ce540b469303a9c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+	// ForeachBaggageItem grants access to all baggage items stored in the
+	// SpanContext.
+	// The handler function will be called for each baggage key/value pair.
+	// The ordering of items is not guaranteed.
+	//
+	// The bool return value indicates if the handler wants to continue iterating
+	// through the rest of the baggage items; for example if the handler is trying to
+	// find some baggage item by pattern matching the name, it can return false
+	// as soon as the item is found to stop further iterations.
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+	// Sets the end timestamp and finalizes Span state.
+	//
+	// With the exception of calls to Context() (which are always allowed),
+	// Finish() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	Finish()
+	// FinishWithOptions is like Finish() but with explicit control over
+	// timestamps and log data.
+	FinishWithOptions(opts FinishOptions)
+
+	// Context() yields the SpanContext for this Span.
Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). + Context() SpanContext + + // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. 
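+//
+// A short sketch of supplying pre-collected records at finish time (assumes
+// a `recs []opentracing.LogRecord` slice gathered earlier, each with a
+// non-zero Timestamp):
+//
+//     span.FinishWithOptions(opentracing.FinishOptions{
+//         LogRecords: recs,
+//     })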
+type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 0000000000000000000000000000000000000000..715f0cedfb60ef71a4fea034e947fe2d50bf5c45 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,304 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see https://godoc.org/context#WithValue).
+	//
+	// Example usage (sans error handling):
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     err := tracer.Inject(
+	//         span.Context(),
+	//         opentracing.HTTPHeaders,
+	//         carrier)
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
+	// is not supported by (or not known by) the implementation.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if the format is supported but injection
+	// fails anyway.
+	//
+	// See Tracer.Extract().
+	Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+	// Extract() returns a SpanContext instance given `format` and `carrier`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see
+	// https://godoc.org/golang.org/x/net/context#WithValue).
+	//
+	// Example usage (with StartSpan):
+	//
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+	//
+	//     // ... assuming the ultimate goal here is to resume the trace with a
+	//     // server-side Span:
+	//     var serverSpan opentracing.Span
+	//     if err == nil {
+	//         serverSpan = tracer.StartSpan(
+	//             rpcMethodName, ext.RPCServerOption(clientContext))
+	//     } else {
+	//         serverSpan = tracer.StartSpan(rpcMethodName)
+	//     }
+	//
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Return values:
+	//  - A successful Extract returns a SpanContext instance and a nil error
+	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
+	//    returns (nil, opentracing.ErrSpanContextNotFound)
+	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
+	//    opentracing.ErrUnsupportedFormat)
+	//  - If there are more fundamental problems with the `carrier` object,
+	//    Extract() may return opentracing.ErrInvalidCarrier,
+	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
+	//    errors.
+	//
+	// See Tracer.Inject().
+	Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+//         sso := opentracing.StartSpanOptions{}
+//         for _, o := range opts {
+//             o.Apply(&sso)
+//         }
+//         ...
+//     }
+//
+type StartSpanOptions struct {
+	// Zero or more causal references to other Spans (via their SpanContext).
+	// If empty, start a "root" Span (i.e., start a new trace).
+	References []SpanReference
+
+	// StartTime overrides the Span's start time, or implicitly becomes
+	// time.Now() if StartTime.IsZero().
+	StartTime time.Time
+
+	// Tags may have zero or more entries; the restrictions on map values are
+	// identical to those for Span.SetTag(). May be nil.
+	//
+	// If specified, the caller hands off ownership of Tags at
+	// StartSpan() invocation time.
+	Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+	Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+	// ChildOfRef refers to a parent Span that caused *and* somehow depends
+	// upon the new child Span. Often (but not always), the parent Span cannot
+	// finish until the child Span does.
+	//
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+	//
+	//     [-Parent Span---------]
+	//          [-Child Span----]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.ChildOf()
+	ChildOfRef SpanReferenceType = iota
+
+	// FollowsFromRef refers to a parent Span that does not depend in any way
+	// on the result of the new child Span. For instance, one might use
+	// FollowsFromRefs to describe pipeline stages separated by queues,
+	// or a fire-and-forget cache insert at the tail end of a web request.
+	//
+	// A FollowsFromRef Span is part of the same logical trace as the new Span:
+	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+	//
+	// All of the following could be valid timing diagrams for children that
+	// "FollowFrom" a parent.
+	//
+	//     [-Parent Span-]  [-Child Span-]
+	//
+	//
+	//     [-Parent Span--]
+	//      [-Child Span-]
+	//
+	//
+	//     [-Parent Span-]
+	//                 [-Child Span-]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.FollowsFrom()
+	FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+//     sc, _ := tracer.Extract(someFormat, someCarrier)
+//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+	Type              SpanReferenceType
+	ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+	if r.ReferencedContext != nil {
+		o.References = append(o.References, r)
+	}
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
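+//
+// For example, a sketch of a server resuming an inbound trace (error handling
+// elided; `carrier` as in the HTTPHeaders examples above):
+//
+//     parentCtx, _ := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//     span := tracer.StartSpan("handle-request", opentracing.ChildOf(parentCtx))
+//     defer span.Finish()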
+// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. +// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e06d2081865a766a8668acc12878f98b27fc9ea0 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/NOTICE b/vendor/github.com/prometheus-operator/prometheus-operator/NOTICE new file mode 100644 index 0000000000000000000000000000000000000000..e520005cdda30b725b5534ce120ee905fd40b55a --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2015 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e06d2081865a766a8668acc12878f98b27fc9ea0 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.mod b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..04fc053a6dfb361e63180b3c196b13af556fa1c9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.mod @@ -0,0 +1,8 @@ +module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring + +go 1.14 + +require ( + k8s.io/api v0.18.3 + k8s.io/apimachinery v0.18.3 +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.sum b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..34eb6e199b2f445465a66f6dfa0b22c07bed8224 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/go.sum @@ -0,0 +1,109 @@ +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag 
v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= +k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go new file mode 100644 index 0000000000000000000000000000000000000000..a9914fb1a86d07047eb2dc3906c097d8eb90647b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go @@ -0,0 +1,19 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +const ( + GroupName = "monitoring.coreos.com" +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..64c47252730aa4e4f72ff4a5ab362761f7bd6e7e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2017 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=monitoring.coreos.com + +package v1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..37786147abf5135481df1a7d7b10b72be5fd1a8d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/register.go @@ -0,0 +1,67 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
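+// A minimal sketch of how a consumer makes these types known to a
+// scheme-aware client (illustrative; any client that accepts a
+// runtime.Scheme works the same way):
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}
+//	// scheme can now decode monitoring.coreos.com/v1 objects such as
+//	// PrometheusRule and ThanosRuler.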
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Prometheus{},
+		&PrometheusList{},
+		&ServiceMonitor{},
+		&ServiceMonitorList{},
+		&PodMonitor{},
+		&PodMonitorList{},
+		&Probe{},
+		&ProbeList{},
+		&Alertmanager{},
+		&AlertmanagerList{},
+		&PrometheusRule{},
+		&PrometheusRuleList{},
+		&ThanosRuler{},
+		&ThanosRulerList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go
new file mode 100644
index 0000000000000000000000000000000000000000..f649cfaa63da0faede5db4325c216a1fa799bc9d
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go
@@ -0,0 +1,213 @@
+// Copyright 2020 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+	ThanosRulerKind    = "ThanosRuler"
+	ThanosRulerName    = "thanosrulers"
+	ThanosRulerKindKey = "thanosrulers"
+)
+
+// ThanosRuler defines a ThanosRuler deployment.
+// +genclient
+// +k8s:openapi-gen=true
+type ThanosRuler struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of the desired behavior of the ThanosRuler cluster. More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ThanosRulerSpec `json:"spec"`
+	// Most recent observed status of the ThanosRuler cluster. Read-only. Not
+	// included when requesting from the apiserver, only from the ThanosRuler
+	// Operator API itself. More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Status *ThanosRulerStatus `json:"status,omitempty"`
+}
+
+// ThanosRulerList is a list of ThanosRulers.
+// +k8s:openapi-gen=true
+type ThanosRulerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of ThanosRulers
+	Items []*ThanosRuler `json:"items"`
+}
+
+// ThanosRulerSpec is a specification of the desired behavior of the ThanosRuler. More info:
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+// +k8s:openapi-gen=true
+type ThanosRulerSpec struct {
+	// PodMetadata contains Labels and Annotations that get propagated to the thanos ruler pods.
+	PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
+	// Thanos container image URL.
+ Image string `json:"image,omitempty"` + // An optional list of references to secrets in the same namespace + // to use for pulling thanos images from registries + // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // When a ThanosRuler deployment is paused, no actions except for deletion + // will be performed on the underlying objects. + Paused bool `json:"paused,omitempty"` + // Number of thanos ruler instances to deploy. + Replicas *int32 `json:"replicas,omitempty"` + // Define which Nodes the Pods are scheduled on. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // Resources defines the resource requirements for single Pods. + // If not provided, no requests/limits will be set + Resources v1.ResourceRequirements `json:"resources,omitempty"` + // If specified, the pod's scheduling constraints. + Affinity *v1.Affinity `json:"affinity,omitempty"` + // If specified, the pod's tolerations. + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // SecurityContext holds pod-level security attributes and common container settings. + // This defaults to the default PodSecurityContext. + SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + // Priority class assigned to the Pods + PriorityClassName string `json:"priorityClassName,omitempty"` + // ServiceAccountName is the name of the ServiceAccount to use to run the + // Thanos Ruler Pods. + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // Storage spec to specify how storage shall be used. + Storage *StorageSpec `json:"storage,omitempty"` + // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will + // be appended to other volumes that are generated as a result of StorageSpec objects. + Volumes []v1.Volume `json:"volumes,omitempty"` + // ObjectStorageConfig configures object storage in Thanos. + ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` + // ListenLocal makes the Thanos ruler listen on loopback, so that it + // does not bind against the Pod IP. + ListenLocal bool `json:"listenLocal,omitempty"` + // QueryEndpoints defines Thanos querier endpoints from which to query metrics. + // Maps to the --query flag of thanos ruler. + QueryEndpoints []string `json:"queryEndpoints,omitempty"` + // Define configuration for connecting to thanos query instances. + // If this is defined, the QueryEndpoints field will be ignored. + // Maps to the `query.config` CLI argument. + // Only available with thanos v0.11.0 and higher. + QueryConfig *v1.SecretKeySelector `json:"queryConfig,omitempty"` + // Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, + // AlertManagersConfig should be used instead. Note: this field will be ignored + // if AlertManagersConfig is specified. + // Maps to the `alertmanagers.url` arg. + AlertManagersURL []string `json:"alertmanagersUrl,omitempty"` + // Define configuration for connecting to alertmanager. Only available with thanos v0.10.0 + // and higher. Maps to the `alertmanagers.config` arg. + AlertManagersConfig *v1.SecretKeySelector `json:"alertmanagersConfig,omitempty"` + // A label selector to select which PrometheusRules to mount for alerting and + // recording. + RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` + // Namespaces to be selected for Rules discovery. 
If unspecified, only
+	// the ThanosRuler object's own namespace is used.
+	RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
+	// EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert
+	// and metric that is user created. The label value will always be the namespace of the object that is
+	// being created.
+	EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"`
+	// PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from enforcing
+	// of adding namespace labels. Works only if enforcedNamespaceLabel is set to true.
+	// Make sure both ruleNamespace and ruleName are set for each pair.
+	PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"`
+	// Log level for ThanosRuler to be configured with.
+	LogLevel string `json:"logLevel,omitempty"`
+	// Log format for ThanosRuler to be configured with.
+	LogFormat string `json:"logFormat,omitempty"`
+	// Port name used for the pods and governing service.
+	// This defaults to web.
+	PortName string `json:"portName,omitempty"`
+	// Interval between consecutive evaluations.
+	EvaluationInterval string `json:"evaluationInterval,omitempty"`
+	// Time duration ThanosRuler shall retain data for. Default is '24h',
+	// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
+	Retention string `json:"retention,omitempty"`
+	// Containers allows injecting additional containers or modifying operator generated
+	// containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or
+	// to change the behavior of an operator generated container. Containers described here modify
+	// an operator generated container if they share the same name and modifications are done via a
+	// strategic merge patch. The current container names are: `thanos-ruler` and `rules-configmap-reloader`.
+	// Overriding containers is entirely outside the scope of what the maintainers will support and by doing
+	// so, you accept that this behaviour may break at any time without notice.
+	Containers []v1.Container `json:"containers,omitempty"`
+	// InitContainers allows adding initContainers to the pod definition. Those can be used to e.g.
+	// fetch secrets for injection into the ThanosRuler configuration from external sources. Any
+	// errors during the execution of an initContainer will lead to a restart of the Pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// Using initContainers for any use case other than secret fetching is entirely outside the scope
+	// of what the maintainers will support and by doing so, you accept that this behaviour may break
+	// at any time without notice.
+	InitContainers []v1.Container `json:"initContainers,omitempty"`
+	// TracingConfig configures tracing in Thanos. This is an experimental feature; it may change in any upcoming release in a breaking way.
+	TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"`
+	// Labels configure the external label pairs of ThanosRuler. If not provided, the default replica label
+	// `thanos_ruler_replica` will be added as a label and be dropped in alerts.
+	Labels map[string]string `json:"labels,omitempty"`
+	// AlertDropLabels configure the label names which should be dropped in ThanosRuler alerts.
+	// If the `labels` field is not provided, `thanos_ruler_replica` will be dropped in alerts by default.
+ AlertDropLabels []string `json:"alertDropLabels,omitempty"` + // The external URL the Thanos Ruler instances will be available under. This is + // necessary to generate correct URLs. This is necessary if Thanos Ruler is not + // served from root of a DNS name. + ExternalPrefix string `json:"externalPrefix,omitempty"` + // The route prefix ThanosRuler registers HTTP handlers for. This allows thanos UI to be served on a sub-path. + RoutePrefix string `json:"routePrefix,omitempty"` + // GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads + // recorded rule data. + // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. + // Maps to the '--grpc-server-tls-*' CLI args. + GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` + // The external Query URL the Thanos Ruler will set in the 'Source' field + // of all alerts. + // Maps to the '--alert.query-url' CLI arg. + AlertQueryURL string `json:"alertQueryUrl,omitempty"` +} + +// ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. Not +// included when requesting from the apiserver, only from the Prometheus +// Operator API itself. More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type ThanosRulerStatus struct { + // Represents whether any actions on the underlying managed objects are + // being performed. Only delete actions will be performed. + Paused bool `json:"paused"` + // Total number of non-terminated pods targeted by this ThanosRuler deployment + // (their labels match the selector). + Replicas int32 `json:"replicas"` + // Total number of non-terminated pods targeted by this ThanosRuler deployment + // that have the desired version spec. + UpdatedReplicas int32 `json:"updatedReplicas"` + // Total number of available pods (ready for at least minReadySeconds) + // targeted by this ThanosRuler deployment. + AvailableReplicas int32 `json:"availableReplicas"` + // Total number of unavailable pods targeted by this ThanosRuler deployment. + UnavailableReplicas int32 `json:"unavailableReplicas"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ThanosRuler) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ThanosRulerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..1d537f2c398afd07630841da6e28f194559b10d1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -0,0 +1,1321 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + Version = "v1" + + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + PrometheusKindKey = "prometheus" + + AlertmanagersKind = "Alertmanager" + AlertmanagerName = "alertmanagers" + AlertManagerKindKey = "alertmanager" + + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + ServiceMonitorKindKey = "servicemonitor" + + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + PodMonitorKindKey = "podmonitor" + + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + PrometheusRuleKindKey = "prometheusrule" + + ProbesKind = "Probe" + ProbeName = "probes" + ProbeKindKey = "probe" +) + +// Prometheus defines a Prometheus deployment. +// +genclient +// +k8s:openapi-gen=true +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Prometheus" +// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The desired replicas number of Prometheuses" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type Prometheus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the desired behavior of the Prometheus cluster. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec PrometheusSpec `json:"spec"` + // Most recent observed status of the Prometheus cluster. Read-only. Not + // included when requesting from the apiserver, only from the Prometheus + // Operator API itself. More info: + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Status *PrometheusStatus `json:"status,omitempty"` +} + +// PrometheusList is a list of Prometheuses. +// +k8s:openapi-gen=true +type PrometheusList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + // List of Prometheuses + Items []*Prometheus `json:"items"` +} + +// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +// +k8s:openapi-gen=true +type PrometheusSpec struct { + // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + // ServiceMonitors to be selected for target discovery. *Deprecated:* if + // neither this nor podMonitorSelector are specified, configuration is + // unmanaged. + ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` + // Namespaces to be selected for ServiceMonitor discovery. If nil, only + // check own namespace. + ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` + // *Experimental* PodMonitors to be selected for target discovery. + // *Deprecated:* if neither this nor serviceMonitorSelector are specified, + // configuration is unmanaged. 
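+	// A label selector is expressed in the usual Kubernetes form, e.g.
+	// (illustrative values):
+	//
+	//	podMonitorSelector:
+	//	  matchLabels:
+	//	    team: frontend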
+ PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` + // Namespaces to be selected for PodMonitor discovery. If nil, only + // check own namespace. + PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` + // *Experimental* Probes to be selected for target discovery. + ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` + // *Experimental* Namespaces to be selected for Probe discovery. If nil, only check own namespace. + ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` + // Version of Prometheus to be deployed. + Version string `json:"version,omitempty"` + // Tag of Prometheus container image to be deployed. Defaults to the value of `version`. + // Version is ignored if Tag is set. + // Deprecated: use 'image' instead. The image tag can be specified + // as part of the image URL. + Tag string `json:"tag,omitempty"` + // SHA of Prometheus container image to be deployed. Defaults to the value of `version`. + // Similar to a tag, but the SHA explicitly deploys an immutable container image. + // Version and Tag are ignored if SHA is set. + // Deprecated: use 'image' instead. The image digest can be specified + // as part of the image URL. + SHA string `json:"sha,omitempty"` + // When a Prometheus deployment is paused, no actions except for deletion + // will be performed on the underlying objects. + Paused bool `json:"paused,omitempty"` + // Image if specified has precedence over baseImage, tag and sha + // combinations. Specifying the version is still necessary to ensure the + // Prometheus Operator knows what version of Prometheus is being + // configured. + Image *string `json:"image,omitempty"` + // Base image to use for a Prometheus deployment. + // Deprecated: use 'image' instead + BaseImage string `json:"baseImage,omitempty"` + // An optional list of references to secrets in the same namespace + // to use for pulling prometheus and alertmanager images from registries + // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Number of instances to deploy for a Prometheus deployment. + Replicas *int32 `json:"replicas,omitempty"` + // Name of Prometheus external label used to denote replica name. + // Defaults to the value of `prometheus_replica`. External label will + // _not_ be added when value is set to empty string (`""`). + ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` + // Name of Prometheus external label used to denote Prometheus instance + // name. Defaults to the value of `prometheus`. External label will + // _not_ be added when value is set to empty string (`""`). + PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` + // Time duration Prometheus shall retain data for. Default is '24h', + // and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). + Retention string `json:"retention,omitempty"` + // Maximum amount of disk space used by blocks. + RetentionSize string `json:"retentionSize,omitempty"` + // Disable prometheus compaction. + DisableCompaction bool `json:"disableCompaction,omitempty"` + // Enable compression of the write-ahead log using Snappy. This flag is + // only available in versions of Prometheus >= 2.11.0. 
+ WALCompression *bool `json:"walCompression,omitempty"` + // Log level for Prometheus to be configured with. + LogLevel string `json:"logLevel,omitempty"` + // Log format for Prometheus to be configured with. + LogFormat string `json:"logFormat,omitempty"` + // Interval between consecutive scrapes. + ScrapeInterval string `json:"scrapeInterval,omitempty"` + // Number of seconds to wait for target to respond before erroring. + ScrapeTimeout string `json:"scrapeTimeout,omitempty"` + // Interval between consecutive evaluations. + EvaluationInterval string `json:"evaluationInterval,omitempty"` + // /--rules.*/ command-line arguments. + Rules Rules `json:"rules,omitempty"` + // The labels to add to any time series or alerts when communicating with + // external systems (federation, remote storage, Alertmanager). + ExternalLabels map[string]string `json:"externalLabels,omitempty"` + // Enable access to prometheus web admin API. Defaults to the value of `false`. + // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, + // shutdown Prometheus, and more. Enabling this should be done with care and the + // user is advised to add additional authentication authorization via a proxy to + // ensure only clients authorized to perform these actions can do so. + // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` + // The external URL the Prometheus instances will be available under. This is + // necessary to generate correct URLs. This is necessary if Prometheus is not + // served from root of a DNS name. + ExternalURL string `json:"externalUrl,omitempty"` + // The route prefix Prometheus registers HTTP handlers for. This is useful, + // if using ExternalURL and a proxy is rewriting HTTP routes of a request, + // and the actual ExternalURL is still true, but the server serves requests + // under a different route prefix. For example for use with `kubectl proxy`. + RoutePrefix string `json:"routePrefix,omitempty"` + // QuerySpec defines the query command line flags when starting Prometheus. + Query *QuerySpec `json:"query,omitempty"` + // Storage spec to specify how storage shall be used. + Storage *StorageSpec `json:"storage,omitempty"` + // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will + // be appended to other volumes that are generated as a result of StorageSpec objects. + Volumes []v1.Volume `json:"volumes,omitempty"` + // VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. + // VolumeMounts specified will be appended to other VolumeMounts in the prometheus container, + // that are generated as a result of StorageSpec objects. + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + // A selector to select which PrometheusRules to mount for loading alerting/recording + // rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus + // Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom + // resources selected by RuleSelector. Make sure it does not match any config + // maps that you do not want to be migrated. + RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` + // Namespaces to be selected for PrometheusRules discovery. If unspecified, only + // the same namespace as the Prometheus object is in is used. 
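+	// For example, to discover rules from every namespace carrying a given
+	// label (illustrative values):
+	//
+	//	ruleNamespaceSelector:
+	//	  matchLabels:
+	//	    monitoring: enabled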
+	RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
+	// Define details regarding alerting.
+	Alerting *AlertingSpec `json:"alerting,omitempty"`
+	// Define resource requests and limits for single Pods.
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	// Define which Nodes the Pods are scheduled on.
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// ServiceAccountName is the name of the ServiceAccount to use to run the
+	// Prometheus Pods.
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+	// Secrets is a list of Secrets in the same namespace as the Prometheus
+	// object, which shall be mounted into the Prometheus Pods.
+	// The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
+	Secrets []string `json:"secrets,omitempty"`
+	// ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus
+	// object, which shall be mounted into the Prometheus Pods.
+	// The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
+	ConfigMaps []string `json:"configMaps,omitempty"`
+	// If specified, the pod's scheduling constraints.
+	Affinity *v1.Affinity `json:"affinity,omitempty"`
+	// If specified, the pod's tolerations.
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+	// If specified, the remote_write spec. This is an experimental feature; it may change in any upcoming release in a breaking way.
+	RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
+	// If specified, the remote_read spec. This is an experimental feature; it may change in any upcoming release in a breaking way.
+	RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// This defaults to the default PodSecurityContext.
+	SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
+	// ListenLocal makes the Prometheus server listen on loopback, so that it
+	// does not bind against the Pod IP.
+	ListenLocal bool `json:"listenLocal,omitempty"`
+	// Containers allows injecting additional containers or modifying operator generated
+	// containers. This can be used to allow adding an authentication proxy to a Prometheus pod or
+	// to change the behavior of an operator generated container. Containers described here modify
+	// an operator generated container if they share the same name and modifications are done via a
+	// strategic merge patch. The current container names are: `prometheus`,
+	// `prometheus-config-reloader`, `rules-configmap-reloader`, and `thanos-sidecar`. Overriding
+	// containers is entirely outside the scope of what the maintainers will support and by doing
+	// so, you accept that this behaviour may break at any time without notice.
+	Containers []v1.Container `json:"containers,omitempty"`
+	// InitContainers allows adding initContainers to the pod definition. Those can be used to e.g.
+	// fetch secrets for injection into the Prometheus configuration from external sources. Any errors
+	// during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// Using initContainers for any use case other than secret fetching is entirely outside the scope
+	// of what the maintainers will support and by doing so, you accept that this behaviour may break
+	// at any time without notice.
+ InitContainers []v1.Container `json:"initContainers,omitempty"` + // AdditionalScrapeConfigs allows specifying a key of a Secret containing + // additional Prometheus scrape configurations. Scrape configurations + // specified are appended to the configurations generated by the Prometheus + // Operator. Job configurations specified must have the form as specified + // in the official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + // As scrape configs are appended, the user is responsible to make sure it + // is valid. Note that using this feature may expose the possibility to + // break upgrades of Prometheus. It is advised to review Prometheus release + // notes to ensure that no incompatible scrape configs are going to break + // Prometheus after the upgrade. + AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` + // AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing + // additional Prometheus alert relabel configurations. Alert relabel configurations + // specified are appended to the configurations generated by the Prometheus + // Operator. Alert relabel configurations specified must have the form as specified + // in the official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + // As alert relabel configs are appended, the user is responsible to make sure it + // is valid. Note that using this feature may expose the possibility to + // break upgrades of Prometheus. It is advised to review Prometheus release + // notes to ensure that no incompatible alert relabel configs are going to break + // Prometheus after the upgrade. + AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` + // AdditionalAlertManagerConfigs allows specifying a key of a Secret containing + // additional Prometheus AlertManager configurations. AlertManager configurations + // specified are appended to the configurations generated by the Prometheus + // Operator. Job configurations specified must have the form as specified + // in the official Prometheus documentation: + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. + // As AlertManager configs are appended, the user is responsible to make sure it + // is valid. Note that using this feature may expose the possibility to + // break upgrades of Prometheus. It is advised to review Prometheus release + // notes to ensure that no incompatible AlertManager configs are going to break + // Prometheus after the upgrade. + AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` + // APIServerConfig allows specifying a host and auth methods to access apiserver. + // If left empty, Prometheus is assumed to run inside of the cluster + // and will discover API servers automatically and use the pod's CA certificate + // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` + // Thanos configuration allows configuring various aspects of a Prometheus + // server in a Thanos environment. + // + // This section is experimental, it may change significantly without + // deprecation notice in any release. + // + // This is experimental and may change significantly without backward + // compatibility in any release. 
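+	// A minimal sketch of enabling the sidecar with object storage
+	// (illustrative values; the referenced Secret is assumed to exist):
+	//
+	//	thanos:
+	//	  version: v0.15.0
+	//	  objectStorageConfig:
+	//	    name: thanos-objstore-config
+	//	    key: thanos.yaml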
+	Thanos *ThanosSpec `json:"thanos,omitempty"`
+	// Priority class assigned to the Pods.
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+	// Port name used for the pods and governing service.
+	// This defaults to web.
+	PortName string `json:"portName,omitempty"`
+	// ArbitraryFSAccessThroughSMs configures whether configuration
+	// based on a service monitor can access arbitrary files on the file system
+	// of the Prometheus container, e.g. bearer token files.
+	ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"`
+	// OverrideHonorLabels if set to true overrides all user configured honor_labels.
+	// If HonorLabels is set in ServiceMonitor or PodMonitor to true, this overrides honor_labels to false.
+	OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"`
+	// OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
+	OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"`
+	// IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from
+	// the podmonitor and servicemonitor configs, and they will only discover endpoints
+	// within their current namespace. Defaults to false.
+	IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"`
+	// EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert
+	// and metric that is user created. The label value will always be the namespace of the object that is
+	// being created.
+	EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"`
+	// PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing
+	// of adding namespace labels. Works only if enforcedNamespaceLabel is set to true.
+	// Make sure both ruleNamespace and ruleName are set for each pair.
+	PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"`
+	// QueryLogFile specifies the file to which PromQL queries are logged.
+	// Note that this location must be writable, and can be persisted using an attached volume.
+	// Alternatively, the location can be set to a stdout location such as `/dev/stdout` to log
+	// query information to the default Prometheus log stream.
+	// This is only available in versions of Prometheus >= 2.16.0.
+	// For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
+	QueryLogFile string `json:"queryLogFile,omitempty"`
+	// EnforcedSampleLimit defines a global limit on the number of scraped samples
+	// that will be accepted. This overrides any SampleLimit set per
+	// ServiceMonitor and/or PodMonitor. It is meant to be used by admins to
+	// enforce the SampleLimit to keep the overall number of samples/series under
+	// the desired limit.
+	// Note that if SampleLimit is lower, that value will be taken instead.
+	EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"`
+	// AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus.
+	// This is still experimental in Prometheus, so it may change in any upcoming release.
+	AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"`
+}
+
+// PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces
+// to be ignored while enforcing namespace label for alerts and metrics.
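+// For example, to exempt one rule from namespace-label enforcement
+// (illustrative values):
+//
+//	prometheusRulesExcludedFromEnforce:
+//	- ruleNamespace: default
+//	  ruleName: example-rule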
+type PrometheusRuleExcludeConfig struct {
+	// RuleNamespace - namespace of excluded rule
+	RuleNamespace string `json:"ruleNamespace"`
+	// RuleName - name of excluded rule
+	RuleName string `json:"ruleName"`
+}
+
+// ArbitraryFSAccessThroughSMsConfig enables users to configure whether
+// a service monitor selected by the Prometheus instance is allowed to use
+// arbitrary files on the file system of the Prometheus container. This is the case
+// when e.g. a service monitor specifies a BearerTokenFile in an endpoint. A
+// malicious user could create a service monitor selecting arbitrary secret files
+// in the Prometheus container. Those secrets would then be sent with a scrape
+// request by Prometheus to a malicious target. Denying the above would prevent the
+// attack; users can instead use the BearerTokenSecret field.
+type ArbitraryFSAccessThroughSMsConfig struct {
+	Deny bool `json:"deny,omitempty"`
+}
+
+// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not
+// included when requesting from the apiserver, only from the Prometheus
+// Operator API itself. More info:
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+// +k8s:openapi-gen=true
+type PrometheusStatus struct {
+	// Represents whether any actions on the underlying managed objects are
+	// being performed. Only delete actions will be performed.
+	Paused bool `json:"paused"`
+	// Total number of non-terminated pods targeted by this Prometheus deployment
+	// (their labels match the selector).
+	Replicas int32 `json:"replicas"`
+	// Total number of non-terminated pods targeted by this Prometheus deployment
+	// that have the desired version spec.
+	UpdatedReplicas int32 `json:"updatedReplicas"`
+	// Total number of available pods (ready for at least minReadySeconds)
+	// targeted by this Prometheus deployment.
+	AvailableReplicas int32 `json:"availableReplicas"`
+	// Total number of unavailable pods targeted by this Prometheus deployment.
+	UnavailableReplicas int32 `json:"unavailableReplicas"`
+}
+
+// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
+// +k8s:openapi-gen=true
+type AlertingSpec struct {
+	// AlertmanagerEndpoints Prometheus should fire alerts against.
+	Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
+}
+
+// StorageSpec defines the configured storage for a group of Prometheus servers.
+// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
+// +k8s:openapi-gen=true
+type StorageSpec struct {
+	// Deprecated: subPath usage will be disabled by default in a future release; this option will become unnecessary.
+	// DisableMountSubPath allows to remove any subPath usage in volume mounts.
+	DisableMountSubPath bool `json:"disableMountSubPath,omitempty"`
+	// EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More
+	// info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
+	EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
+	// A PVC spec to be used by the Prometheus StatefulSets.
+	VolumeClaimTemplate EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+}
+
+// EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim.
+// It contains TypeMeta and a reduced ObjectMeta.
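+// It typically appears as the volumeClaimTemplate of a StorageSpec, e.g.
+// (illustrative values):
+//
+//	storage:
+//	  volumeClaimTemplate:
+//	    spec:
+//	      storageClassName: standard
+//	      resources:
+//	        requests:
+//	          storage: 10Gi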
+type EmbeddedPersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + + // EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + EmbeddedObjectMetadata `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// EmbeddedObjectMetadata contains a subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta +// Only fields which are relevant to embedded resources are included. +type EmbeddedObjectMetadata struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` +} + +// QuerySpec defines the query command line flags when starting Prometheus. +// +k8s:openapi-gen=true +type QuerySpec struct { + // The delta difference allowed for retrieving metrics during expression evaluations. + LookbackDelta *string `json:"lookbackDelta,omitempty"` + // Number of concurrent queries that can be run at once. + MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` + // Maximum number of samples a single query can load into memory. Note that queries will fail if they would load more samples than this into memory, so this also limits the number of samples a query can return. + MaxSamples *int32 `json:"maxSamples,omitempty"` + // Maximum time a query may take before being aborted. + Timeout *string `json:"timeout,omitempty"` +} + +// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment. +// +k8s:openapi-gen=true +type ThanosSpec struct { + // Image if specified has precedence over baseImage, tag and sha + // combinations. Specifying the version is still necessary to ensure the + // Prometheus Operator knows what version of Thanos is being + // configured. + Image *string `json:"image,omitempty"` + // Version describes the version of Thanos to use. 
+// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.
+// +k8s:openapi-gen=true
+type ThanosSpec struct {
+	// Image if specified has precedence over baseImage, tag and sha
+	// combinations. Specifying the version is still necessary to ensure the
+	// Prometheus Operator knows what version of Thanos is being
+	// configured.
+	Image *string `json:"image,omitempty"`
+	// Version describes the version of Thanos to use.
+	Version *string `json:"version,omitempty"`
+	// Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`.
+	// Version is ignored if Tag is set.
+	// Deprecated: use 'image' instead. The image tag can be specified
+	// as part of the image URL.
+	Tag *string `json:"tag,omitempty"`
+	// SHA of Thanos container image to be deployed. Defaults to the value of `version`.
+	// Similar to a tag, but the SHA explicitly deploys an immutable container image.
+	// Version and Tag are ignored if SHA is set.
+	// Deprecated: use 'image' instead. The image digest can be specified
+	// as part of the image URL.
+	SHA *string `json:"sha,omitempty"`
+	// Thanos base image if other than default.
+	// Deprecated: use 'image' instead
+	BaseImage *string `json:"baseImage,omitempty"`
+	// Resources defines the resource requirements for the Thanos sidecar.
+	// If not provided, no requests/limits will be set
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	// ObjectStorageConfig configures object storage in Thanos.
+	ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"`
+	// ListenLocal makes the Thanos sidecar listen on loopback, so that it
+	// does not bind against the Pod IP.
+	ListenLocal bool `json:"listenLocal,omitempty"`
+	// TracingConfig configures tracing in Thanos. This is an experimental feature, it may change in any upcoming release in a breaking way.
+	TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"`
+	// GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads
+	// recorded rule data.
+	// Note: Currently only the CAFile, CertFile, and KeyFile fields are supported.
+	// Maps to the '--grpc-server-tls-*' CLI args.
+	GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"`
+	// LogLevel for Thanos sidecar to be configured with.
+	LogLevel string `json:"logLevel,omitempty"`
+	// LogFormat for Thanos sidecar to be configured with.
+	LogFormat string `json:"logFormat,omitempty"`
+	// MinTime for Thanos sidecar to be configured with. Option can be a constant time in RFC3339 format or time duration relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y.
+	MinTime string `json:"minTime,omitempty"`
+}
+
+// RemoteWriteSpec defines the remote_write configuration for prometheus.
+// +k8s:openapi-gen=true
+type RemoteWriteSpec struct {
+	// The URL of the endpoint to send samples to.
+	URL string `json:"url"`
+	// The name of the remote write queue, must be unique if specified. The
+	// name is used in metrics and logging in order to differentiate queues.
+	// Only valid in Prometheus versions 2.15.0 and newer.
+	Name string `json:"name,omitempty"`
+	// Timeout for requests to the remote write endpoint.
+	RemoteTimeout string `json:"remoteTimeout,omitempty"`
+	// The list of remote write relabel configurations.
+	WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"`
+	// BasicAuth for the URL.
+	BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
+	// Bearer token for remote write.
+	BearerToken string `json:"bearerToken,omitempty"`
+	// File to read bearer token for remote write.
+	BearerTokenFile string `json:"bearerTokenFile,omitempty"`
+	// TLS Config to use for remote write.
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+	// Optional ProxyURL
+	ProxyURL string `json:"proxyUrl,omitempty"`
+	// QueueConfig allows tuning of the remote write queue parameters.
+	QueueConfig *QueueConfig `json:"queueConfig,omitempty"`
+}
+
+// QueueConfig allows the tuning of remote_write queue_config parameters. This object
+// is referenced in the RemoteWriteSpec object.
+// +k8s:openapi-gen=true
+type QueueConfig struct {
+	// Capacity is the number of samples to buffer per shard before we start dropping them.
+	Capacity int `json:"capacity,omitempty"`
+	// MinShards is the minimum number of shards, i.e. amount of concurrency.
+	MinShards int `json:"minShards,omitempty"`
+	// MaxShards is the maximum number of shards, i.e. amount of concurrency.
+	MaxShards int `json:"maxShards,omitempty"`
+	// MaxSamplesPerSend is the maximum number of samples per send.
+	MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"`
+	// BatchSendDeadline is the maximum time a sample will wait in buffer.
+	BatchSendDeadline string `json:"batchSendDeadline,omitempty"`
+	// MaxRetries is the maximum number of times to retry a batch on recoverable errors.
+	MaxRetries int `json:"maxRetries,omitempty"`
+	// MinBackoff is the initial retry delay. Gets doubled for every retry.
+	MinBackoff string `json:"minBackoff,omitempty"`
+	// MaxBackoff is the maximum retry delay.
+	MaxBackoff string `json:"maxBackoff,omitempty"`
+}
+
+// RemoteReadSpec defines the remote_read configuration for prometheus.
+// +k8s:openapi-gen=true
+type RemoteReadSpec struct {
+	// The URL of the endpoint to query samples from.
+	URL string `json:"url"`
+	// The name of the remote read queue, must be unique if specified. The name
+	// is used in metrics and logging in order to differentiate read
+	// configurations. Only valid in Prometheus versions 2.15.0 and newer.
+	Name string `json:"name,omitempty"`
+	// An optional list of equality matchers which have to be present
+	// in a selector to query the remote read endpoint.
+	RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"`
+	// Timeout for requests to the remote read endpoint.
+	RemoteTimeout string `json:"remoteTimeout,omitempty"`
+	// Whether reads should be made for queries for time ranges that
+	// the local storage should have complete data for.
+	ReadRecent bool `json:"readRecent,omitempty"`
+	// BasicAuth for the URL.
+	BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
+	// Bearer token for remote read.
+	BearerToken string `json:"bearerToken,omitempty"`
+	// File to read bearer token for remote read.
+	BearerTokenFile string `json:"bearerTokenFile,omitempty"`
+	// TLS Config to use for remote read.
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+	// Optional ProxyURL
+	ProxyURL string `json:"proxyUrl,omitempty"`
+}
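A minimal sketch (again, not part of the vendored file) of wiring a QueueConfig into a RemoteWriteSpec; the endpoint URL and queue numbers are placeholders, not tuned values.

```go
package example

import monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"

// remoteWrite builds one remote_write entry with an explicit queue tuning.
func remoteWrite() monitoringv1.RemoteWriteSpec {
	return monitoringv1.RemoteWriteSpec{
		URL:           "https://metrics.example.com/api/v1/write",
		RemoteTimeout: "30s",
		QueueConfig: &monitoringv1.QueueConfig{
			Capacity:          2500,
			MaxShards:         200,
			MaxSamplesPerSend: 500,
			BatchSendDeadline: "5s",
		},
	}
}
```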
+// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
+// It defines `<metric_relabel_configs>`-section of Prometheus configuration.
+// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
+// +k8s:openapi-gen=true
+type RelabelConfig struct {
+	// The source labels select values from existing labels. Their content is concatenated
+	// using the configured separator and matched against the configured regular expression
+	// for the replace, keep, and drop actions.
+	SourceLabels []string `json:"sourceLabels,omitempty"`
+	// Separator placed between concatenated source label values. Default is ';'.
+	Separator string `json:"separator,omitempty"`
+	// Label to which the resulting value is written in a replace action.
+	// It is mandatory for replace actions. Regex capture groups are available.
+	TargetLabel string `json:"targetLabel,omitempty"`
+	// Regular expression against which the extracted value is matched. Default is '(.*)'.
+	Regex string `json:"regex,omitempty"`
+	// Modulus to take of the hash of the source label values.
+	Modulus uint64 `json:"modulus,omitempty"`
+	// Replacement value against which a regex replace is performed if the
+	// regular expression matches. Regex capture groups are available. Default is '$1'.
+	Replacement string `json:"replacement,omitempty"`
+	// Action to perform based on regex matching. Default is 'replace'.
+	Action string `json:"action,omitempty"`
+}
+
+// APIServerConfig defines a host and auth methods to access apiserver.
+// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
+// +k8s:openapi-gen=true
+type APIServerConfig struct {
+	// Host of apiserver.
+	// A valid string consisting of a hostname or IP followed by an optional port number.
+	Host string `json:"host"`
+	// BasicAuth allows an endpoint to authenticate over basic authentication.
+	BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
+	// Bearer token for accessing apiserver.
+	BearerToken string `json:"bearerToken,omitempty"`
+	// File to read bearer token for accessing apiserver.
+	BearerTokenFile string `json:"bearerTokenFile,omitempty"`
+	// TLS Config to use for accessing apiserver.
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// AlertmanagerEndpoints defines a selection of a single Endpoints object
+// containing alertmanager IPs to fire alerts against.
+// +k8s:openapi-gen=true
+type AlertmanagerEndpoints struct {
+	// Namespace of Endpoints object.
+	Namespace string `json:"namespace"`
+	// Name of Endpoints object in Namespace.
+	Name string `json:"name"`
+	// Port the Alertmanager API is exposed on.
+	Port intstr.IntOrString `json:"port"`
+	// Scheme to use when firing alerts.
+	Scheme string `json:"scheme,omitempty"`
+	// Prefix for the HTTP path alerts are pushed to.
+	PathPrefix string `json:"pathPrefix,omitempty"`
+	// TLS Config to use for alertmanager connection.
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+	// BearerTokenFile to read from filesystem to use when authenticating to
+	// Alertmanager.
+	BearerTokenFile string `json:"bearerTokenFile,omitempty"`
+	// Version of the Alertmanager API that Prometheus uses to send alerts. It
+	// can be "v1" or "v2".
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Timeout is a per-target Alertmanager timeout when pushing alerts.
+	Timeout *string `json:"timeout,omitempty"`
+}
+
+// ServiceMonitor defines monitoring for a set of services.
+// +genclient
+// +k8s:openapi-gen=true
+type ServiceMonitor struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of desired Service selection for target discovery by
+	// Prometheus.
+	Spec ServiceMonitorSpec `json:"spec"`
+}
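The RelabelConfig above maps one-to-one onto a Prometheus relabeling rule. A hedged example (label name and regex are illustrative) of a keep-style entry:

```go
package example

import monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"

// keepFrontendOnly drops every target whose "app" pod label does not
// match "frontend"; all other fields keep their documented defaults.
var keepFrontendOnly = monitoringv1.RelabelConfig{
	SourceLabels: []string{"__meta_kubernetes_pod_label_app"},
	Regex:        "frontend",
	Action:       "keep",
}
```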
+// ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
+// +k8s:openapi-gen=true
+type ServiceMonitorSpec struct {
+	// The label to use to retrieve the job name from.
+	JobLabel string `json:"jobLabel,omitempty"`
+	// TargetLabels transfers labels on the Kubernetes Service onto the target.
+	TargetLabels []string `json:"targetLabels,omitempty"`
+	// PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
+	PodTargetLabels []string `json:"podTargetLabels,omitempty"`
+	// A list of endpoints allowed as part of this ServiceMonitor.
+	Endpoints []Endpoint `json:"endpoints"`
+	// Selector to select Endpoints objects.
+	Selector metav1.LabelSelector `json:"selector"`
+	// Selector to select which namespaces the Endpoints objects are discovered from.
+	NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
+	// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
+	SampleLimit uint64 `json:"sampleLimit,omitempty"`
+}
+
+// Endpoint defines a scrapeable endpoint serving Prometheus metrics.
+// +k8s:openapi-gen=true
+type Endpoint struct {
+	// Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
+	Port string `json:"port,omitempty"`
+	// Name or number of the target port of the Pod behind the Service; the port must be specified with the container port property. Mutually exclusive with port.
+	TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
+	// HTTP path to scrape for metrics.
+	Path string `json:"path,omitempty"`
+	// HTTP scheme to use for scraping.
+	Scheme string `json:"scheme,omitempty"`
+	// Optional HTTP URL parameters.
+	Params map[string][]string `json:"params,omitempty"`
+	// Interval at which metrics should be scraped.
+	Interval string `json:"interval,omitempty"`
+	// Timeout after which the scrape is ended.
+	ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
+	// TLS configuration to use when scraping the endpoint.
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+	// File to read bearer token for scraping targets.
+	BearerTokenFile string `json:"bearerTokenFile,omitempty"`
+	// Secret to mount to read bearer token for scraping targets. The secret
+	// needs to be in the same namespace as the service monitor and accessible by
+	// the Prometheus Operator.
+	BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"`
+	// HonorLabels chooses the metric's labels on collisions with target labels.
+	HonorLabels bool `json:"honorLabels,omitempty"`
+	// HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data.
+	HonorTimestamps *bool `json:"honorTimestamps,omitempty"`
+	// BasicAuth allows an endpoint to authenticate over basic authentication.
+	// More info: https://prometheus.io/docs/operating/configuration/#endpoints
+	BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
+	// MetricRelabelConfigs to apply to samples before ingestion.
+	MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
+	// RelabelConfigs to apply to samples before scraping.
+	// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+	RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
+	// ProxyURL, e.g. http://proxyserver:2195, directs scrapes to proxy through this endpoint.
+	ProxyURL *string `json:"proxyUrl,omitempty"`
+}
+
+// PodMonitor defines monitoring for a set of pods.
+// +genclient
+// +k8s:openapi-gen=true
+type PodMonitor struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of desired Pod selection for target discovery by Prometheus.
+	Spec PodMonitorSpec `json:"spec"`
+}
+
+// PodMonitorSpec contains specification parameters for a PodMonitor.
+// +k8s:openapi-gen=true
+type PodMonitorSpec struct {
+	// The label to use to retrieve the job name from.
+	JobLabel string `json:"jobLabel,omitempty"`
+	// PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
+	PodTargetLabels []string `json:"podTargetLabels,omitempty"`
+	// A list of endpoints allowed as part of this PodMonitor.
+	PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"`
+	// Selector to select Pod objects.
+	Selector metav1.LabelSelector `json:"selector"`
+	// Selector to select which namespaces the Endpoints objects are discovered from.
+	NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
+	// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
+	SampleLimit uint64 `json:"sampleLimit,omitempty"`
+}
+
+// PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics.
+// +k8s:openapi-gen=true
+type PodMetricsEndpoint struct {
+	// Name of the pod port this endpoint refers to. Mutually exclusive with targetPort.
+	Port string `json:"port,omitempty"`
+	// Deprecated: Use 'port' instead.
+	TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
+	// HTTP path to scrape for metrics.
+	Path string `json:"path,omitempty"`
+	// HTTP scheme to use for scraping.
+	Scheme string `json:"scheme,omitempty"`
+	// Optional HTTP URL parameters.
+	Params map[string][]string `json:"params,omitempty"`
+	// Interval at which metrics should be scraped.
+	Interval string `json:"interval,omitempty"`
+	// Timeout after which the scrape is ended.
+	ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
+	// HonorLabels chooses the metric's labels on collisions with target labels.
+	HonorLabels bool `json:"honorLabels,omitempty"`
+	// HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data.
+	HonorTimestamps *bool `json:"honorTimestamps,omitempty"`
+	// MetricRelabelConfigs to apply to samples before ingestion.
+	MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
+	// RelabelConfigs to apply to samples before ingestion.
+	// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+	RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
+	// ProxyURL, e.g. http://proxyserver:2195, directs scrapes to proxy through this endpoint.
+	ProxyURL *string `json:"proxyUrl,omitempty"`
+}
+
+// Probe defines monitoring for a set of static targets or ingresses.
+// +genclient
+// +k8s:openapi-gen=true
+type Probe struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of desired Ingress selection for target discovery by Prometheus.
+	Spec ProbeSpec `json:"spec"`
+}
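To make the PodMonitor surface above concrete, a small hedged sketch; the labels, namespace, and port name are placeholders.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// examplePodMonitorSpec scrapes the "metrics" port of pods labelled
// app=example in the default namespace every 30 seconds.
func examplePodMonitorSpec() monitoringv1.PodMonitorSpec {
	return monitoringv1.PodMonitorSpec{
		Selector: metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "example"},
		},
		NamespaceSelector: monitoringv1.NamespaceSelector{
			MatchNames: []string{"default"},
		},
		PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
			{Port: "metrics", Path: "/metrics", Interval: "30s"},
		},
	}
}
```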
+// ProbeSpec contains specification parameters for a Probe.
+// +k8s:openapi-gen=true
+type ProbeSpec struct {
+	// The job name assigned to scraped metrics by default.
+	JobName string `json:"jobName,omitempty"`
+	// Specification for the prober to use for probing targets.
+	// The prober.URL parameter is required. Targets cannot be probed if left empty.
+	ProberSpec ProberSpec `json:"prober,omitempty"`
+	// The module to use for probing specifying how to probe the target.
+	// Example module configuration in the blackbox exporter:
+	// https://github.com/prometheus/blackbox_exporter/blob/master/example.yml
+	Module string `json:"module,omitempty"`
+	// Targets defines a set of static and/or dynamically discovered targets to be probed using the prober.
+	Targets ProbeTargets `json:"targets,omitempty"`
+	// Interval at which targets are probed using the configured prober.
+	// If not specified Prometheus' global scrape interval is used.
+	Interval string `json:"interval,omitempty"`
+	// Timeout for scraping metrics from the Prometheus exporter.
+	ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
+}
+
+// ProbeTargets defines a set of static and dynamically discovered targets for the prober.
+// +k8s:openapi-gen=true
+type ProbeTargets struct {
+	// StaticConfig defines static targets which are considered for probing.
+	// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.
+	StaticConfig *ProbeTargetStaticConfig `json:"staticConfig,omitempty"`
+	// Ingress defines the set of dynamically discovered ingress objects whose hosts are considered for probing.
+	Ingress *ProbeTargetIngress `json:"ingress,omitempty"`
+}
+
+// ProbeTargetStaticConfig defines the set of static targets considered for probing.
+// +k8s:openapi-gen=true
+type ProbeTargetStaticConfig struct {
+	// Targets is a list of URLs to probe using the configured prober.
+	Targets []string `json:"static,omitempty"`
+	// Labels assigned to all metrics scraped from the targets.
+	Labels map[string]string `json:"labels,omitempty"`
+}
+
+// ProbeTargetIngress defines the set of Ingress objects considered for probing.
+// +k8s:openapi-gen=true
+type ProbeTargetIngress struct {
+	// Select Ingress objects by labels.
+	Selector metav1.LabelSelector `json:"selector,omitempty"`
+	// Select Ingress objects by namespace.
+	NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
+	// RelabelConfigs to apply to samples before ingestion.
+	// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+	RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"`
+}
+
+// ProberSpec contains specification parameters for the Prober used for probing.
+// +k8s:openapi-gen=true
+type ProberSpec struct {
+	// Mandatory URL of the prober.
+	URL string `json:"url"`
+	// HTTP scheme to use for scraping.
+	// Defaults to `http`.
+	Scheme string `json:"scheme,omitempty"`
+	// Path to collect metrics from.
+	// Defaults to `/probe`.
+	Path string `json:"path,omitempty"`
+}
+
+// BasicAuth allows an endpoint to authenticate over basic authentication.
+// More info: https://prometheus.io/docs/operating/configuration/#endpoints
+// +k8s:openapi-gen=true
+type BasicAuth struct {
+	// The secret in the service monitor namespace that contains the username
+	// for authentication.
+	Username v1.SecretKeySelector `json:"username,omitempty"`
+	// The secret in the service monitor namespace that contains the password
+	// for authentication.
+	Password v1.SecretKeySelector `json:"password,omitempty"`
+}
+
+// SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive.
+type SecretOrConfigMap struct {
+	// Secret containing data to use for the targets.
+	Secret *v1.SecretKeySelector `json:"secret,omitempty"`
+	// ConfigMap containing data to use for the targets.
+	ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"`
+}
+
+// SecretOrConfigMapValidationError is returned by SecretOrConfigMap.Validate()
+// on semantically invalid configurations.
+// +k8s:openapi-gen=false
+type SecretOrConfigMapValidationError struct {
+	err string
+}
+
+func (e *SecretOrConfigMapValidationError) Error() string {
+	return e.err
+}
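Both BasicAuth fields reference keys in a Secret rather than carrying credentials inline. A hedged sketch ("scrape-credentials" is an invented Secret name; it must live in the monitor's namespace):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// scrapeAuth pulls username and password from the same Secret.
var scrapeAuth = monitoringv1.BasicAuth{
	Username: corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{Name: "scrape-credentials"},
		Key:                  "username",
	},
	Password: corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{Name: "scrape-credentials"},
		Key:                  "password",
	},
}
```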
+// Validate semantically validates the given SecretOrConfigMap.
+func (c *SecretOrConfigMap) Validate() error {
+	if c.Secret != nil && c.ConfigMap != nil {
+		return &SecretOrConfigMapValidationError{"SecretOrConfigMap can not specify both Secret and ConfigMap"}
+	}
+
+	return nil
+}
+
+// TLSConfig specifies TLS configuration parameters.
+// +k8s:openapi-gen=true
+type TLSConfig struct {
+	// Path to the CA cert in the Prometheus container to use for the targets.
+	CAFile string `json:"caFile,omitempty"`
+	// Struct containing the CA cert to use for the targets.
+	CA SecretOrConfigMap `json:"ca,omitempty"`
+
+	// Path to the client cert file in the Prometheus container for the targets.
+	CertFile string `json:"certFile,omitempty"`
+	// Struct containing the client cert file for the targets.
+	Cert SecretOrConfigMap `json:"cert,omitempty"`
+
+	// Path to the client key file in the Prometheus container for the targets.
+	KeyFile string `json:"keyFile,omitempty"`
+	// Secret containing the client key file for the targets.
+	KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"`
+
+	// Used to verify the hostname for the targets.
+	ServerName string `json:"serverName,omitempty"`
+	// Disable target certificate validation.
+	InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
+}
+
+// TLSConfigValidationError is returned by TLSConfig.Validate() on semantically
+// invalid tls configurations.
+// +k8s:openapi-gen=false
+type TLSConfigValidationError struct {
+	err string
+}
+
+func (e *TLSConfigValidationError) Error() string {
+	return e.err
+}
+
+// Validate semantically validates the given TLSConfig.
+func (c *TLSConfig) Validate() error {
+	if c.CA != (SecretOrConfigMap{}) {
+		if c.CAFile != "" {
+			return &TLSConfigValidationError{"tls config can not both specify CAFile and CA"}
+		}
+		if err := c.CA.Validate(); err != nil {
+			return err
+		}
+	}
+
+	if c.Cert != (SecretOrConfigMap{}) {
+		if c.CertFile != "" {
+			return &TLSConfigValidationError{"tls config can not both specify CertFile and Cert"}
+		}
+		if err := c.Cert.Validate(); err != nil {
+			return err
+		}
+	}
+
+	if c.KeyFile != "" && c.KeySecret != nil {
+		return &TLSConfigValidationError{"tls config can not both specify KeyFile and KeySecret"}
+	}
+
+	return nil
+}
+
+// ServiceMonitorList is a list of ServiceMonitors.
+// +k8s:openapi-gen=true
+type ServiceMonitorList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of ServiceMonitors
+	Items []*ServiceMonitor `json:"items"`
+}
+
+// PodMonitorList is a list of PodMonitors.
+// +k8s:openapi-gen=true
+type PodMonitorList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of PodMonitors
+	Items []*PodMonitor `json:"items"`
+}
+
+// ProbeList is a list of Probes.
+// +k8s:openapi-gen=true
+type ProbeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of Probes
+	Items []*Probe `json:"items"`
+}
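The validators above enforce the file-path/in-cluster-object mutual exclusions. A short hedged sketch of the failure mode (the Secret name and key are invented):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// conflictingTLS sets both CAFile and CA, so Validate returns the
// "tls config can not both specify CAFile and CA" error defined above.
func conflictingTLS() error {
	cfg := monitoringv1.TLSConfig{
		CAFile: "/etc/prometheus/ca.crt",
		CA: monitoringv1.SecretOrConfigMap{
			Secret: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "scrape-ca"},
				Key:                  "ca.crt",
			},
		},
	}
	return cfg.Validate() // non-nil: CAFile and CA are mutually exclusive
}
```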
+// PrometheusRuleList is a list of PrometheusRules.
+// +k8s:openapi-gen=true
+type PrometheusRuleList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of Rules
+	Items []*PrometheusRule `json:"items"`
+}
+
+// PrometheusRule defines alerting rules for a Prometheus instance
+// +genclient
+// +k8s:openapi-gen=true
+type PrometheusRule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of desired alerting rule definitions for Prometheus.
+	Spec PrometheusRuleSpec `json:"spec"`
+}
+
+// PrometheusRuleSpec contains specification parameters for a Rule.
+// +k8s:openapi-gen=true
+type PrometheusRuleSpec struct {
+	// Content of Prometheus rule file
+	Groups []RuleGroup `json:"groups,omitempty"`
+}
+
+// RuleGroup and Rule are copied instead of vendored because the
+// upstream Prometheus struct definitions don't have json struct tags.
+
+// RuleGroup is a list of sequentially evaluated recording and alerting rules.
+// Note: PartialResponseStrategy is only used by ThanosRuler and will
+// be ignored by Prometheus instances. Valid values for this field are 'warn'
+// or 'abort'. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response
+// +k8s:openapi-gen=true
+type RuleGroup struct {
+	Name                    string `json:"name"`
+	Interval                string `json:"interval,omitempty"`
+	Rules                   []Rule `json:"rules"`
+	PartialResponseStrategy string `json:"partial_response_strategy,omitempty"`
+}
+
+// Rule describes an alerting or recording rule.
+// +k8s:openapi-gen=true
+type Rule struct {
+	Record      string             `json:"record,omitempty"`
+	Alert       string             `json:"alert,omitempty"`
+	Expr        intstr.IntOrString `json:"expr"`
+	For         string             `json:"for,omitempty"`
+	Labels      map[string]string  `json:"labels,omitempty"`
+	Annotations map[string]string  `json:"annotations,omitempty"`
+}
+
+// Alertmanager describes an Alertmanager cluster.
+// +genclient
+// +k8s:openapi-gen=true
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Alertmanager"
+// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The desired replicas number of Alertmanagers"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+type Alertmanager struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Specification of the desired behavior of the Alertmanager cluster. More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec AlertmanagerSpec `json:"spec"`
+	// Most recent observed status of the Alertmanager cluster. Read-only. Not
+	// included when requesting from the apiserver, only from the Prometheus
+	// Operator API itself. More info:
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Status *AlertmanagerStatus `json:"status,omitempty"`
+}
+
+// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info:
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+// +k8s:openapi-gen=true
+type AlertmanagerSpec struct {
+	// PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods.
+	PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
+	// Image if specified has precedence over baseImage, tag and sha
+	// combinations. Specifying the version is still necessary to ensure the
+	// Prometheus Operator knows what version of Alertmanager is being
+	// configured.
+	Image *string `json:"image,omitempty"`
+	// Version the cluster should be on.
+	Version string `json:"version,omitempty"`
+	// Tag of Alertmanager container image to be deployed. Defaults to the value of `version`.
+	// Version is ignored if Tag is set.
+	// Deprecated: use 'image' instead. The image tag can be specified
+	// as part of the image URL.
+	Tag string `json:"tag,omitempty"`
+	// SHA of Alertmanager container image to be deployed. Defaults to the value of `version`.
+	// Similar to a tag, but the SHA explicitly deploys an immutable container image.
+	// Version and Tag are ignored if SHA is set.
+	// Deprecated: use 'image' instead. The image digest can be specified
+	// as part of the image URL.
+	SHA string `json:"sha,omitempty"`
+	// Base image that is used to deploy pods, without tag.
+	// Deprecated: use 'image' instead
+	BaseImage string `json:"baseImage,omitempty"`
+	// An optional list of references to secrets in the same namespace
+	// to use for pulling prometheus and alertmanager images from registries,
+	// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
+	ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
+	// Secrets is a list of Secrets in the same namespace as the Alertmanager
+	// object, which shall be mounted into the Alertmanager Pods.
+	// The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
+	Secrets []string `json:"secrets,omitempty"`
+	// ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager
+	// object, which shall be mounted into the Alertmanager Pods.
+	// The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
+	ConfigMaps []string `json:"configMaps,omitempty"`
+	// ConfigSecret is the name of a Kubernetes Secret in the same namespace as the
+	// Alertmanager object, which contains configuration for this Alertmanager
+	// instance. Defaults to 'alertmanager-<alertmanager-name>'.
+	// The secret is mounted into /etc/alertmanager/config.
+	ConfigSecret string `json:"configSecret,omitempty"`
+	// Log level for Alertmanager to be configured with.
+	LogLevel string `json:"logLevel,omitempty"`
+	// Log format for Alertmanager to be configured with.
+	LogFormat string `json:"logFormat,omitempty"`
+	// Size is the expected size of the alertmanager cluster. The controller will
+	// eventually make the size of the running cluster equal to the expected
+	// size.
+	Replicas *int32 `json:"replicas,omitempty"`
+	// Time duration Alertmanager shall retain data for. Default is '120h',
+	// and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours).
+	Retention string `json:"retention,omitempty"`
+	// Storage is the definition of how storage will be used by the Alertmanager
+	// instances.
+	Storage *StorageSpec `json:"storage,omitempty"`
+	// Volumes allows configuration of additional volumes on the output StatefulSet definition.
+	// Volumes specified will be appended to other volumes that are generated as a result of
+	// StorageSpec objects.
+	Volumes []v1.Volume `json:"volumes,omitempty"`
+	// VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition.
+	// VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container,
+	// that are generated as a result of StorageSpec objects.
+	VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`
+	// The external URL the Alertmanager instances will be available under. This is
+	// necessary to generate correct URLs. This is necessary if Alertmanager is not
+	// served from the root of a DNS name.
+	ExternalURL string `json:"externalUrl,omitempty"`
+	// The route prefix Alertmanager registers HTTP handlers for. This is useful,
+	// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
+	// and the actual ExternalURL is still true, but the server serves requests
+	// under a different route prefix. For example for use with `kubectl proxy`.
+	RoutePrefix string `json:"routePrefix,omitempty"`
+	// If set to true, all actions on the underlying managed objects are not
+	// going to be performed, except for delete actions.
+	Paused bool `json:"paused,omitempty"`
+	// Define which Nodes the Pods are scheduled on.
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// Define resources requests and limits for single Pods.
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	// If specified, the pod's scheduling constraints.
+	Affinity *v1.Affinity `json:"affinity,omitempty"`
+	// If specified, the pod's tolerations.
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// This defaults to the default PodSecurityContext.
+	SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
+	// ServiceAccountName is the name of the ServiceAccount to use to run the
+	// Prometheus Pods.
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+	// ListenLocal makes the Alertmanager server listen on loopback, so that it
+	// does not bind against the Pod IP. Note this is only for the Alertmanager
+	// UI, not the gossip communication.
+	ListenLocal bool `json:"listenLocal,omitempty"`
+	// Containers allows injecting additional containers. This is meant to
+	// allow adding an authentication proxy to an Alertmanager pod.
+	Containers []v1.Container `json:"containers,omitempty"`
+	// InitContainers allows adding initContainers to the pod definition. Those can be used to e.g.
+	// fetch secrets for injection into the Alertmanager configuration from external sources. Any
+	// errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// Using initContainers for any use case other than secret fetching is entirely outside the scope
+	// of what the maintainers will support and by doing so, you accept that this behaviour may break
+	// at any time without notice.
+	InitContainers []v1.Container `json:"initContainers,omitempty"`
+	// Priority class assigned to the Pods.
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+	// AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
+	AdditionalPeers []string `json:"additionalPeers,omitempty"`
+	// ClusterAdvertiseAddress is the explicit address to advertise in cluster.
+	// Needs to be provided for non RFC1918 [1] (public) addresses.
+	// [1] RFC1918: https://tools.ietf.org/html/rfc1918
+	ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"`
+	// Port name used for the pods and governing service.
+	// This defaults to web.
+	PortName string `json:"portName,omitempty"`
+	// ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
+	// Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
+	ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"`
+}
+
+// AlertmanagerList is a list of Alertmanagers.
+// +k8s:openapi-gen=true
+type AlertmanagerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of Alertmanagers
+	Items []Alertmanager `json:"items"`
+}
+
+// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not
+// included when requesting from the apiserver, only from the Prometheus
+// Operator API itself. More info:
+// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+// +k8s:openapi-gen=true
+type AlertmanagerStatus struct {
+	// Represents whether any actions on the underlying managed objects are
+	// being performed. Only delete actions will be performed.
+	Paused bool `json:"paused"`
+	// Total number of non-terminated pods targeted by this Alertmanager
+	// cluster (their labels match the selector).
+	Replicas int32 `json:"replicas"`
+	// Total number of non-terminated pods targeted by this Alertmanager
+	// cluster that have the desired version spec.
+	UpdatedReplicas int32 `json:"updatedReplicas"`
+	// Total number of available pods (ready for at least minReadySeconds)
+	// targeted by this Alertmanager cluster.
+	AvailableReplicas int32 `json:"availableReplicas"`
+	// Total number of unavailable pods targeted by this Alertmanager cluster.
+	UnavailableReplicas int32 `json:"unavailableReplicas"`
+}
+
+// NamespaceSelector is a selector for selecting either all namespaces or a
+// list of namespaces.
+// +k8s:openapi-gen=true
+type NamespaceSelector struct {
+	// Boolean describing whether all namespaces are selected in contrast to a
+	// list restricting them.
+	Any bool `json:"any,omitempty"`
+	// List of namespace names.
+	MatchNames []string `json:"matchNames,omitempty"`
+
+	// TODO(fabxc): this should embed metav1.LabelSelector eventually.
+	// Currently the selector is only used for namespaces which require more complex
+	// implementation to support label selections.
+}
+
+// Rules defines the /--rules.*/ command-line arguments.
+// +k8s:openapi-gen=true
+type Rules struct {
+	Alert RulesAlert `json:"alert,omitempty"`
+}
+
+// RulesAlert defines the /--rules.alert.*/ command-line arguments.
+// +k8s:openapi-gen=true
+type RulesAlert struct {
+	// Max time to tolerate prometheus outage for restoring 'for' state of alert.
+	ForOutageTolerance string `json:"forOutageTolerance,omitempty"`
+	// Minimum duration between alert and restored 'for' state.
+	// This is maintained only for alerts with a configured 'for' time greater than the grace period.
+ ForGracePeriod string `json:"forGracePeriod,omitempty"` + // Minimum amount of time to wait before resending an alert to Alertmanager. + ResendDelay string `json:"resendDelay,omitempty"` +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Alertmanager) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *AlertmanagerList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Prometheus) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ServiceMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PodMonitor) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PodMonitorList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *Probe) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ProbeList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (f *PrometheusRule) DeepCopyObject() runtime.Object { + return f.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..23eebd38bed00ade6a540522b205d2e17f2589b3 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -0,0 +1,1786 @@ +// +build !ignore_autogenerated + +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServerConfig) DeepCopyInto(out *APIServerConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerConfig. +func (in *APIServerConfig) DeepCopy() *APIServerConfig { + if in == nil { + return nil + } + out := new(APIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingSpec) DeepCopyInto(out *AlertingSpec) { + *out = *in + if in.Alertmanagers != nil { + in, out := &in.Alertmanagers, &out.Alertmanagers + *out = make([]AlertmanagerEndpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingSpec. +func (in *AlertingSpec) DeepCopy() *AlertingSpec { + if in == nil { + return nil + } + out := new(AlertingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alertmanager) DeepCopyInto(out *Alertmanager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(AlertmanagerStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alertmanager. +func (in *Alertmanager) DeepCopy() *Alertmanager { + if in == nil { + return nil + } + out := new(Alertmanager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { + *out = *in + out.Port = in.Port + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerEndpoints. +func (in *AlertmanagerEndpoints) DeepCopy() *AlertmanagerEndpoints { + if in == nil { + return nil + } + out := new(AlertmanagerEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerList) DeepCopyInto(out *AlertmanagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Alertmanager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerList. +func (in *AlertmanagerList) DeepCopy() *AlertmanagerList { + if in == nil { + return nil + } + out := new(AlertmanagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { + *out = *in + if in.PodMetadata != nil { + in, out := &in.PodMetadata, &out.PodMetadata + *out = new(EmbeddedObjectMetadata) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalPeers != nil { + in, out := &in.AdditionalPeers, &out.AdditionalPeers + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerSpec. +func (in *AlertmanagerSpec) DeepCopy() *AlertmanagerSpec { + if in == nil { + return nil + } + out := new(AlertmanagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerStatus) DeepCopyInto(out *AlertmanagerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerStatus. +func (in *AlertmanagerStatus) DeepCopy() *AlertmanagerStatus { + if in == nil { + return nil + } + out := new(AlertmanagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopyInto(out *ArbitraryFSAccessThroughSMsConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArbitraryFSAccessThroughSMsConfig. +func (in *ArbitraryFSAccessThroughSMsConfig) DeepCopy() *ArbitraryFSAccessThroughSMsConfig { + if in == nil { + return nil + } + out := new(ArbitraryFSAccessThroughSMsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Password.DeepCopyInto(&out.Password) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. +func (in *BasicAuth) DeepCopy() *BasicAuth { + if in == nil { + return nil + } + out := new(BasicAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedObjectMetadata. +func (in *EmbeddedObjectMetadata) DeepCopy() *EmbeddedObjectMetadata { + if in == nil { + return nil + } + out := new(EmbeddedObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPersistentVolumeClaim) DeepCopyInto(out *EmbeddedPersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.EmbeddedObjectMetadata.DeepCopyInto(&out.EmbeddedObjectMetadata) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPersistentVolumeClaim. +func (in *EmbeddedPersistentVolumeClaim) DeepCopy() *EmbeddedPersistentVolumeClaim { + if in == nil { + return nil + } + out := new(EmbeddedPersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + in.BearerTokenSecret.DeepCopyInto(&out.BearerTokenSecret) + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } + if in.ProxyURL != nil { + in, out := &in.ProxyURL, &out.ProxyURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { + *out = *in + if in.MatchNames != nil { + in, out := &in.MatchNames, &out.MatchNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector. +func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { + if in == nil { + return nil + } + out := new(NamespaceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.HonorTimestamps != nil { + in, out := &in.HonorTimestamps, &out.HonorTimestamps + *out = new(bool) + **out = **in + } + if in.MetricRelabelConfigs != nil { + in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } + if in.ProxyURL != nil { + in, out := &in.ProxyURL, &out.ProxyURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsEndpoint. +func (in *PodMetricsEndpoint) DeepCopy() *PodMetricsEndpoint { + if in == nil { + return nil + } + out := new(PodMetricsEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitor) DeepCopyInto(out *PodMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitor. +func (in *PodMonitor) DeepCopy() *PodMonitor { + if in == nil { + return nil + } + out := new(PodMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMonitorList) DeepCopyInto(out *PodMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*PodMonitor, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PodMonitor) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorList. +func (in *PodMonitorList) DeepCopy() *PodMonitorList { + if in == nil { + return nil + } + out := new(PodMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodMonitorSpec) DeepCopyInto(out *PodMonitorSpec) { + *out = *in + if in.PodTargetLabels != nil { + in, out := &in.PodTargetLabels, &out.PodTargetLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PodMetricsEndpoints != nil { + in, out := &in.PodMetricsEndpoints, &out.PodMetricsEndpoints + *out = make([]PodMetricsEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Selector.DeepCopyInto(&out.Selector) + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMonitorSpec. +func (in *PodMonitorSpec) DeepCopy() *PodMonitorSpec { + if in == nil { + return nil + } + out := new(PodMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeList) DeepCopyInto(out *ProbeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*Probe, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeList. +func (in *ProbeList) DeepCopy() *ProbeList { + if in == nil { + return nil + } + out := new(ProbeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + out.ProberSpec = in.ProberSpec + in.Targets.DeepCopyInto(&out.Targets) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetIngress. +func (in *ProbeTargetIngress) DeepCopy() *ProbeTargetIngress { + if in == nil { + return nil + } + out := new(ProbeTargetIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeTargetStaticConfig) DeepCopyInto(out *ProbeTargetStaticConfig) {
+	*out = *in
+	if in.Targets != nil {
+		in, out := &in.Targets, &out.Targets
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetStaticConfig.
+func (in *ProbeTargetStaticConfig) DeepCopy() *ProbeTargetStaticConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ProbeTargetStaticConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProbeTargets) DeepCopyInto(out *ProbeTargets) {
+	*out = *in
+	if in.StaticConfig != nil {
+		in, out := &in.StaticConfig, &out.StaticConfig
+		*out = new(ProbeTargetStaticConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = new(ProbeTargetIngress)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargets.
+func (in *ProbeTargets) DeepCopy() *ProbeTargets {
+	if in == nil {
+		return nil
+	}
+	out := new(ProbeTargets)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProberSpec) DeepCopyInto(out *ProberSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProberSpec.
+func (in *ProberSpec) DeepCopy() *ProberSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ProberSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Prometheus) DeepCopyInto(out *Prometheus) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(PrometheusStatus)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
+func (in *Prometheus) DeepCopy() *Prometheus {
+	if in == nil {
+		return nil
+	}
+	out := new(Prometheus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusList) DeepCopyInto(out *PrometheusList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]*Prometheus, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(Prometheus)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusList.
+func (in *PrometheusList) DeepCopy() *PrometheusList {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusRule) DeepCopyInto(out *PrometheusRule) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRule.
+func (in *PrometheusRule) DeepCopy() *PrometheusRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusRuleExcludeConfig) DeepCopyInto(out *PrometheusRuleExcludeConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleExcludeConfig.
+func (in *PrometheusRuleExcludeConfig) DeepCopy() *PrometheusRuleExcludeConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusRuleExcludeConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusRuleList) DeepCopyInto(out *PrometheusRuleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]*PrometheusRule, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(PrometheusRule)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleList.
+func (in *PrometheusRuleList) DeepCopy() *PrometheusRuleList {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusRuleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusRuleSpec) DeepCopyInto(out *PrometheusRuleSpec) {
+	*out = *in
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]RuleGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleSpec.
+func (in *PrometheusRuleSpec) DeepCopy() *PrometheusRuleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusRuleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
+	*out = *in
+	if in.PodMetadata != nil {
+		in, out := &in.PodMetadata, &out.PodMetadata
+		*out = new(EmbeddedObjectMetadata)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceMonitorSelector != nil {
+		in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceMonitorNamespaceSelector != nil {
+		in, out := &in.ServiceMonitorNamespaceSelector, &out.ServiceMonitorNamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodMonitorSelector != nil {
+		in, out := &in.PodMonitorSelector, &out.PodMonitorSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodMonitorNamespaceSelector != nil {
+		in, out := &in.PodMonitorNamespaceSelector, &out.PodMonitorNamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ProbeSelector != nil {
+		in, out := &in.ProbeSelector, &out.ProbeSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ProbeNamespaceSelector != nil {
+		in, out := &in.ProbeNamespaceSelector, &out.ProbeNamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(string)
+		**out = **in
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]corev1.LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.ReplicaExternalLabelName != nil {
+		in, out := &in.ReplicaExternalLabelName, &out.ReplicaExternalLabelName
+		*out = new(string)
+		**out = **in
+	}
+	if in.PrometheusExternalLabelName != nil {
+		in, out := &in.PrometheusExternalLabelName, &out.PrometheusExternalLabelName
+		*out = new(string)
+		**out = **in
+	}
+	if in.WALCompression != nil {
+		in, out := &in.WALCompression, &out.WALCompression
+		*out = new(bool)
+		**out = **in
+	}
+	out.Rules = in.Rules
+	if in.ExternalLabels != nil {
+		in, out := &in.ExternalLabels, &out.ExternalLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Query != nil {
+		in, out := &in.Query, &out.Query
+		*out = new(QuerySpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = new(StorageSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]corev1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]corev1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RuleSelector != nil {
+		in, out := &in.RuleSelector, &out.RuleSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RuleNamespaceSelector != nil {
+		in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Alerting != nil {
+		in, out := &in.Alerting, &out.Alerting
+		*out = new(AlertingSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ConfigMaps != nil {
+		in, out := &in.ConfigMaps, &out.ConfigMaps
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(corev1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteWrite != nil {
+		in, out := &in.RemoteWrite, &out.RemoteWrite
+		*out = make([]RemoteWriteSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RemoteRead != nil {
+		in, out := &in.RemoteRead, &out.RemoteRead
+		*out = make([]RemoteReadSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(corev1.PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]corev1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]corev1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AdditionalScrapeConfigs != nil {
+		in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AdditionalAlertRelabelConfigs != nil {
+		in, out := &in.AdditionalAlertRelabelConfigs, &out.AdditionalAlertRelabelConfigs
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AdditionalAlertManagerConfigs != nil {
+		in, out := &in.AdditionalAlertManagerConfigs, &out.AdditionalAlertManagerConfigs
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.APIServerConfig != nil {
+		in, out := &in.APIServerConfig, &out.APIServerConfig
+		*out = new(APIServerConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Thanos != nil {
+		in, out := &in.Thanos, &out.Thanos
+		*out = new(ThanosSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	out.ArbitraryFSAccessThroughSMs = in.ArbitraryFSAccessThroughSMs
+	if in.PrometheusRulesExcludedFromEnforce != nil {
+		in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce
+		*out = make([]PrometheusRuleExcludeConfig, len(*in))
+		copy(*out, *in)
+	}
+	if in.EnforcedSampleLimit != nil {
+		in, out := &in.EnforcedSampleLimit, &out.EnforcedSampleLimit
+		*out = new(uint64)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec.
+func (in *PrometheusSpec) DeepCopy() *PrometheusSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusStatus) DeepCopyInto(out *PrometheusStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusStatus.
+func (in *PrometheusStatus) DeepCopy() *PrometheusStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PrometheusStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuerySpec) DeepCopyInto(out *QuerySpec) {
+	*out = *in
+	if in.LookbackDelta != nil {
+		in, out := &in.LookbackDelta, &out.LookbackDelta
+		*out = new(string)
+		**out = **in
+	}
+	if in.MaxConcurrency != nil {
+		in, out := &in.MaxConcurrency, &out.MaxConcurrency
+		*out = new(int32)
+		**out = **in
+	}
+	if in.MaxSamples != nil {
+		in, out := &in.MaxSamples, &out.MaxSamples
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySpec.
+func (in *QuerySpec) DeepCopy() *QuerySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(QuerySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueueConfig) DeepCopyInto(out *QueueConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig.
+func (in *QueueConfig) DeepCopy() *QueueConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(QueueConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) {
+	*out = *in
+	if in.SourceLabels != nil {
+		in, out := &in.SourceLabels, &out.SourceLabels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig.
+func (in *RelabelConfig) DeepCopy() *RelabelConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(RelabelConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteReadSpec) DeepCopyInto(out *RemoteReadSpec) {
+	*out = *in
+	if in.RequiredMatchers != nil {
+		in, out := &in.RequiredMatchers, &out.RequiredMatchers
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.BasicAuth != nil {
+		in, out := &in.BasicAuth, &out.BasicAuth
+		*out = new(BasicAuth)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteReadSpec.
+func (in *RemoteReadSpec) DeepCopy() *RemoteReadSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteReadSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) {
+	*out = *in
+	if in.WriteRelabelConfigs != nil {
+		in, out := &in.WriteRelabelConfigs, &out.WriteRelabelConfigs
+		*out = make([]RelabelConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.BasicAuth != nil {
+		in, out := &in.BasicAuth, &out.BasicAuth
+		*out = new(BasicAuth)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.QueueConfig != nil {
+		in, out := &in.QueueConfig, &out.QueueConfig
+		*out = new(QueueConfig)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec.
+func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RemoteWriteSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rule) DeepCopyInto(out *Rule) {
+	*out = *in
+	out.Expr = in.Expr
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
+	if in == nil {
+		return nil
+	}
+	out := new(Rule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleGroup) DeepCopyInto(out *RuleGroup) {
+	*out = *in
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]Rule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup.
+func (in *RuleGroup) DeepCopy() *RuleGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(RuleGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rules) DeepCopyInto(out *Rules) {
+	*out = *in
+	out.Alert = in.Alert
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules.
+func (in *Rules) DeepCopy() *Rules {
+	if in == nil {
+		return nil
+	}
+	out := new(Rules)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulesAlert) DeepCopyInto(out *RulesAlert) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesAlert.
+func (in *RulesAlert) DeepCopy() *RulesAlert {
+	if in == nil {
+		return nil
+	}
+	out := new(RulesAlert)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretOrConfigMap) DeepCopyInto(out *SecretOrConfigMap) {
+	*out = *in
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(corev1.ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMap.
+func (in *SecretOrConfigMap) DeepCopy() *SecretOrConfigMap {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretOrConfigMap)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretOrConfigMapValidationError) DeepCopyInto(out *SecretOrConfigMapValidationError) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMapValidationError.
+func (in *SecretOrConfigMapValidationError) DeepCopy() *SecretOrConfigMapValidationError {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretOrConfigMapValidationError)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor.
+func (in *ServiceMonitor) DeepCopy() *ServiceMonitor {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceMonitor)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitorList) DeepCopyInto(out *ServiceMonitorList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]*ServiceMonitor, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(ServiceMonitor)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorList.
+func (in *ServiceMonitorList) DeepCopy() *ServiceMonitorList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceMonitorList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) {
+	*out = *in
+	if in.TargetLabels != nil {
+		in, out := &in.TargetLabels, &out.TargetLabels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.PodTargetLabels != nil {
+		in, out := &in.PodTargetLabels, &out.PodTargetLabels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Endpoints != nil {
+		in, out := &in.Endpoints, &out.Endpoints
+		*out = make([]Endpoint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Selector.DeepCopyInto(&out.Selector)
+	in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorSpec.
+func (in *ServiceMonitorSpec) DeepCopy() *ServiceMonitorSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceMonitorSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
+	*out = *in
+	if in.EmptyDir != nil {
+		in, out := &in.EmptyDir, &out.EmptyDir
+		*out = new(corev1.EmptyDirVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	in.VolumeClaimTemplate.DeepCopyInto(&out.VolumeClaimTemplate)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec.
+func (in *StorageSpec) DeepCopy() *StorageSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	in.CA.DeepCopyInto(&out.CA)
+	in.Cert.DeepCopyInto(&out.Cert)
+	if in.KeySecret != nil {
+		in, out := &in.KeySecret, &out.KeySecret
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfigValidationError) DeepCopyInto(out *TLSConfigValidationError) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigValidationError.
+func (in *TLSConfigValidationError) DeepCopy() *TLSConfigValidationError {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfigValidationError)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ThanosRuler) DeepCopyInto(out *ThanosRuler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ThanosRulerStatus)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRuler.
+func (in *ThanosRuler) DeepCopy() *ThanosRuler {
+	if in == nil {
+		return nil
+	}
+	out := new(ThanosRuler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ThanosRulerList) DeepCopyInto(out *ThanosRulerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]*ThanosRuler, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(ThanosRuler)
+				(*in).DeepCopyInto(*out)
+			}
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerList.
+func (in *ThanosRulerList) DeepCopy() *ThanosRulerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ThanosRulerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) {
+	*out = *in
+	if in.PodMetadata != nil {
+		in, out := &in.PodMetadata, &out.PodMetadata
+		*out = new(EmbeddedObjectMetadata)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]corev1.LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(corev1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(corev1.PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Storage != nil {
+		in, out := &in.Storage, &out.Storage
+		*out = new(StorageSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]corev1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ObjectStorageConfig != nil {
+		in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.QueryEndpoints != nil {
+		in, out := &in.QueryEndpoints, &out.QueryEndpoints
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.QueryConfig != nil {
+		in, out := &in.QueryConfig, &out.QueryConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AlertManagersURL != nil {
+		in, out := &in.AlertManagersURL, &out.AlertManagersURL
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AlertManagersConfig != nil {
+		in, out := &in.AlertManagersConfig, &out.AlertManagersConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RuleSelector != nil {
+		in, out := &in.RuleSelector, &out.RuleSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RuleNamespaceSelector != nil {
+		in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PrometheusRulesExcludedFromEnforce != nil {
+		in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce
+		*out = make([]PrometheusRuleExcludeConfig, len(*in))
+		copy(*out, *in)
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]corev1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]corev1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TracingConfig != nil {
+		in, out := &in.TracingConfig, &out.TracingConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AlertDropLabels != nil {
+		in, out := &in.AlertDropLabels, &out.AlertDropLabels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.GRPCServerTLSConfig != nil {
+		in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig
+		*out = new(TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerSpec.
+func (in *ThanosRulerSpec) DeepCopy() *ThanosRulerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ThanosRulerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ThanosRulerStatus) DeepCopyInto(out *ThanosRulerStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerStatus.
+func (in *ThanosRulerStatus) DeepCopy() *ThanosRulerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ThanosRulerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ThanosSpec) DeepCopyInto(out *ThanosSpec) {
+	*out = *in
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(string)
+		**out = **in
+	}
+	if in.Version != nil {
+		in, out := &in.Version, &out.Version
+		*out = new(string)
+		**out = **in
+	}
+	if in.Tag != nil {
+		in, out := &in.Tag, &out.Tag
+		*out = new(string)
+		**out = **in
+	}
+	if in.SHA != nil {
+		in, out := &in.SHA, &out.SHA
+		*out = new(string)
+		**out = **in
+	}
+	if in.BaseImage != nil {
+		in, out := &in.BaseImage, &out.BaseImage
+		*out = new(string)
+		**out = **in
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.ObjectStorageConfig != nil {
+		in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TracingConfig != nil {
+		in, out := &in.TracingConfig, &out.TracingConfig
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GRPCServerTLSConfig != nil {
+		in, out := &in.GRPCServerTLSConfig, &out.GRPCServerTLSConfig
+		*out = new(TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosSpec.
+func (in *ThanosSpec) DeepCopy() *ThanosSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ThanosSpec)
+	in.DeepCopyInto(out)
+	return out
+}
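The generated deep-copy methods above guarantee that a copy shares no mutable state with its source: every nested pointer, slice, and map gets fresh backing storage. A minimal, hypothetical sketch of that contract (not part of this PR; the rule-group name is illustrative):

package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func main() {
	orig := &monitoringv1.PrometheusRule{
		Spec: monitoringv1.PrometheusRuleSpec{
			Groups: []monitoringv1.RuleGroup{{Name: "node.rules"}},
		},
	}

	// DeepCopy allocates a fresh Groups slice, so this write cannot alias orig.
	cp := orig.DeepCopy()
	cp.Spec.Groups[0].Name = "changed"

	fmt.Println(orig.Spec.Groups[0].Name) // prints "node.rules"
}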
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/factory.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/factory.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a79df661282560c49bc689497640b439f12852a
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/factory.go
@@ -0,0 +1,178 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	reflect "reflect"
+	sync "sync"
+	time "time"
+
+	internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces"
+	monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring"
+	versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+	client           versioned.Interface
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+	lock             sync.Mutex
+	defaultResync    time.Duration
+	customResync     map[reflect.Type]time.Duration
+
+	informers map[reflect.Type]cache.SharedIndexInformer
+	// startedInformers is used for tracking which informers have been started.
+	// This allows Start() to be called multiple times safely.
+	startedInformers map[reflect.Type]bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		for k, v := range resyncConfig {
+			factory.customResync[reflect.TypeOf(k)] = v
+		}
+		return factory
+	}
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		factory.tweakListOptions = tweakListOptions
+		return factory
+	}
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		factory.namespace = namespace
+		return factory
+	}
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+	return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+	factory := &sharedInformerFactory{
+		client:           client,
+		namespace:        v1.NamespaceAll,
+		defaultResync:    defaultResync,
+		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
+		startedInformers: make(map[reflect.Type]bool),
+		customResync:     make(map[reflect.Type]time.Duration),
+	}
+
+	// Apply all options
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+
+	return factory
+}
+
+// Start initializes all requested informers.
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	for informerType, informer := range f.informers {
+		if !f.startedInformers[informerType] {
+			go informer.Run(stopCh)
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+// WaitForCacheSync waits until the caches of all started informers are synced.
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	Monitoring() monitoring.Interface
+}
+
+func (f *sharedInformerFactory) Monitoring() monitoring.Interface {
+	return monitoring.New(f, f.namespace, f.tweakListOptions)
+}
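Taken together, the factory above follows the standard client-go pattern: request informers first (which registers them), then Start, then block on WaitForCacheSync. A hedged usage sketch, not code from this PR; the resync period and namespace are illustrative:

package example

import (
	"log"
	"time"

	"github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
	versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
)

// startMonitoringInformers wires the factory up in the intended order.
func startMonitoringInformers(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Second, // illustrative resync period
		externalversions.WithNamespace("kubesphere-monitoring-system"), // illustrative namespace
	)

	// InformerFor registers lazily; only requested informers are started.
	_ = factory.Monitoring().V1().PrometheusRules().Informer()

	factory.Start(stopCh) // idempotent: already-started informers are skipped

	for typ, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			log.Printf("cache for %v never synced", typ)
		}
	}
}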
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/generic.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/generic.go
new file mode 100644
index 0000000000000000000000000000000000000000..40c18d3416f51b408bfcf50051be032c4bf95a79
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/generic.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	"fmt"
+
+	v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// sharedInformers based on type.
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type.
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=monitoring.coreos.com, Version=v1
+	case v1.SchemeGroupVersion.WithResource("alertmanagers"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().Alertmanagers().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("podmonitors"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().PodMonitors().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("probes"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().Probes().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("prometheuses"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().Prometheuses().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("prometheusrules"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().PrometheusRules().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("servicemonitors"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().ServiceMonitors().Informer()}, nil
+	case v1.SchemeGroupVersion.WithResource("thanosrulers"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Monitoring().V1().ThanosRulers().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
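ForResource is the untyped entry point: given only a GroupVersionResource, the switch above maps it onto the same shared informer the typed accessors use, so both views share one cache. A speculative sketch of a caller, reusing the factory type from factory.go (the function name is illustrative):

package example

import (
	"github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// listPrometheusRules lists cached objects through the generic path,
// useful when the resource kind is only decided at runtime.
func listPrometheusRules(factory externalversions.SharedInformerFactory) ([]runtime.Object, error) {
	gvr := schema.GroupVersionResource{
		Group:    "monitoring.coreos.com",
		Version:  "v1",
		Resource: "prometheusrules",
	}
	generic, err := factory.ForResource(gvr)
	if err != nil {
		return nil, err // unknown GVR: not covered by the switch above
	}
	return generic.Lister().List(labels.Everything())
}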
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000000000000000000000000000000000..32393671fcea7cfa03f9e5710e2f668ab6b0e6ab
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,38 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle.
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/interface.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..646dae81dee3f2fe49fa80327a2d46ef05d97668
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/interface.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package monitoring
+
+import (
+	internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+	// V1 provides access to shared informers for resources in V1.
+	V1() v1.Interface
+}
+
+type group struct {
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+	return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/alertmanager.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b4f8769a9e4ea2a74f808eb8e075a7feb7612a1
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/alertmanager.go
@@ -0,0 +1,88 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"context"
+	time "time"
+
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1"
+	versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	watch "k8s.io/apimachinery/pkg/watch"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// AlertmanagerInformer provides access to a shared informer and lister for
+// Alertmanagers.
+type AlertmanagerInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1.AlertmanagerLister
+}
+
+type alertmanagerInformer struct {
+	factory          internalinterfaces.SharedInformerFactory
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+	namespace        string
+}
+
+// NewAlertmanagerInformer constructs a new informer for Alertmanager type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewAlertmanagerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredAlertmanagerInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredAlertmanagerInformer constructs a new informer for Alertmanager type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredAlertmanagerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.MonitoringV1().Alertmanagers(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.MonitoringV1().Alertmanagers(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&monitoringv1.Alertmanager{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+func (f *alertmanagerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredAlertmanagerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *alertmanagerInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&monitoringv1.Alertmanager{}, f.defaultInformer)
+}
+
+func (f *alertmanagerInformer) Lister() v1.AlertmanagerLister {
+	return v1.NewAlertmanagerLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/interface.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8213fcba15bc4cc84d296218c46e74459857805
--- /dev/null
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/interface.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+	// Alertmanagers returns an AlertmanagerInformer.
+	Alertmanagers() AlertmanagerInformer
+	// PodMonitors returns a PodMonitorInformer.
+	PodMonitors() PodMonitorInformer
+	// Probes returns a ProbeInformer.
+	Probes() ProbeInformer
+	// Prometheuses returns a PrometheusInformer.
+	Prometheuses() PrometheusInformer
+	// PrometheusRules returns a PrometheusRuleInformer.
+	PrometheusRules() PrometheusRuleInformer
+	// ServiceMonitors returns a ServiceMonitorInformer.
+	ServiceMonitors() ServiceMonitorInformer
+	// ThanosRulers returns a ThanosRulerInformer.
+	ThanosRulers() ThanosRulerInformer
+}
+
+type version struct {
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Alertmanagers returns an AlertmanagerInformer.
+func (v *version) Alertmanagers() AlertmanagerInformer {
+	return &alertmanagerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// PodMonitors returns a PodMonitorInformer.
+func (v *version) PodMonitors() PodMonitorInformer {
+	return &podMonitorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Probes returns a ProbeInformer.
+func (v *version) Probes() ProbeInformer {
+	return &probeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Prometheuses returns a PrometheusInformer.
+func (v *version) Prometheuses() PrometheusInformer {
+	return &prometheusInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// PrometheusRules returns a PrometheusRuleInformer.
+func (v *version) PrometheusRules() PrometheusRuleInformer {
+	return &prometheusRuleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// ServiceMonitors returns a ServiceMonitorInformer.
+func (v *version) ServiceMonitors() ServiceMonitorInformer {
+	return &serviceMonitorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// ThanosRulers returns a ThanosRulerInformer.
+func (v *version) ThanosRulers() ThanosRulerInformer {
+	return &thanosRulerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
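With this version interface in place, a consumer reaches a typed lister through the factory chain. A hedged sketch (it assumes Start and WaitForCacheSync have already run, and that the vendored listers follow the standard lister-gen shape; the function name is illustrative):

package example

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// printAlertmanagers reads from the informer's local store, not the API server.
func printAlertmanagers(factory externalversions.SharedInformerFactory, namespace string) error {
	lister := factory.Monitoring().V1().Alertmanagers().Lister()
	ams, err := lister.Alertmanagers(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	for _, am := range ams {
		fmt.Println(am.Name)
	}
	return nil
}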
+ +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodMonitorInformer provides access to a shared informer and lister for +// PodMonitors. +type PodMonitorInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodMonitorLister +} + +type podMonitorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodMonitorInformer constructs a new informer for PodMonitor type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodMonitorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodMonitorInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodMonitorInformer constructs a new informer for PodMonitor type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodMonitorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().PodMonitors(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().PodMonitors(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.PodMonitor{}, + resyncPeriod, + indexers, + ) +} + +func (f *podMonitorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodMonitorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podMonitorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.PodMonitor{}, f.defaultInformer) +} + +func (f *podMonitorInformer) Lister() v1.PodMonitorLister { + return v1.NewPodMonitorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/probe.go new file mode 100644 index 0000000000000000000000000000000000000000..fc683b711c8c5b04488dc5607380c9dde9d70385 --- /dev/null +++ 
b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/probe.go @@ -0,0 +1,88 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ProbeInformer provides access to a shared informer and lister for +// Probes. +type ProbeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ProbeLister +} + +type probeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewProbeInformer constructs a new informer for Probe type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewProbeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredProbeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredProbeInformer constructs a new informer for Probe type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredProbeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().Probes(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().Probes(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.Probe{}, + resyncPeriod, + indexers, + ) +} + +func (f *probeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredProbeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *probeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.Probe{}, f.defaultInformer) +} + +func (f *probeInformer) Lister() v1.ProbeLister { + return v1.NewProbeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheus.go new file mode 100644 index 0000000000000000000000000000000000000000..fc37bcfa0fbbd963501d7b2b179236a256ff6fe2 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheus.go @@ -0,0 +1,88 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PrometheusInformer provides access to a shared informer and lister for +// Prometheuses. 
+type PrometheusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PrometheusLister +} + +type prometheusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPrometheusInformer constructs a new informer for Prometheus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPrometheusInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPrometheusInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPrometheusInformer constructs a new informer for Prometheus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPrometheusInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().Prometheuses(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().Prometheuses(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.Prometheus{}, + resyncPeriod, + indexers, + ) +} + +func (f *prometheusInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPrometheusInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *prometheusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.Prometheus{}, f.defaultInformer) +} + +func (f *prometheusInformer) Lister() v1.PrometheusLister { + return v1.NewPrometheusLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheusrule.go new file mode 100644 index 0000000000000000000000000000000000000000..7fd04aa15ce52831ddfb2eebbb42b9c3748e1354 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/prometheusrule.go @@ -0,0 +1,88 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PrometheusRuleInformer provides access to a shared informer and lister for +// PrometheusRules. +type PrometheusRuleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PrometheusRuleLister +} + +type prometheusRuleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPrometheusRuleInformer constructs a new informer for PrometheusRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPrometheusRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPrometheusRuleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPrometheusRuleInformer constructs a new informer for PrometheusRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPrometheusRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().PrometheusRules(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().PrometheusRules(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.PrometheusRule{}, + resyncPeriod, + indexers, + ) +} + +func (f *prometheusRuleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPrometheusRuleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *prometheusRuleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.PrometheusRule{}, f.defaultInformer) +} + +func (f *prometheusRuleInformer) Lister() v1.PrometheusRuleLister { + return v1.NewPrometheusRuleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/servicemonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..04ad2447accc441ecc47a791e0315bae4fc0d0fe --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/servicemonitor.go @@ -0,0 +1,88 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceMonitorInformer provides access to a shared informer and lister for +// ServiceMonitors. 
+type ServiceMonitorInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceMonitorLister +} + +type serviceMonitorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceMonitorInformer constructs a new informer for ServiceMonitor type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceMonitorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceMonitorInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceMonitorInformer constructs a new informer for ServiceMonitor type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceMonitorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().ServiceMonitors(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().ServiceMonitors(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.ServiceMonitor{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceMonitorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceMonitorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceMonitorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.ServiceMonitor{}, f.defaultInformer) +} + +func (f *serviceMonitorInformer) Lister() v1.ServiceMonitorLister { + return v1.NewServiceMonitorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/thanosruler.go new file mode 100644 index 0000000000000000000000000000000000000000..a0f817422ffb07c8bb937fdc0e4b244134ec8a36 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1/thanosruler.go @@ -0,0 +1,88 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + internalinterfaces "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" + versioned "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ThanosRulerInformer provides access to a shared informer and lister for +// ThanosRulers. +type ThanosRulerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ThanosRulerLister +} + +type thanosRulerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewThanosRulerInformer constructs a new informer for ThanosRuler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewThanosRulerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredThanosRulerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredThanosRulerInformer constructs a new informer for ThanosRuler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredThanosRulerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().ThanosRulers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MonitoringV1().ThanosRulers(namespace).Watch(context.TODO(), options) + }, + }, + &monitoringv1.ThanosRuler{}, + resyncPeriod, + indexers, + ) +} + +func (f *thanosRulerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredThanosRulerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *thanosRulerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&monitoringv1.ThanosRuler{}, f.defaultInformer) +} + +func (f *thanosRulerInformer) Lister() v1.ThanosRulerLister { + return v1.NewThanosRulerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/alertmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..59ecf8f2f945544b4c01e5794e87fbcd8d3e8ca9 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/alertmanager.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// AlertmanagerLister helps list Alertmanagers. +type AlertmanagerLister interface { + // List lists all Alertmanagers in the indexer. + List(selector labels.Selector) (ret []*v1.Alertmanager, err error) + // Alertmanagers returns an object that can list and get Alertmanagers. + Alertmanagers(namespace string) AlertmanagerNamespaceLister + AlertmanagerListerExpansion +} + +// alertmanagerLister implements the AlertmanagerLister interface. +type alertmanagerLister struct { + indexer cache.Indexer +} + +// NewAlertmanagerLister returns a new AlertmanagerLister. +func NewAlertmanagerLister(indexer cache.Indexer) AlertmanagerLister { + return &alertmanagerLister{indexer: indexer} +} + +// List lists all Alertmanagers in the indexer. 
+func (s *alertmanagerLister) List(selector labels.Selector) (ret []*v1.Alertmanager, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Alertmanager)) + }) + return ret, err +} + +// Alertmanagers returns an object that can list and get Alertmanagers. +func (s *alertmanagerLister) Alertmanagers(namespace string) AlertmanagerNamespaceLister { + return alertmanagerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// AlertmanagerNamespaceLister helps list and get Alertmanagers. +type AlertmanagerNamespaceLister interface { + // List lists all Alertmanagers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Alertmanager, err error) + // Get retrieves the Alertmanager from the indexer for a given namespace and name. + Get(name string) (*v1.Alertmanager, error) + AlertmanagerNamespaceListerExpansion +} + +// alertmanagerNamespaceLister implements the AlertmanagerNamespaceLister +// interface. +type alertmanagerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Alertmanagers in the indexer for a given namespace. +func (s alertmanagerNamespaceLister) List(selector labels.Selector) (ret []*v1.Alertmanager, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Alertmanager)) + }) + return ret, err +} + +// Get retrieves the Alertmanager from the indexer for a given namespace and name. +func (s alertmanagerNamespaceLister) Get(name string) (*v1.Alertmanager, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("alertmanager"), name) + } + return obj.(*v1.Alertmanager), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/expansion_generated.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/expansion_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..198e808fc8f6870f0de11968a0868696798dbf31 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/expansion_generated.go @@ -0,0 +1,73 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// AlertmanagerListerExpansion allows custom methods to be added to +// AlertmanagerLister. +type AlertmanagerListerExpansion interface{} + +// AlertmanagerNamespaceListerExpansion allows custom methods to be added to +// AlertmanagerNamespaceLister. +type AlertmanagerNamespaceListerExpansion interface{} + +// PodMonitorListerExpansion allows custom methods to be added to +// PodMonitorLister. 
+type PodMonitorListerExpansion interface{} + +// PodMonitorNamespaceListerExpansion allows custom methods to be added to +// PodMonitorNamespaceLister. +type PodMonitorNamespaceListerExpansion interface{} + +// ProbeListerExpansion allows custom methods to be added to +// ProbeLister. +type ProbeListerExpansion interface{} + +// ProbeNamespaceListerExpansion allows custom methods to be added to +// ProbeNamespaceLister. +type ProbeNamespaceListerExpansion interface{} + +// PrometheusListerExpansion allows custom methods to be added to +// PrometheusLister. +type PrometheusListerExpansion interface{} + +// PrometheusNamespaceListerExpansion allows custom methods to be added to +// PrometheusNamespaceLister. +type PrometheusNamespaceListerExpansion interface{} + +// PrometheusRuleListerExpansion allows custom methods to be added to +// PrometheusRuleLister. +type PrometheusRuleListerExpansion interface{} + +// PrometheusRuleNamespaceListerExpansion allows custom methods to be added to +// PrometheusRuleNamespaceLister. +type PrometheusRuleNamespaceListerExpansion interface{} + +// ServiceMonitorListerExpansion allows custom methods to be added to +// ServiceMonitorLister. +type ServiceMonitorListerExpansion interface{} + +// ServiceMonitorNamespaceListerExpansion allows custom methods to be added to +// ServiceMonitorNamespaceLister. +type ServiceMonitorNamespaceListerExpansion interface{} + +// ThanosRulerListerExpansion allows custom methods to be added to +// ThanosRulerLister. +type ThanosRulerListerExpansion interface{} + +// ThanosRulerNamespaceListerExpansion allows custom methods to be added to +// ThanosRulerNamespaceLister. +type ThanosRulerNamespaceListerExpansion interface{} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/podmonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/podmonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..20b8c80e79063aace43864bb457ff1f475887344 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/podmonitor.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodMonitorLister helps list PodMonitors. +type PodMonitorLister interface { + // List lists all PodMonitors in the indexer. + List(selector labels.Selector) (ret []*v1.PodMonitor, err error) + // PodMonitors returns an object that can list and get PodMonitors. + PodMonitors(namespace string) PodMonitorNamespaceLister + PodMonitorListerExpansion +} + +// podMonitorLister implements the PodMonitorLister interface. 
+type podMonitorLister struct { + indexer cache.Indexer +} + +// NewPodMonitorLister returns a new PodMonitorLister. +func NewPodMonitorLister(indexer cache.Indexer) PodMonitorLister { + return &podMonitorLister{indexer: indexer} +} + +// List lists all PodMonitors in the indexer. +func (s *podMonitorLister) List(selector labels.Selector) (ret []*v1.PodMonitor, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodMonitor)) + }) + return ret, err +} + +// PodMonitors returns an object that can list and get PodMonitors. +func (s *podMonitorLister) PodMonitors(namespace string) PodMonitorNamespaceLister { + return podMonitorNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodMonitorNamespaceLister helps list and get PodMonitors. +type PodMonitorNamespaceLister interface { + // List lists all PodMonitors in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodMonitor, err error) + // Get retrieves the PodMonitor from the indexer for a given namespace and name. + Get(name string) (*v1.PodMonitor, error) + PodMonitorNamespaceListerExpansion +} + +// podMonitorNamespaceLister implements the PodMonitorNamespaceLister +// interface. +type podMonitorNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodMonitors in the indexer for a given namespace. +func (s podMonitorNamespaceLister) List(selector labels.Selector) (ret []*v1.PodMonitor, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodMonitor)) + }) + return ret, err +} + +// Get retrieves the PodMonitor from the indexer for a given namespace and name. +func (s podMonitorNamespaceLister) Get(name string) (*v1.PodMonitor, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podmonitor"), name) + } + return obj.(*v1.PodMonitor), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/probe.go new file mode 100644 index 0000000000000000000000000000000000000000..a9f378f5c837687154372ad71ef4db5be66604ad --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/probe.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ProbeLister helps list Probes. +type ProbeLister interface { + // List lists all Probes in the indexer. 
+ List(selector labels.Selector) (ret []*v1.Probe, err error) + // Probes returns an object that can list and get Probes. + Probes(namespace string) ProbeNamespaceLister + ProbeListerExpansion +} + +// probeLister implements the ProbeLister interface. +type probeLister struct { + indexer cache.Indexer +} + +// NewProbeLister returns a new ProbeLister. +func NewProbeLister(indexer cache.Indexer) ProbeLister { + return &probeLister{indexer: indexer} +} + +// List lists all Probes in the indexer. +func (s *probeLister) List(selector labels.Selector) (ret []*v1.Probe, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Probe)) + }) + return ret, err +} + +// Probes returns an object that can list and get Probes. +func (s *probeLister) Probes(namespace string) ProbeNamespaceLister { + return probeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ProbeNamespaceLister helps list and get Probes. +type ProbeNamespaceLister interface { + // List lists all Probes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Probe, err error) + // Get retrieves the Probe from the indexer for a given namespace and name. + Get(name string) (*v1.Probe, error) + ProbeNamespaceListerExpansion +} + +// probeNamespaceLister implements the ProbeNamespaceLister +// interface. +type probeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Probes in the indexer for a given namespace. +func (s probeNamespaceLister) List(selector labels.Selector) (ret []*v1.Probe, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Probe)) + }) + return ret, err +} + +// Get retrieves the Probe from the indexer for a given namespace and name. +func (s probeNamespaceLister) Get(name string) (*v1.Probe, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("probe"), name) + } + return obj.(*v1.Probe), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheus.go new file mode 100644 index 0000000000000000000000000000000000000000..15e9ffad169aac2cbb33df75ef08437ff7045e90 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheus.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PrometheusLister helps list Prometheuses. 
+type PrometheusLister interface { + // List lists all Prometheuses in the indexer. + List(selector labels.Selector) (ret []*v1.Prometheus, err error) + // Prometheuses returns an object that can list and get Prometheuses. + Prometheuses(namespace string) PrometheusNamespaceLister + PrometheusListerExpansion +} + +// prometheusLister implements the PrometheusLister interface. +type prometheusLister struct { + indexer cache.Indexer +} + +// NewPrometheusLister returns a new PrometheusLister. +func NewPrometheusLister(indexer cache.Indexer) PrometheusLister { + return &prometheusLister{indexer: indexer} +} + +// List lists all Prometheuses in the indexer. +func (s *prometheusLister) List(selector labels.Selector) (ret []*v1.Prometheus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Prometheus)) + }) + return ret, err +} + +// Prometheuses returns an object that can list and get Prometheuses. +func (s *prometheusLister) Prometheuses(namespace string) PrometheusNamespaceLister { + return prometheusNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PrometheusNamespaceLister helps list and get Prometheuses. +type PrometheusNamespaceLister interface { + // List lists all Prometheuses in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Prometheus, err error) + // Get retrieves the Prometheus from the indexer for a given namespace and name. + Get(name string) (*v1.Prometheus, error) + PrometheusNamespaceListerExpansion +} + +// prometheusNamespaceLister implements the PrometheusNamespaceLister +// interface. +type prometheusNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Prometheuses in the indexer for a given namespace. +func (s prometheusNamespaceLister) List(selector labels.Selector) (ret []*v1.Prometheus, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Prometheus)) + }) + return ret, err +} + +// Get retrieves the Prometheus from the indexer for a given namespace and name. +func (s prometheusNamespaceLister) Get(name string) (*v1.Prometheus, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("prometheus"), name) + } + return obj.(*v1.Prometheus), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheusrule.go new file mode 100644 index 0000000000000000000000000000000000000000..1923fd0b973f95d7fda51c68d65c75f9b66846ef --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/prometheusrule.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
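
For the lister side, a sketch of the intended read path: once the informer cache has synced, lookups go through the lister's in-memory index instead of the API server. The helper name and the factory parameter are illustrative assumptions, not part of the vendored code:

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// listRules reads PrometheusRules for one namespace from the lister's
// indexer (populated by the shared informer), avoiding an API round trip.
func listRules(factory externalversions.SharedInformerFactory, ns string) ([]*monitoringv1.PrometheusRule, error) {
	lister := factory.Monitoring().V1().PrometheusRules().Lister()
	return lister.PrometheusRules(ns).List(labels.Everything())
}
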
+ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PrometheusRuleLister helps list PrometheusRules. +type PrometheusRuleLister interface { + // List lists all PrometheusRules in the indexer. + List(selector labels.Selector) (ret []*v1.PrometheusRule, err error) + // PrometheusRules returns an object that can list and get PrometheusRules. + PrometheusRules(namespace string) PrometheusRuleNamespaceLister + PrometheusRuleListerExpansion +} + +// prometheusRuleLister implements the PrometheusRuleLister interface. +type prometheusRuleLister struct { + indexer cache.Indexer +} + +// NewPrometheusRuleLister returns a new PrometheusRuleLister. +func NewPrometheusRuleLister(indexer cache.Indexer) PrometheusRuleLister { + return &prometheusRuleLister{indexer: indexer} +} + +// List lists all PrometheusRules in the indexer. +func (s *prometheusRuleLister) List(selector labels.Selector) (ret []*v1.PrometheusRule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PrometheusRule)) + }) + return ret, err +} + +// PrometheusRules returns an object that can list and get PrometheusRules. +func (s *prometheusRuleLister) PrometheusRules(namespace string) PrometheusRuleNamespaceLister { + return prometheusRuleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PrometheusRuleNamespaceLister helps list and get PrometheusRules. +type PrometheusRuleNamespaceLister interface { + // List lists all PrometheusRules in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PrometheusRule, err error) + // Get retrieves the PrometheusRule from the indexer for a given namespace and name. + Get(name string) (*v1.PrometheusRule, error) + PrometheusRuleNamespaceListerExpansion +} + +// prometheusRuleNamespaceLister implements the PrometheusRuleNamespaceLister +// interface. +type prometheusRuleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PrometheusRules in the indexer for a given namespace. +func (s prometheusRuleNamespaceLister) List(selector labels.Selector) (ret []*v1.PrometheusRule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PrometheusRule)) + }) + return ret, err +} + +// Get retrieves the PrometheusRule from the indexer for a given namespace and name. 
+func (s prometheusRuleNamespaceLister) Get(name string) (*v1.PrometheusRule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("prometheusrule"), name) + } + return obj.(*v1.PrometheusRule), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/servicemonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..e5bb72253ea6022c584b1caa41b3f101755280be --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/servicemonitor.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceMonitorLister helps list ServiceMonitors. +type ServiceMonitorLister interface { + // List lists all ServiceMonitors in the indexer. + List(selector labels.Selector) (ret []*v1.ServiceMonitor, err error) + // ServiceMonitors returns an object that can list and get ServiceMonitors. + ServiceMonitors(namespace string) ServiceMonitorNamespaceLister + ServiceMonitorListerExpansion +} + +// serviceMonitorLister implements the ServiceMonitorLister interface. +type serviceMonitorLister struct { + indexer cache.Indexer +} + +// NewServiceMonitorLister returns a new ServiceMonitorLister. +func NewServiceMonitorLister(indexer cache.Indexer) ServiceMonitorLister { + return &serviceMonitorLister{indexer: indexer} +} + +// List lists all ServiceMonitors in the indexer. +func (s *serviceMonitorLister) List(selector labels.Selector) (ret []*v1.ServiceMonitor, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceMonitor)) + }) + return ret, err +} + +// ServiceMonitors returns an object that can list and get ServiceMonitors. +func (s *serviceMonitorLister) ServiceMonitors(namespace string) ServiceMonitorNamespaceLister { + return serviceMonitorNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceMonitorNamespaceLister helps list and get ServiceMonitors. +type ServiceMonitorNamespaceLister interface { + // List lists all ServiceMonitors in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ServiceMonitor, err error) + // Get retrieves the ServiceMonitor from the indexer for a given namespace and name. + Get(name string) (*v1.ServiceMonitor, error) + ServiceMonitorNamespaceListerExpansion +} + +// serviceMonitorNamespaceLister implements the ServiceMonitorNamespaceLister +// interface. 
+type serviceMonitorNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceMonitors in the indexer for a given namespace. +func (s serviceMonitorNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceMonitor, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceMonitor)) + }) + return ret, err +} + +// Get retrieves the ServiceMonitor from the indexer for a given namespace and name. +func (s serviceMonitorNamespaceLister) Get(name string) (*v1.ServiceMonitor, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("servicemonitor"), name) + } + return obj.(*v1.ServiceMonitor), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/thanosruler.go new file mode 100644 index 0000000000000000000000000000000000000000..dc8ddb185f791c3081a36be24341dcc39555e9d1 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1/thanosruler.go @@ -0,0 +1,92 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ThanosRulerLister helps list ThanosRulers. +type ThanosRulerLister interface { + // List lists all ThanosRulers in the indexer. + List(selector labels.Selector) (ret []*v1.ThanosRuler, err error) + // ThanosRulers returns an object that can list and get ThanosRulers. + ThanosRulers(namespace string) ThanosRulerNamespaceLister + ThanosRulerListerExpansion +} + +// thanosRulerLister implements the ThanosRulerLister interface. +type thanosRulerLister struct { + indexer cache.Indexer +} + +// NewThanosRulerLister returns a new ThanosRulerLister. +func NewThanosRulerLister(indexer cache.Indexer) ThanosRulerLister { + return &thanosRulerLister{indexer: indexer} +} + +// List lists all ThanosRulers in the indexer. +func (s *thanosRulerLister) List(selector labels.Selector) (ret []*v1.ThanosRuler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ThanosRuler)) + }) + return ret, err +} + +// ThanosRulers returns an object that can list and get ThanosRulers. +func (s *thanosRulerLister) ThanosRulers(namespace string) ThanosRulerNamespaceLister { + return thanosRulerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ThanosRulerNamespaceLister helps list and get ThanosRulers. 
+type ThanosRulerNamespaceLister interface { + // List lists all ThanosRulers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ThanosRuler, err error) + // Get retrieves the ThanosRuler from the indexer for a given namespace and name. + Get(name string) (*v1.ThanosRuler, error) + ThanosRulerNamespaceListerExpansion +} + +// thanosRulerNamespaceLister implements the ThanosRulerNamespaceLister +// interface. +type thanosRulerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ThanosRulers in the indexer for a given namespace. +func (s thanosRulerNamespaceLister) List(selector labels.Selector) (ret []*v1.ThanosRuler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ThanosRuler)) + }) + return ret, err +} + +// Get retrieves the ThanosRuler from the indexer for a given namespace and name. +func (s thanosRulerNamespaceLister) Get(name string) (*v1.ThanosRuler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("thanosruler"), name) + } + return obj.(*v1.ThanosRuler), nil +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/clientset.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/clientset.go new file mode 100644 index 0000000000000000000000000000000000000000..d21fcfc0b5c93c02668dda00971377cb77e695cc --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/clientset.go @@ -0,0 +1,95 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MonitoringV1() monitoringv1.MonitoringV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + monitoringV1 *monitoringv1.MonitoringV1Client +} + +// MonitoringV1 retrieves the MonitoringV1Client +func (c *Clientset) MonitoringV1() monitoringv1.MonitoringV1Interface { + return c.monitoringV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.monitoringV1, err = monitoringv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.monitoringV1 = monitoringv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.monitoringV1 = monitoringv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..864d1ee9571509e5e99bec6c44e3671f0206cb53 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/clientset_generated.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..d13b7521b00f48e9bb20313dd9c8821c5c591ecf --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/clientset_generated.go @@ -0,0 +1,80 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" + fakemonitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// MonitoringV1 retrieves the MonitoringV1Client +func (c *Clientset) MonitoringV1() monitoringv1.MonitoringV1Interface { + return &fakemonitoringv1.FakeMonitoringV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..196645603102ab511c12c009ae1399e47427d9b3 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
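// A typical unit-test sketch against the fake clientset (object names are
// illustrative; error handling elided):
//
//	rule := &monitoringv1.PrometheusRule{
//		ObjectMeta: metav1.ObjectMeta{Name: "always-firing", Namespace: "kubesphere-monitoring-system"},
//	}
//	cs := fake.NewSimpleClientset(rule)
//	got, _ := cs.MonitoringV1().PrometheusRules("kubesphere-monitoring-system").
//		Get(context.TODO(), "always-firing", metav1.GetOptions{})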
+ +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/register.go new file mode 100644 index 0000000000000000000000000000000000000000..c6d3f3127f6554f77200043da86181f8f7409d25 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake/register.go @@ -0,0 +1,54 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + monitoringv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ff8997207d5cd374beb1c54620763a3877c81fbb --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go new file mode 100644 index 0000000000000000000000000000000000000000..4c7ca3ef7a1ac2cca800de832fa481bd49aeb10e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme/register.go @@ -0,0 +1,54 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + monitoringv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..8fcdfe1b5a23767201e1c7cc02ce9994e2850238 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go @@ -0,0 +1,193 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// AlertmanagersGetter has a method to return a AlertmanagerInterface. +// A group's client should implement this interface. +type AlertmanagersGetter interface { + Alertmanagers(namespace string) AlertmanagerInterface +} + +// AlertmanagerInterface has methods to work with Alertmanager resources. +type AlertmanagerInterface interface { + Create(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.CreateOptions) (*v1.Alertmanager, error) + Update(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.UpdateOptions) (*v1.Alertmanager, error) + UpdateStatus(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.UpdateOptions) (*v1.Alertmanager, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Alertmanager, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.AlertmanagerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Alertmanager, err error) + AlertmanagerExpansion +} + +// alertmanagers implements AlertmanagerInterface +type alertmanagers struct { + client rest.Interface + ns string +} + +// newAlertmanagers returns a Alertmanagers +func newAlertmanagers(c *MonitoringV1Client, namespace string) *alertmanagers { + return &alertmanagers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the alertmanager, and returns the corresponding alertmanager object, and an error if there is any. +func (c *alertmanagers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Alertmanager, err error) { + result = &v1.Alertmanager{} + err = c.client.Get(). + Namespace(c.ns). + Resource("alertmanagers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Alertmanagers that match those selectors. +func (c *alertmanagers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AlertmanagerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.AlertmanagerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("alertmanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested alertmanagers. +func (c *alertmanagers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("alertmanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a alertmanager and creates it. Returns the server's representation of the alertmanager, and an error, if there is any. +func (c *alertmanagers) Create(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.CreateOptions) (result *v1.Alertmanager, err error) { + result = &v1.Alertmanager{} + err = c.client.Post(). + Namespace(c.ns). + Resource("alertmanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(alertmanager). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a alertmanager and updates it. Returns the server's representation of the alertmanager, and an error, if there is any. +func (c *alertmanagers) Update(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.UpdateOptions) (result *v1.Alertmanager, err error) { + result = &v1.Alertmanager{} + err = c.client.Put(). + Namespace(c.ns). + Resource("alertmanagers"). + Name(alertmanager.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(alertmanager). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *alertmanagers) UpdateStatus(ctx context.Context, alertmanager *v1.Alertmanager, opts metav1.UpdateOptions) (result *v1.Alertmanager, err error) { + result = &v1.Alertmanager{} + err = c.client.Put(). + Namespace(c.ns). + Resource("alertmanagers"). + Name(alertmanager.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(alertmanager). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the alertmanager and deletes it. Returns an error if one occurs. +func (c *alertmanagers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("alertmanagers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *alertmanagers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("alertmanagers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched alertmanager. +func (c *alertmanagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Alertmanager, err error) { + result = &v1.Alertmanager{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("alertmanagers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..7938dcdd2ce968b2aefe19e79a90fa25d4e98566 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/doc.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..04062e3f3ace4ae00fa49379d8abea8edbcb87ad --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_alertmanager.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_alertmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..6842b4b708d16014a59f1363c95e6faa69657b1d --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_alertmanager.go @@ -0,0 +1,140 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAlertmanagers implements AlertmanagerInterface +type FakeAlertmanagers struct { + Fake *FakeMonitoringV1 + ns string +} + +var alertmanagersResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "alertmanagers"} + +var alertmanagersKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "Alertmanager"} + +// Get takes name of the alertmanager, and returns the corresponding alertmanager object, and an error if there is any. +func (c *FakeAlertmanagers) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.Alertmanager, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(alertmanagersResource, c.ns, name), &monitoringv1.Alertmanager{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Alertmanager), err +} + +// List takes label and field selectors, and returns the list of Alertmanagers that match those selectors. +func (c *FakeAlertmanagers) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.AlertmanagerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(alertmanagersResource, alertmanagersKind, c.ns, opts), &monitoringv1.AlertmanagerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.AlertmanagerList{ListMeta: obj.(*monitoringv1.AlertmanagerList).ListMeta} + for _, item := range obj.(*monitoringv1.AlertmanagerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested alertmanagers. +func (c *FakeAlertmanagers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(alertmanagersResource, c.ns, opts)) + +} + +// Create takes the representation of a alertmanager and creates it. Returns the server's representation of the alertmanager, and an error, if there is any. +func (c *FakeAlertmanagers) Create(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts v1.CreateOptions) (result *monitoringv1.Alertmanager, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(alertmanagersResource, c.ns, alertmanager), &monitoringv1.Alertmanager{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Alertmanager), err +} + +// Update takes the representation of a alertmanager and updates it. Returns the server's representation of the alertmanager, and an error, if there is any. +func (c *FakeAlertmanagers) Update(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts v1.UpdateOptions) (result *monitoringv1.Alertmanager, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(alertmanagersResource, c.ns, alertmanager), &monitoringv1.Alertmanager{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Alertmanager), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAlertmanagers) UpdateStatus(ctx context.Context, alertmanager *monitoringv1.Alertmanager, opts v1.UpdateOptions) (*monitoringv1.Alertmanager, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(alertmanagersResource, "status", c.ns, alertmanager), &monitoringv1.Alertmanager{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Alertmanager), err +} + +// Delete takes name of the alertmanager and deletes it. Returns an error if one occurs. +func (c *FakeAlertmanagers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(alertmanagersResource, c.ns, name), &monitoringv1.Alertmanager{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAlertmanagers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(alertmanagersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.AlertmanagerList{}) + return err +} + +// Patch applies the patch and returns the patched alertmanager. +func (c *FakeAlertmanagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.Alertmanager, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(alertmanagersResource, c.ns, name, pt, data, subresources...), &monitoringv1.Alertmanager{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Alertmanager), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_monitoring_client.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_monitoring_client.go new file mode 100644 index 0000000000000000000000000000000000000000..e4b0abb9f7c81307cf59aa147eecf018e01f9a7e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_monitoring_client.go @@ -0,0 +1,62 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeMonitoringV1 struct { + *testing.Fake +} + +func (c *FakeMonitoringV1) Alertmanagers(namespace string) v1.AlertmanagerInterface { + return &FakeAlertmanagers{c, namespace} +} + +func (c *FakeMonitoringV1) PodMonitors(namespace string) v1.PodMonitorInterface { + return &FakePodMonitors{c, namespace} +} + +func (c *FakeMonitoringV1) Probes(namespace string) v1.ProbeInterface { + return &FakeProbes{c, namespace} +} + +func (c *FakeMonitoringV1) Prometheuses(namespace string) v1.PrometheusInterface { + return &FakePrometheuses{c, namespace} +} + +func (c *FakeMonitoringV1) PrometheusRules(namespace string) v1.PrometheusRuleInterface { + return &FakePrometheusRules{c, namespace} +} + +func (c *FakeMonitoringV1) ServiceMonitors(namespace string) v1.ServiceMonitorInterface { + return &FakeServiceMonitors{c, namespace} +} + +func (c *FakeMonitoringV1) ThanosRulers(namespace string) v1.ThanosRulerInterface { + return &FakeThanosRulers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeMonitoringV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_podmonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_podmonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..f77f10a9c03f19ce36ce608b953a1e512e6dec1e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_podmonitor.go @@ -0,0 +1,128 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodMonitors implements PodMonitorInterface +type FakePodMonitors struct { + Fake *FakeMonitoringV1 + ns string +} + +var podmonitorsResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "podmonitors"} + +var podmonitorsKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "PodMonitor"} + +// Get takes name of the podMonitor, and returns the corresponding podMonitor object, and an error if there is any. 
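// Every method on the fakes below funnels through the embedded testing.Fake's
// reaction chain, which both serves the object tracker's state and records an
// Action that tests can assert on. A hedged sketch, continuing the fake
// clientset example above (names illustrative):
//
//	cs.MonitoringV1().PodMonitors("default").Get(context.TODO(), "pm", metav1.GetOptions{})
//	if a := cs.Actions()[0]; !a.Matches("get", "podmonitors") {
//		t.Fatalf("unexpected action %v", a)
//	}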
+func (c *FakePodMonitors) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.PodMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podmonitorsResource, c.ns, name), &monitoringv1.PodMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PodMonitor), err +} + +// List takes label and field selectors, and returns the list of PodMonitors that match those selectors. +func (c *FakePodMonitors) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.PodMonitorList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podmonitorsResource, podmonitorsKind, c.ns, opts), &monitoringv1.PodMonitorList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.PodMonitorList{ListMeta: obj.(*monitoringv1.PodMonitorList).ListMeta} + for _, item := range obj.(*monitoringv1.PodMonitorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podMonitors. +func (c *FakePodMonitors) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podmonitorsResource, c.ns, opts)) + +} + +// Create takes the representation of a podMonitor and creates it. Returns the server's representation of the podMonitor, and an error, if there is any. +func (c *FakePodMonitors) Create(ctx context.Context, podMonitor *monitoringv1.PodMonitor, opts v1.CreateOptions) (result *monitoringv1.PodMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podmonitorsResource, c.ns, podMonitor), &monitoringv1.PodMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PodMonitor), err +} + +// Update takes the representation of a podMonitor and updates it. Returns the server's representation of the podMonitor, and an error, if there is any. +func (c *FakePodMonitors) Update(ctx context.Context, podMonitor *monitoringv1.PodMonitor, opts v1.UpdateOptions) (result *monitoringv1.PodMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podmonitorsResource, c.ns, podMonitor), &monitoringv1.PodMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PodMonitor), err +} + +// Delete takes name of the podMonitor and deletes it. Returns an error if one occurs. +func (c *FakePodMonitors) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podmonitorsResource, c.ns, name), &monitoringv1.PodMonitor{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodMonitors) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podmonitorsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.PodMonitorList{}) + return err +} + +// Patch applies the patch and returns the patched podMonitor. +func (c *FakePodMonitors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.PodMonitor, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(podmonitorsResource, c.ns, name, pt, data, subresources...), &monitoringv1.PodMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PodMonitor), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_probe.go new file mode 100644 index 0000000000000000000000000000000000000000..15d7e0acf03d5de87f60393e2673730e5e851f99 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_probe.go @@ -0,0 +1,128 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeProbes implements ProbeInterface +type FakeProbes struct { + Fake *FakeMonitoringV1 + ns string +} + +var probesResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "probes"} + +var probesKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "Probe"} + +// Get takes name of the probe, and returns the corresponding probe object, and an error if there is any. +func (c *FakeProbes) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.Probe, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(probesResource, c.ns, name), &monitoringv1.Probe{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Probe), err +} + +// List takes label and field selectors, and returns the list of Probes that match those selectors. +func (c *FakeProbes) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.ProbeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(probesResource, probesKind, c.ns, opts), &monitoringv1.ProbeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.ProbeList{ListMeta: obj.(*monitoringv1.ProbeList).ListMeta} + for _, item := range obj.(*monitoringv1.ProbeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested probes. +func (c *FakeProbes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(probesResource, c.ns, opts)) + +} + +// Create takes the representation of a probe and creates it. Returns the server's representation of the probe, and an error, if there is any. +func (c *FakeProbes) Create(ctx context.Context, probe *monitoringv1.Probe, opts v1.CreateOptions) (result *monitoringv1.Probe, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(probesResource, c.ns, probe), &monitoringv1.Probe{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Probe), err +} + +// Update takes the representation of a probe and updates it. Returns the server's representation of the probe, and an error, if there is any. +func (c *FakeProbes) Update(ctx context.Context, probe *monitoringv1.Probe, opts v1.UpdateOptions) (result *monitoringv1.Probe, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(probesResource, c.ns, probe), &monitoringv1.Probe{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Probe), err +} + +// Delete takes name of the probe and deletes it. Returns an error if one occurs. +func (c *FakeProbes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(probesResource, c.ns, name), &monitoringv1.Probe{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeProbes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(probesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.ProbeList{}) + return err +} + +// Patch applies the patch and returns the patched probe. +func (c *FakeProbes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.Probe, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(probesResource, c.ns, name, pt, data, subresources...), &monitoringv1.Probe{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Probe), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheus.go new file mode 100644 index 0000000000000000000000000000000000000000..fee0e1038409d9faf11afee1a85c675aa3e87e8e --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheus.go @@ -0,0 +1,140 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePrometheuses implements PrometheusInterface +type FakePrometheuses struct { + Fake *FakeMonitoringV1 + ns string +} + +var prometheusesResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheuses"} + +var prometheusesKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "Prometheus"} + +// Get takes name of the prometheus, and returns the corresponding prometheus object, and an error if there is any. +func (c *FakePrometheuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.Prometheus, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(prometheusesResource, c.ns, name), &monitoringv1.Prometheus{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Prometheus), err +} + +// List takes label and field selectors, and returns the list of Prometheuses that match those selectors. +func (c *FakePrometheuses) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.PrometheusList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(prometheusesResource, prometheusesKind, c.ns, opts), &monitoringv1.PrometheusList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.PrometheusList{ListMeta: obj.(*monitoringv1.PrometheusList).ListMeta} + for _, item := range obj.(*monitoringv1.PrometheusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested prometheuses. +func (c *FakePrometheuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(prometheusesResource, c.ns, opts)) + +} + +// Create takes the representation of a prometheus and creates it. Returns the server's representation of the prometheus, and an error, if there is any. +func (c *FakePrometheuses) Create(ctx context.Context, prometheus *monitoringv1.Prometheus, opts v1.CreateOptions) (result *monitoringv1.Prometheus, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(prometheusesResource, c.ns, prometheus), &monitoringv1.Prometheus{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Prometheus), err +} + +// Update takes the representation of a prometheus and updates it. Returns the server's representation of the prometheus, and an error, if there is any. +func (c *FakePrometheuses) Update(ctx context.Context, prometheus *monitoringv1.Prometheus, opts v1.UpdateOptions) (result *monitoringv1.Prometheus, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(prometheusesResource, c.ns, prometheus), &monitoringv1.Prometheus{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Prometheus), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
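// Status updates are recorded as subresource actions, so a test can
// distinguish them from spec updates; the recorded action matches
// ("update", "prometheuses") and reports "status" from GetSubresource().
// Illustrative sketch:
//
//	cs.MonitoringV1().Prometheuses("ns").UpdateStatus(context.TODO(), p, metav1.UpdateOptions{})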
+func (c *FakePrometheuses) UpdateStatus(ctx context.Context, prometheus *monitoringv1.Prometheus, opts v1.UpdateOptions) (*monitoringv1.Prometheus, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(prometheusesResource, "status", c.ns, prometheus), &monitoringv1.Prometheus{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Prometheus), err +} + +// Delete takes name of the prometheus and deletes it. Returns an error if one occurs. +func (c *FakePrometheuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(prometheusesResource, c.ns, name), &monitoringv1.Prometheus{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePrometheuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(prometheusesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.PrometheusList{}) + return err +} + +// Patch applies the patch and returns the patched prometheus. +func (c *FakePrometheuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.Prometheus, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(prometheusesResource, c.ns, name, pt, data, subresources...), &monitoringv1.Prometheus{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.Prometheus), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheusrule.go new file mode 100644 index 0000000000000000000000000000000000000000..77f17d213e007d01ef741a78cde8637c043eb904 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_prometheusrule.go @@ -0,0 +1,128 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePrometheusRules implements PrometheusRuleInterface +type FakePrometheusRules struct { + Fake *FakeMonitoringV1 + ns string +} + +var prometheusrulesResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"} + +var prometheusrulesKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "PrometheusRule"} + +// Get takes name of the prometheusRule, and returns the corresponding prometheusRule object, and an error if there is any. +func (c *FakePrometheusRules) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.PrometheusRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(prometheusrulesResource, c.ns, name), &monitoringv1.PrometheusRule{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PrometheusRule), err +} + +// List takes label and field selectors, and returns the list of PrometheusRules that match those selectors. +func (c *FakePrometheusRules) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.PrometheusRuleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(prometheusrulesResource, prometheusrulesKind, c.ns, opts), &monitoringv1.PrometheusRuleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.PrometheusRuleList{ListMeta: obj.(*monitoringv1.PrometheusRuleList).ListMeta} + for _, item := range obj.(*monitoringv1.PrometheusRuleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested prometheusRules. +func (c *FakePrometheusRules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(prometheusrulesResource, c.ns, opts)) + +} + +// Create takes the representation of a prometheusRule and creates it. Returns the server's representation of the prometheusRule, and an error, if there is any. +func (c *FakePrometheusRules) Create(ctx context.Context, prometheusRule *monitoringv1.PrometheusRule, opts v1.CreateOptions) (result *monitoringv1.PrometheusRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(prometheusrulesResource, c.ns, prometheusRule), &monitoringv1.PrometheusRule{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PrometheusRule), err +} + +// Update takes the representation of a prometheusRule and updates it. Returns the server's representation of the prometheusRule, and an error, if there is any. +func (c *FakePrometheusRules) Update(ctx context.Context, prometheusRule *monitoringv1.PrometheusRule, opts v1.UpdateOptions) (result *monitoringv1.PrometheusRule, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(prometheusrulesResource, c.ns, prometheusRule), &monitoringv1.PrometheusRule{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PrometheusRule), err +} + +// Delete takes name of the prometheusRule and deletes it. Returns an error if one occurs. +func (c *FakePrometheusRules) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(prometheusrulesResource, c.ns, name), &monitoringv1.PrometheusRule{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePrometheusRules) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(prometheusrulesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.PrometheusRuleList{}) + return err +} + +// Patch applies the patch and returns the patched prometheusRule. +func (c *FakePrometheusRules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.PrometheusRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(prometheusrulesResource, c.ns, name, pt, data, subresources...), &monitoringv1.PrometheusRule{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.PrometheusRule), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_servicemonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..d1cc7e6eac04d64219be1e4eb223ba6ceb2715b2 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_servicemonitor.go @@ -0,0 +1,128 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServiceMonitors implements ServiceMonitorInterface +type FakeServiceMonitors struct { + Fake *FakeMonitoringV1 + ns string +} + +var servicemonitorsResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"} + +var servicemonitorsKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"} + +// Get takes name of the serviceMonitor, and returns the corresponding serviceMonitor object, and an error if there is any. 
+func (c *FakeServiceMonitors) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.ServiceMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicemonitorsResource, c.ns, name), &monitoringv1.ServiceMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ServiceMonitor), err +} + +// List takes label and field selectors, and returns the list of ServiceMonitors that match those selectors. +func (c *FakeServiceMonitors) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.ServiceMonitorList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(servicemonitorsResource, servicemonitorsKind, c.ns, opts), &monitoringv1.ServiceMonitorList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.ServiceMonitorList{ListMeta: obj.(*monitoringv1.ServiceMonitorList).ListMeta} + for _, item := range obj.(*monitoringv1.ServiceMonitorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceMonitors. +func (c *FakeServiceMonitors) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(servicemonitorsResource, c.ns, opts)) + +} + +// Create takes the representation of a serviceMonitor and creates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. +func (c *FakeServiceMonitors) Create(ctx context.Context, serviceMonitor *monitoringv1.ServiceMonitor, opts v1.CreateOptions) (result *monitoringv1.ServiceMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicemonitorsResource, c.ns, serviceMonitor), &monitoringv1.ServiceMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ServiceMonitor), err +} + +// Update takes the representation of a serviceMonitor and updates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. +func (c *FakeServiceMonitors) Update(ctx context.Context, serviceMonitor *monitoringv1.ServiceMonitor, opts v1.UpdateOptions) (result *monitoringv1.ServiceMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicemonitorsResource, c.ns, serviceMonitor), &monitoringv1.ServiceMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ServiceMonitor), err +} + +// Delete takes name of the serviceMonitor and deletes it. Returns an error if one occurs. +func (c *FakeServiceMonitors) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicemonitorsResource, c.ns, name), &monitoringv1.ServiceMonitor{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceMonitors) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(servicemonitorsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.ServiceMonitorList{}) + return err +} + +// Patch applies the patch and returns the patched serviceMonitor. 
+func (c *FakeServiceMonitors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.ServiceMonitor, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicemonitorsResource, c.ns, name, pt, data, subresources...), &monitoringv1.ServiceMonitor{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ServiceMonitor), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_thanosruler.go new file mode 100644 index 0000000000000000000000000000000000000000..8af46031c1ef45c949aaa6ee077ce5027d141229 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake/fake_thanosruler.go @@ -0,0 +1,140 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeThanosRulers implements ThanosRulerInterface +type FakeThanosRulers struct { + Fake *FakeMonitoringV1 + ns string +} + +var thanosrulersResource = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "thanosrulers"} + +var thanosrulersKind = schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ThanosRuler"} + +// Get takes name of the thanosRuler, and returns the corresponding thanosRuler object, and an error if there is any. +func (c *FakeThanosRulers) Get(ctx context.Context, name string, options v1.GetOptions) (result *monitoringv1.ThanosRuler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(thanosrulersResource, c.ns, name), &monitoringv1.ThanosRuler{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ThanosRuler), err +} + +// List takes label and field selectors, and returns the list of ThanosRulers that match those selectors. +func (c *FakeThanosRulers) List(ctx context.Context, opts v1.ListOptions) (result *monitoringv1.ThanosRulerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(thanosrulersResource, thanosrulersKind, c.ns, opts), &monitoringv1.ThanosRulerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &monitoringv1.ThanosRulerList{ListMeta: obj.(*monitoringv1.ThanosRulerList).ListMeta} + for _, item := range obj.(*monitoringv1.ThanosRulerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested thanosRulers. +func (c *FakeThanosRulers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(thanosrulersResource, c.ns, opts)) + +} + +// Create takes the representation of a thanosRuler and creates it. Returns the server's representation of the thanosRuler, and an error, if there is any. +func (c *FakeThanosRulers) Create(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts v1.CreateOptions) (result *monitoringv1.ThanosRuler, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(thanosrulersResource, c.ns, thanosRuler), &monitoringv1.ThanosRuler{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ThanosRuler), err +} + +// Update takes the representation of a thanosRuler and updates it. Returns the server's representation of the thanosRuler, and an error, if there is any. +func (c *FakeThanosRulers) Update(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts v1.UpdateOptions) (result *monitoringv1.ThanosRuler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(thanosrulersResource, c.ns, thanosRuler), &monitoringv1.ThanosRuler{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ThanosRuler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeThanosRulers) UpdateStatus(ctx context.Context, thanosRuler *monitoringv1.ThanosRuler, opts v1.UpdateOptions) (*monitoringv1.ThanosRuler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(thanosrulersResource, "status", c.ns, thanosRuler), &monitoringv1.ThanosRuler{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ThanosRuler), err +} + +// Delete takes name of the thanosRuler and deletes it. Returns an error if one occurs. +func (c *FakeThanosRulers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(thanosrulersResource, c.ns, name), &monitoringv1.ThanosRuler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeThanosRulers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(thanosrulersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &monitoringv1.ThanosRulerList{}) + return err +} + +// Patch applies the patch and returns the patched thanosRuler. +func (c *FakeThanosRulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monitoringv1.ThanosRuler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(thanosrulersResource, c.ns, name, pt, data, subresources...), &monitoringv1.ThanosRuler{}) + + if obj == nil { + return nil, err + } + return obj.(*monitoringv1.ThanosRuler), err +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go new file mode 100644 index 0000000000000000000000000000000000000000..c31271dab268bb65f63b9275884dbc5dab584e0b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go @@ -0,0 +1,31 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type AlertmanagerExpansion interface{} + +type PodMonitorExpansion interface{} + +type ProbeExpansion interface{} + +type PrometheusExpansion interface{} + +type PrometheusRuleExpansion interface{} + +type ServiceMonitorExpansion interface{} + +type ThanosRulerExpansion interface{} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go new file mode 100644 index 0000000000000000000000000000000000000000..f7a8e9edf426b223a4e053e574d2e8152fd4ae98 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go @@ -0,0 +1,117 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type MonitoringV1Interface interface { + RESTClient() rest.Interface + AlertmanagersGetter + PodMonitorsGetter + ProbesGetter + PrometheusesGetter + PrometheusRulesGetter + ServiceMonitorsGetter + ThanosRulersGetter +} + +// MonitoringV1Client is used to interact with features provided by the monitoring.coreos.com group. 
+type MonitoringV1Client struct { + restClient rest.Interface +} + +func (c *MonitoringV1Client) Alertmanagers(namespace string) AlertmanagerInterface { + return newAlertmanagers(c, namespace) +} + +func (c *MonitoringV1Client) PodMonitors(namespace string) PodMonitorInterface { + return newPodMonitors(c, namespace) +} + +func (c *MonitoringV1Client) Probes(namespace string) ProbeInterface { + return newProbes(c, namespace) +} + +func (c *MonitoringV1Client) Prometheuses(namespace string) PrometheusInterface { + return newPrometheuses(c, namespace) +} + +func (c *MonitoringV1Client) PrometheusRules(namespace string) PrometheusRuleInterface { + return newPrometheusRules(c, namespace) +} + +func (c *MonitoringV1Client) ServiceMonitors(namespace string) ServiceMonitorInterface { + return newServiceMonitors(c, namespace) +} + +func (c *MonitoringV1Client) ThanosRulers(namespace string) ThanosRulerInterface { + return newThanosRulers(c, namespace) +} + +// NewForConfig creates a new MonitoringV1Client for the given config. +func NewForConfig(c *rest.Config) (*MonitoringV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &MonitoringV1Client{client}, nil +} + +// NewForConfigOrDie creates a new MonitoringV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MonitoringV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MonitoringV1Client for the given RESTClient. +func New(c rest.Interface) *MonitoringV1Client { + return &MonitoringV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MonitoringV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..e803df9ddc905265ca768840f208c99fcb459609 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/podmonitor.go @@ -0,0 +1,176 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
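The MonitoringV1Client completed just above is the entry point for everything else in this vendored client. As a minimal usage sketch (the in-cluster config and the target namespace are illustrative assumptions, not part of this patch; NewForConfig and the PrometheusRules getter come from the generated code above):

package main

import (
	"context"
	"fmt"

	monitoringv1client "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: running in-cluster; out-of-cluster callers would
	// build the rest.Config from a kubeconfig instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	// NewForConfig fills in GroupVersion, APIPath and the serializer
	// via setConfigDefaults before building the REST client.
	client, err := monitoringv1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	rules, err := client.PrometheusRules(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, r := range rules.Items {
		fmt.Println(r.Namespace + "/" + r.Name)
	}
}

Note that NewForConfig copies the rest.Config before mutating it, so a shared config can be passed in safely.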
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodMonitorsGetter has a method to return a PodMonitorInterface. +// A group's client should implement this interface. +type PodMonitorsGetter interface { + PodMonitors(namespace string) PodMonitorInterface +} + +// PodMonitorInterface has methods to work with PodMonitor resources. +type PodMonitorInterface interface { + Create(ctx context.Context, podMonitor *v1.PodMonitor, opts metav1.CreateOptions) (*v1.PodMonitor, error) + Update(ctx context.Context, podMonitor *v1.PodMonitor, opts metav1.UpdateOptions) (*v1.PodMonitor, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodMonitor, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodMonitorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodMonitor, err error) + PodMonitorExpansion +} + +// podMonitors implements PodMonitorInterface +type podMonitors struct { + client rest.Interface + ns string +} + +// newPodMonitors returns a PodMonitors +func newPodMonitors(c *MonitoringV1Client, namespace string) *podMonitors { + return &podMonitors{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podMonitor, and returns the corresponding podMonitor object, and an error if there is any. +func (c *podMonitors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodMonitor, err error) { + result = &v1.PodMonitor{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podmonitors"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodMonitors that match those selectors. +func (c *podMonitors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodMonitorList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodMonitorList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podmonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podMonitors. +func (c *podMonitors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podmonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podMonitor and creates it. Returns the server's representation of the podMonitor, and an error, if there is any. 
+func (c *podMonitors) Create(ctx context.Context, podMonitor *v1.PodMonitor, opts metav1.CreateOptions) (result *v1.PodMonitor, err error) { + result = &v1.PodMonitor{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podmonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podMonitor). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podMonitor and updates it. Returns the server's representation of the podMonitor, and an error, if there is any. +func (c *podMonitors) Update(ctx context.Context, podMonitor *v1.PodMonitor, opts metav1.UpdateOptions) (result *v1.PodMonitor, err error) { + result = &v1.PodMonitor{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podmonitors"). + Name(podMonitor.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podMonitor). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podMonitor and deletes it. Returns an error if one occurs. +func (c *podMonitors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podmonitors"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podMonitors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podmonitors"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podMonitor. +func (c *podMonitors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodMonitor, err error) { + result = &v1.PodMonitor{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podmonitors"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go new file mode 100644 index 0000000000000000000000000000000000000000..0f0a05d0a2a18ad2defe140b225f18016b7ebd64 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/probe.go @@ -0,0 +1,176 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
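The Fake* types earlier in this patch (FakeServiceMonitors, FakeThanosRulers, and so on) are normally reached through the versioned fake clientset that client-gen emits alongside them; that package belongs to the same vendored tree but is not shown in this hunk, so treat its import path as an assumption. A sketch of a unit test against the in-memory object tracker:

package fake_test

import (
	"context"
	"testing"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListServiceMonitors(t *testing.T) {
	// Seed the tracker; every fake method shown above routes through
	// c.Fake.Invokes against this in-memory state.
	sm := &monitoringv1.ServiceMonitor{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(sm)

	list, err := cs.MonitoringV1().ServiceMonitors("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("want 1 ServiceMonitor, got %d", len(list.Items))
	}
}

Seeding via NewSimpleClientset is the usual client-gen pattern; the typed fakes are rarely constructed directly.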
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ProbesGetter has a method to return a ProbeInterface. +// A group's client should implement this interface. +type ProbesGetter interface { + Probes(namespace string) ProbeInterface +} + +// ProbeInterface has methods to work with Probe resources. +type ProbeInterface interface { + Create(ctx context.Context, probe *v1.Probe, opts metav1.CreateOptions) (*v1.Probe, error) + Update(ctx context.Context, probe *v1.Probe, opts metav1.UpdateOptions) (*v1.Probe, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Probe, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ProbeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Probe, err error) + ProbeExpansion +} + +// probes implements ProbeInterface +type probes struct { + client rest.Interface + ns string +} + +// newProbes returns a Probes +func newProbes(c *MonitoringV1Client, namespace string) *probes { + return &probes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the probe, and returns the corresponding probe object, and an error if there is any. +func (c *probes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Probe, err error) { + result = &v1.Probe{} + err = c.client.Get(). + Namespace(c.ns). + Resource("probes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Probes that match those selectors. +func (c *probes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProbeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ProbeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("probes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested probes. +func (c *probes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("probes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a probe and creates it. Returns the server's representation of the probe, and an error, if there is any. +func (c *probes) Create(ctx context.Context, probe *v1.Probe, opts metav1.CreateOptions) (result *v1.Probe, err error) { + result = &v1.Probe{} + err = c.client.Post(). + Namespace(c.ns). + Resource("probes"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(probe). 
+ Do(ctx). + Into(result) + return +} + +// Update takes the representation of a probe and updates it. Returns the server's representation of the probe, and an error, if there is any. +func (c *probes) Update(ctx context.Context, probe *v1.Probe, opts metav1.UpdateOptions) (result *v1.Probe, err error) { + result = &v1.Probe{} + err = c.client.Put(). + Namespace(c.ns). + Resource("probes"). + Name(probe.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(probe). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the probe and deletes it. Returns an error if one occurs. +func (c *probes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("probes"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *probes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("probes"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched probe. +func (c *probes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Probe, err error) { + result = &v1.Probe{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("probes"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go new file mode 100644 index 0000000000000000000000000000000000000000..a850a0377b4b0c31d8b14c69387f0e734a946b0c --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go @@ -0,0 +1,193 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PrometheusesGetter has a method to return a PrometheusInterface. +// A group's client should implement this interface. 
+type PrometheusesGetter interface { + Prometheuses(namespace string) PrometheusInterface +} + +// PrometheusInterface has methods to work with Prometheus resources. +type PrometheusInterface interface { + Create(ctx context.Context, prometheus *v1.Prometheus, opts metav1.CreateOptions) (*v1.Prometheus, error) + Update(ctx context.Context, prometheus *v1.Prometheus, opts metav1.UpdateOptions) (*v1.Prometheus, error) + UpdateStatus(ctx context.Context, prometheus *v1.Prometheus, opts metav1.UpdateOptions) (*v1.Prometheus, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Prometheus, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PrometheusList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Prometheus, err error) + PrometheusExpansion +} + +// prometheuses implements PrometheusInterface +type prometheuses struct { + client rest.Interface + ns string +} + +// newPrometheuses returns a Prometheuses +func newPrometheuses(c *MonitoringV1Client, namespace string) *prometheuses { + return &prometheuses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the prometheus, and returns the corresponding prometheus object, and an error if there is any. +func (c *prometheuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Prometheus, err error) { + result = &v1.Prometheus{} + err = c.client.Get(). + Namespace(c.ns). + Resource("prometheuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Prometheuses that match those selectors. +func (c *prometheuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PrometheusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PrometheusList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("prometheuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested prometheuses. +func (c *prometheuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("prometheuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a prometheus and creates it. Returns the server's representation of the prometheus, and an error, if there is any. +func (c *prometheuses) Create(ctx context.Context, prometheus *v1.Prometheus, opts metav1.CreateOptions) (result *v1.Prometheus, err error) { + result = &v1.Prometheus{} + err = c.client.Post(). + Namespace(c.ns). + Resource("prometheuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(prometheus). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a prometheus and updates it. 
Returns the server's representation of the prometheus, and an error, if there is any. +func (c *prometheuses) Update(ctx context.Context, prometheus *v1.Prometheus, opts metav1.UpdateOptions) (result *v1.Prometheus, err error) { + result = &v1.Prometheus{} + err = c.client.Put(). + Namespace(c.ns). + Resource("prometheuses"). + Name(prometheus.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(prometheus). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *prometheuses) UpdateStatus(ctx context.Context, prometheus *v1.Prometheus, opts metav1.UpdateOptions) (result *v1.Prometheus, err error) { + result = &v1.Prometheus{} + err = c.client.Put(). + Namespace(c.ns). + Resource("prometheuses"). + Name(prometheus.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(prometheus). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the prometheus and deletes it. Returns an error if one occurs. +func (c *prometheuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("prometheuses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *prometheuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("prometheuses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched prometheus. +func (c *prometheuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Prometheus, err error) { + result = &v1.Prometheus{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("prometheuses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go new file mode 100644 index 0000000000000000000000000000000000000000..4222d7f8c22e4a46e2179e83223a7612ad5a9049 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go @@ -0,0 +1,176 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. 
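Because these resources are CRDs, the API server accepts JSON patch and JSON merge patch for them (strategic merge patch is not supported for custom resources). A hedged sketch of the generated Patch method; the namespace, rule name, and label key are placeholders, and only Patch itself comes from the client above:

package example

import (
	"context"

	monitoringv1client "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// addLabel attaches a placeholder label to an existing PrometheusRule
// using an RFC 7386 JSON merge patch.
func addLabel(ctx context.Context, c monitoringv1client.MonitoringV1Interface, ns, name string) error {
	patch := []byte(`{"metadata":{"labels":{"example.io/managed-by":"ks-apiserver"}}}`)
	_, err := c.PrometheusRules(ns).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}

A merge patch touches only the listed fields, which avoids the read-modify-write conflicts a plain Update can hit when a rule is edited concurrently.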
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PrometheusRulesGetter has a method to return a PrometheusRuleInterface. +// A group's client should implement this interface. +type PrometheusRulesGetter interface { + PrometheusRules(namespace string) PrometheusRuleInterface +} + +// PrometheusRuleInterface has methods to work with PrometheusRule resources. +type PrometheusRuleInterface interface { + Create(ctx context.Context, prometheusRule *v1.PrometheusRule, opts metav1.CreateOptions) (*v1.PrometheusRule, error) + Update(ctx context.Context, prometheusRule *v1.PrometheusRule, opts metav1.UpdateOptions) (*v1.PrometheusRule, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PrometheusRule, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PrometheusRuleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PrometheusRule, err error) + PrometheusRuleExpansion +} + +// prometheusRules implements PrometheusRuleInterface +type prometheusRules struct { + client rest.Interface + ns string +} + +// newPrometheusRules returns a PrometheusRules +func newPrometheusRules(c *MonitoringV1Client, namespace string) *prometheusRules { + return &prometheusRules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the prometheusRule, and returns the corresponding prometheusRule object, and an error if there is any. +func (c *prometheusRules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PrometheusRule, err error) { + result = &v1.PrometheusRule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("prometheusrules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PrometheusRules that match those selectors. +func (c *prometheusRules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PrometheusRuleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PrometheusRuleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("prometheusrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested prometheusRules. +func (c *prometheusRules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("prometheusrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a prometheusRule and creates it. Returns the server's representation of the prometheusRule, and an error, if there is any. +func (c *prometheusRules) Create(ctx context.Context, prometheusRule *v1.PrometheusRule, opts metav1.CreateOptions) (result *v1.PrometheusRule, err error) { + result = &v1.PrometheusRule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("prometheusrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(prometheusRule). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a prometheusRule and updates it. Returns the server's representation of the prometheusRule, and an error, if there is any. +func (c *prometheusRules) Update(ctx context.Context, prometheusRule *v1.PrometheusRule, opts metav1.UpdateOptions) (result *v1.PrometheusRule, err error) { + result = &v1.PrometheusRule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("prometheusrules"). + Name(prometheusRule.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(prometheusRule). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the prometheusRule and deletes it. Returns an error if one occurs. +func (c *prometheusRules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("prometheusrules"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *prometheusRules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("prometheusrules"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched prometheusRule. +func (c *prometheusRules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PrometheusRule, err error) { + result = &v1.PrometheusRule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("prometheusrules"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go new file mode 100644 index 0000000000000000000000000000000000000000..b9a6d3655864d7cd0482b421749ef2144adec587 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go @@ -0,0 +1,176 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServiceMonitorsGetter has a method to return a ServiceMonitorInterface. +// A group's client should implement this interface. +type ServiceMonitorsGetter interface { + ServiceMonitors(namespace string) ServiceMonitorInterface +} + +// ServiceMonitorInterface has methods to work with ServiceMonitor resources. +type ServiceMonitorInterface interface { + Create(ctx context.Context, serviceMonitor *v1.ServiceMonitor, opts metav1.CreateOptions) (*v1.ServiceMonitor, error) + Update(ctx context.Context, serviceMonitor *v1.ServiceMonitor, opts metav1.UpdateOptions) (*v1.ServiceMonitor, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceMonitor, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceMonitorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceMonitor, err error) + ServiceMonitorExpansion +} + +// serviceMonitors implements ServiceMonitorInterface +type serviceMonitors struct { + client rest.Interface + ns string +} + +// newServiceMonitors returns a ServiceMonitors +func newServiceMonitors(c *MonitoringV1Client, namespace string) *serviceMonitors { + return &serviceMonitors{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the serviceMonitor, and returns the corresponding serviceMonitor object, and an error if there is any. +func (c *serviceMonitors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceMonitor, err error) { + result = &v1.ServiceMonitor{} + err = c.client.Get(). + Namespace(c.ns). + Resource("servicemonitors"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceMonitors that match those selectors. +func (c *serviceMonitors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceMonitorList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceMonitorList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("servicemonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceMonitors. +func (c *serviceMonitors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). 
+ Resource("servicemonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceMonitor and creates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. +func (c *serviceMonitors) Create(ctx context.Context, serviceMonitor *v1.ServiceMonitor, opts metav1.CreateOptions) (result *v1.ServiceMonitor, err error) { + result = &v1.ServiceMonitor{} + err = c.client.Post(). + Namespace(c.ns). + Resource("servicemonitors"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceMonitor). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serviceMonitor and updates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. +func (c *serviceMonitors) Update(ctx context.Context, serviceMonitor *v1.ServiceMonitor, opts metav1.UpdateOptions) (result *v1.ServiceMonitor, err error) { + result = &v1.ServiceMonitor{} + err = c.client.Put(). + Namespace(c.ns). + Resource("servicemonitors"). + Name(serviceMonitor.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceMonitor). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serviceMonitor and deletes it. Returns an error if one occurs. +func (c *serviceMonitors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("servicemonitors"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceMonitors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("servicemonitors"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched serviceMonitor. +func (c *serviceMonitors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceMonitor, err error) { + result = &v1.ServiceMonitor{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("servicemonitors"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go new file mode 100644 index 0000000000000000000000000000000000000000..02f06ee275127e4e586617a7f3cae5f57baadae3 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/thanosruler.go @@ -0,0 +1,193 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + scheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ThanosRulersGetter has a method to return a ThanosRulerInterface. +// A group's client should implement this interface. +type ThanosRulersGetter interface { + ThanosRulers(namespace string) ThanosRulerInterface +} + +// ThanosRulerInterface has methods to work with ThanosRuler resources. +type ThanosRulerInterface interface { + Create(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.CreateOptions) (*v1.ThanosRuler, error) + Update(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.UpdateOptions) (*v1.ThanosRuler, error) + UpdateStatus(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.UpdateOptions) (*v1.ThanosRuler, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ThanosRuler, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ThanosRulerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ThanosRuler, err error) + ThanosRulerExpansion +} + +// thanosRulers implements ThanosRulerInterface +type thanosRulers struct { + client rest.Interface + ns string +} + +// newThanosRulers returns a ThanosRulers +func newThanosRulers(c *MonitoringV1Client, namespace string) *thanosRulers { + return &thanosRulers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the thanosRuler, and returns the corresponding thanosRuler object, and an error if there is any. +func (c *thanosRulers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ThanosRuler, err error) { + result = &v1.ThanosRuler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("thanosrulers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThanosRulers that match those selectors. +func (c *thanosRulers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ThanosRulerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ThanosRulerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("thanosrulers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thanosRulers. +func (c *thanosRulers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("thanosrulers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a thanosRuler and creates it. Returns the server's representation of the thanosRuler, and an error, if there is any. +func (c *thanosRulers) Create(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.CreateOptions) (result *v1.ThanosRuler, err error) { + result = &v1.ThanosRuler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("thanosrulers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(thanosRuler). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a thanosRuler and updates it. Returns the server's representation of the thanosRuler, and an error, if there is any. +func (c *thanosRulers) Update(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.UpdateOptions) (result *v1.ThanosRuler, err error) { + result = &v1.ThanosRuler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("thanosrulers"). + Name(thanosRuler.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(thanosRuler). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *thanosRulers) UpdateStatus(ctx context.Context, thanosRuler *v1.ThanosRuler, opts metav1.UpdateOptions) (result *v1.ThanosRuler, err error) { + result = &v1.ThanosRuler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("thanosrulers"). + Name(thanosRuler.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(thanosRuler). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the thanosRuler and deletes it. Returns an error if one occurs. +func (c *thanosRulers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("thanosrulers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thanosRulers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("thanosrulers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched thanosRuler. +func (c *thanosRulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ThanosRuler, err error) { + result = &v1.ThanosRuler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("thanosrulers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go new file mode 100644 index 0000000000000000000000000000000000000000..c6ea0db94da19706d780bdae6d72ee89e6ef8335 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go @@ -0,0 +1,24 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exemplar + +import "github.com/prometheus/prometheus/pkg/labels" + +// Exemplar is additional information associated with a time series. +type Exemplar struct { + Labels labels.Labels + Value float64 + HasTs bool + Ts int64 +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go new file mode 100644 index 0000000000000000000000000000000000000000..1d6716beb69a00193be20fb5941f10d9692d06e4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go @@ -0,0 +1,297 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rulefmt + +import ( + "context" + "io/ioutil" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + yaml "gopkg.in/yaml.v3" + + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/template" +) + +// Error represents semantic errors on parsing rule groups. +type Error struct { + Group string + Rule int + RuleName string + Err WrappedError +} + +// WrappedError wraps error with the yaml node which can be used to represent +// the line and column numbers of the error. +type WrappedError struct { + err error + node *yaml.Node + nodeAlt *yaml.Node +} + +func (err *Error) Error() string { + if err.Err.nodeAlt != nil { + return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error() + } else if err.Err.node != nil { + return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error() + } + return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() +} + +// RuleGroups is a set of rule groups that are typically exposed in a file. 
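+//
+// The YAML shape this type expects is the standard Prometheus rule-file
+// layout; a minimal illustrative example (rule and group names here are
+// placeholders, not taken from this file):
+//
+//  groups:
+//    - name: example
+//      rules:
+//        - record: job:http_requests:rate5m
+//          expr: rate(http_requests_total[5m])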
+type RuleGroups struct { + Groups []RuleGroup `yaml:"groups"` +} + +type ruleGroups struct { + Groups []yaml.Node `yaml:"groups"` +} + +// Validate validates all rules in the rule groups. +func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { + set := map[string]struct{}{} + + for j, g := range g.Groups { + if g.Name == "" { + errs = append(errs, errors.Errorf("%d:%d: Groupname should not be empty", node.Groups[j].Line, node.Groups[j].Column)) + } + + if _, ok := set[g.Name]; ok { + errs = append( + errs, + errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name), + ) + } + + set[g.Name] = struct{}{} + + for i, r := range g.Rules { + for _, node := range r.Validate() { + var ruleName yaml.Node + if r.Alert.Value != "" { + ruleName = r.Alert + } else { + ruleName = r.Record + } + errs = append(errs, &Error{ + Group: g.Name, + Rule: i, + RuleName: ruleName.Value, + Err: node, + }) + } + } + } + + return errs +} + +// RuleGroup is a list of sequentially evaluated recording and alerting rules. +type RuleGroup struct { + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + Rules []RuleNode `yaml:"rules"` +} + +// Rule describes an alerting or recording rule. +type Rule struct { + Record string `yaml:"record,omitempty"` + Alert string `yaml:"alert,omitempty"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` +} + +// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. +type RuleNode struct { + Record yaml.Node `yaml:"record,omitempty"` + Alert yaml.Node `yaml:"alert,omitempty"` + Expr yaml.Node `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` +} + +// Validate the rule and return a list of encountered errors. 
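+// A rule passes validation only if: exactly one of 'record' or 'alert' is
+// set; 'expr' is present and parses as PromQL; a recording rule carries no
+// 'for' or 'annotations' and names a valid metric; and every label name,
+// label value, and annotation name is valid. Templates in the labels and
+// annotations of alerting rules are additionally test-parsed via
+// testTemplateParsing.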
+func (r *RuleNode) Validate() (nodes []WrappedError) { + if r.Record.Value != "" && r.Alert.Value != "" { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("only one of 'record' and 'alert' must be set"), + node: &r.Record, + nodeAlt: &r.Alert, + }) + } + if r.Record.Value == "" && r.Alert.Value == "" { + if r.Record.Value == "0" { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("one of 'record' or 'alert' must be set"), + node: &r.Alert, + }) + } else { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("one of 'record' or 'alert' must be set"), + node: &r.Record, + }) + } + } + + if r.Expr.Value == "" { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("field 'expr' must be set in rule"), + node: &r.Expr, + }) + } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil { + nodes = append(nodes, WrappedError{ + err: errors.Wrapf(err, "could not parse expression"), + node: &r.Expr, + }) + } + if r.Record.Value != "" { + if len(r.Annotations) > 0 { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid field 'annotations' in recording rule"), + node: &r.Record, + }) + } + if r.For != 0 { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid field 'for' in recording rule"), + node: &r.Record, + }) + } + if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid recording rule name: %s", r.Record.Value), + node: &r.Record, + }) + } + } + + for k, v := range r.Labels { + if !model.LabelName(k).IsValid() { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid label name: %s", k), + }) + } + + if !model.LabelValue(v).IsValid() { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid label value: %s", v), + }) + } + } + + for k := range r.Annotations { + if !model.LabelName(k).IsValid() { + nodes = append(nodes, WrappedError{ + err: errors.Errorf("invalid annotation name: %s", k), + }) + } + } + + for _, err := range testTemplateParsing(r) { + nodes = append(nodes, WrappedError{err: err}) + } + + return +} + +// testTemplateParsing checks if the templates used in labels and annotations +// of the alerting rules are parsed correctly. +func testTemplateParsing(rl *RuleNode) (errs []error) { + if rl.Alert.Value == "" { + // Not an alerting rule. + return errs + } + + // Trying to parse templates. + tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0) + defs := []string{ + "{{$labels := .Labels}}", + "{{$externalLabels := .ExternalLabels}}", + "{{$value := .Value}}", + } + parseTest := func(text string) error { + tmpl := template.NewTemplateExpander( + context.TODO(), + strings.Join(append(defs, text), ""), + "__alert_"+rl.Alert.Value, + tmplData, + model.Time(timestamp.FromTime(time.Now())), + nil, + nil, + ) + return tmpl.ParseTest() + } + + // Parsing Labels. + for k, val := range rl.Labels { + err := parseTest(val) + if err != nil { + errs = append(errs, errors.Wrapf(err, "label %q", k)) + } + } + + // Parsing Annotations. + for k, val := range rl.Annotations { + err := parseTest(val) + if err != nil { + errs = append(errs, errors.Wrapf(err, "annotation %q", k)) + } + } + + return errs +} + +// Parse parses and validates a set of rules. 
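+// The content is unmarshaled twice: once into the typed RuleGroups and once
+// into raw yaml.Node values, so that Validate can attach line and column
+// positions to semantic errors. A caller-side sketch (variable names
+// assumed, not part of this package):
+//
+//  groups, errs := rulefmt.Parse(data)
+//  for _, err := range errs {
+//      fmt.Println(err)
+//  }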
+func Parse(content []byte) (*RuleGroups, []error) { + var ( + groups RuleGroups + node ruleGroups + errs []error + ) + + err := yaml.Unmarshal(content, &groups) + if err != nil { + errs = append(errs, err) + } + err = yaml.Unmarshal(content, &node) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return nil, errs + } + + return &groups, groups.Validate(node) +} + +// ParseFile reads and parses rules from a file. +func ParseFile(file string) (*RuleGroups, []error) { + b, err := ioutil.ReadFile(file) + if err != nil { + return nil, []error{errors.Wrap(err, file)} + } + rgs, errs := Parse(b) + for i := range errs { + errs[i] = errors.Wrap(errs[i], file) + } + return rgs, errs +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/README.md b/vendor/github.com/prometheus/prometheus/pkg/textparse/README.md new file mode 100644 index 0000000000000000000000000000000000000000..697966f0975a0b9b3cf66457a79fdb4aa9577354 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/README.md @@ -0,0 +1,6 @@ +# Making changes to textparse lexers +In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command: +`golex -o=promlex.l.go promlex.l` + +Note that you need golex installed: +`go get -u modernc.org/golex` \ No newline at end of file diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go new file mode 100644 index 0000000000000000000000000000000000000000..cfcd05e210f0447088932680bbd961c970d71fee --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "mime" + + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" +) + +// Parser parses samples from a byte slice of samples in the official +// Prometheus and OpenMetrics text exposition formats. +type Parser interface { + // Series returns the bytes of the series, the timestamp if set, and the value + // of the current sample. + Series() ([]byte, *int64, float64) + + // Help returns the metric name and help text in the current entry. + // Must only be called after Next returned a help entry. + // The returned byte slices become invalid after the next call to Next. + Help() ([]byte, []byte) + + // Type returns the metric name and type in the current entry. + // Must only be called after Next returned a type entry. + // The returned byte slices become invalid after the next call to Next. + Type() ([]byte, MetricType) + + // Unit returns the metric name and unit in the current entry. + // Must only be called after Next returned a unit entry. + // The returned byte slices become invalid after the next call to Next. + Unit() ([]byte, []byte) + + // Comment returns the text of the current comment. 
+ // Must only be called after Next returned a comment entry. + // The returned byte slice becomes invalid after the next call to Next. + Comment() []byte + + // Metric writes the labels of the current sample into the passed labels. + // It returns the string from which the metric was parsed. + Metric(l *labels.Labels) string + + // Exemplar writes the exemplar of the current sample into the passed + // exemplar. It returns if an exemplar exists or not. + Exemplar(l *exemplar.Exemplar) bool + + // Next advances the parser to the next sample. It returns false if no + // more samples were read or an error occurred. + Next() (Entry, error) +} + +// New returns a new parser of the byte slice. +func New(b []byte, contentType string) Parser { + mediaType, _, err := mime.ParseMediaType(contentType) + if err == nil && mediaType == "application/openmetrics-text" { + return NewOpenMetricsParser(b) + } + return NewPromParser(b) +} + +// Entry represents the type of a parsed entry. +type Entry int + +const ( + EntryInvalid Entry = -1 + EntryType Entry = 0 + EntryHelp Entry = 1 + EntrySeries Entry = 2 + EntryComment Entry = 3 + EntryUnit Entry = 4 +) + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = "counter" + MetricTypeGauge = "gauge" + MetricTypeHistogram = "histogram" + MetricTypeGaugeHistogram = "gaugehistogram" + MetricTypeSummary = "summary" + MetricTypeInfo = "info" + MetricTypeStateset = "stateset" + MetricTypeUnknown = "unknown" +) diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l new file mode 100644 index 0000000000000000000000000000000000000000..91e4439423a0555864362257e879233f08e155c2 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l @@ -0,0 +1,80 @@ +%{ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. +func (l *openMetricsLexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +%} + +D [0-9] +L [a-zA-Z_] +M [a-zA-Z_:] +C [^\n] +S [ ] + +%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp + +%yyc c +%yyn c = l.next() +%yyt l.state + + +%% + +#{S} l.state = sComment +HELP{S} l.state = sMeta1; return tHelp +TYPE{S} l.state = sMeta1; return tType +UNIT{S} l.state = sMeta1; return tUnit +"EOF"\n? 
l.state = sInit; return tEOFWord +{M}({M}|{D})* l.state = sMeta2; return tMName +{S}{C}*\n l.state = sInit; return tText + +{M}({M}|{D})* l.state = sValue; return tMName +\{ l.state = sLabels; return tBraceOpen +{L}({L}|{D})* return tLName +\} l.state = sValue; return tBraceClose += l.state = sLValue; return tEqual +, return tComma +\"(\\.|[^\\"\n])*\" l.state = sLabels; return tLValue +{S}[^ \n]+ l.state = sTimestamp; return tValue +{S}[^ \n]+ return tTimestamp +\n l.state = sInit; return tLinebreak +{S}#{S}\{ l.state = sExemplar; return tComment + +{L}({L}|{D})* return tLName +\} l.state = sEValue; return tBraceClose += l.state = sEValue; return tEqual +\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue +, return tComma +{S}[^ \n]+ l.state = sETimestamp; return tValue +{S}[^ \n]+ return tTimestamp +\n l.state = sInit; return tLinebreak + +%% + + return tInvalid +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go new file mode 100644 index 0000000000000000000000000000000000000000..6093c9f59d5b721e7b4ab4835ecd711235a03373 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go @@ -0,0 +1,762 @@ +// Code generated by golex. DO NOT EDIT. + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. 
+func (l *openMetricsLexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(fmt.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart5 + case 2: // start condition: sMeta1 + goto yystart25 + case 3: // start condition: sMeta2 + goto yystart27 + case 4: // start condition: sLabels + goto yystart30 + case 5: // start condition: sLValue + goto yystart35 + case 6: // start condition: sValue + goto yystart39 + case 7: // start condition: sTimestamp + goto yystart43 + case 8: // start condition: sExemplar + goto yystart50 + case 9: // start condition: sEValue + goto yystart55 + case 10: // start condition: sETimestamp + goto yystart61 + } + + goto yystate0 // silence unused label error + goto yystate1 // silence unused label error +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate2 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + } + +yystate2: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate3 + } + +yystate3: + c = l.next() + goto yyrule1 + +yystate4: + c = l.next() + switch { + default: + goto yyrule8 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + } + + goto yystate5 // silence unused label error +yystate5: + c = l.next() +yystart5: + switch { + default: + goto yyabort + case c == 'E': + goto yystate6 + case c == 'H': + goto yystate10 + case c == 'T': + goto yystate15 + case c == 'U': + goto yystate20 + } + +yystate6: + c = l.next() + switch { + default: + goto yyabort + case c == 'O': + goto yystate7 + } + +yystate7: + c = l.next() + switch { + default: + goto yyabort + case c == 'F': + goto yystate8 + } + +yystate8: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\n': + goto yystate9 + } + +yystate9: + c = l.next() + goto yyrule5 + +yystate10: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate11 + } + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate14 + } + +yystate14: + c = l.next() + goto yyrule2 + +yystate15: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate16 + } + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate19 + } + +yystate19: + c = l.next() + goto yyrule3 + +yystate20: + c = l.next() + switch { + default: + goto yyabort + case c == 'N': + goto yystate21 + } + +yystate21: + c = l.next() + switch { + default: + goto yyabort + case c == 'I': + goto yystate22 + } + +yystate22: + c = l.next() + switch { + default: + goto yyabort + case c == 'T': + goto yystate23 + } + +yystate23: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate24 + } + +yystate24: + c = l.next() + goto yyrule4 + + goto yystate25 // silence unused label error +yystate25: + c = l.next() +yystart25: + switch { + 
default: + goto yyabort + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate26 + } + +yystate26: + c = l.next() + switch { + default: + goto yyrule6 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate26 + } + + goto yystate27 // silence unused label error +yystate27: + c = l.next() +yystart27: + switch { + default: + goto yyabort + case c == ' ': + goto yystate28 + } + +yystate28: + c = l.next() + switch { + default: + goto yyabort + case c == '\n': + goto yystate29 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate28 + } + +yystate29: + c = l.next() + goto yyrule7 + + goto yystate30 // silence unused label error +yystate30: + c = l.next() +yystart30: + switch { + default: + goto yyabort + case c == ',': + goto yystate31 + case c == '=': + goto yystate32 + case c == '}': + goto yystate34 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate33 + } + +yystate31: + c = l.next() + goto yyrule13 + +yystate32: + c = l.next() + goto yyrule12 + +yystate33: + c = l.next() + switch { + default: + goto yyrule10 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate33 + } + +yystate34: + c = l.next() + goto yyrule11 + + goto yystate35 // silence unused label error +yystate35: + c = l.next() +yystart35: + switch { + default: + goto yyabort + case c == '"': + goto yystate36 + } + +yystate36: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate37 + case c == '\\': + goto yystate38 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate36 + } + +yystate37: + c = l.next() + goto yyrule14 + +yystate38: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate36 + } + + goto yystate39 // silence unused label error +yystate39: + c = l.next() +yystart39: + switch { + default: + goto yyabort + case c == ' ': + goto yystate40 + case c == '{': + goto yystate42 + } + +yystate40: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate41 + } + +yystate41: + c = l.next() + switch { + default: + goto yyrule15 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate41 + } + +yystate42: + c = l.next() + goto yyrule9 + + goto yystate43 // silence unused label error +yystate43: + c = l.next() +yystart43: + switch { + default: + goto yyabort + case c == ' ': + goto yystate45 + case c == '\n': + goto yystate44 + } + +yystate44: + c = l.next() + goto yyrule17 + +yystate45: + c = l.next() + switch { + default: + goto yyabort + case c == '#': + goto yystate47 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ': + goto yystate46 + } + +yystate46: + c = l.next() + switch { + default: + goto yyrule16 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate46 + } + +yystate47: + c = l.next() + switch { + default: + goto yyrule16 + case c == ' ': + goto yystate48 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': + goto yystate46 + } + +yystate48: + c = l.next() + switch { + default: + goto yyabort + case c == '{': + goto yystate49 + } + +yystate49: + c = l.next() + goto yyrule18 + + goto yystate50 // silence unused label error +yystate50: + c = l.next() +yystart50: + switch { + default: + goto yyabort + case c == ',': + goto yystate51 + case c == '=': + goto yystate52 + case c == '}': + goto yystate54 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate53 + } + +yystate51: + c = l.next() + goto yyrule23 + +yystate52: + c = l.next() + goto yyrule21 + +yystate53: + c = l.next() + switch { + default: + goto yyrule19 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate53 + } + +yystate54: + c = l.next() + goto yyrule20 + + goto yystate55 // silence unused label error +yystate55: + c = l.next() +yystart55: + switch { + default: + goto yyabort + case c == ' ': + goto yystate56 + case c == '"': + goto yystate58 + } + +yystate56: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate57 + } + +yystate57: + c = l.next() + switch { + default: + goto yyrule24 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate57 + } + +yystate58: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate59 + case c == '\\': + goto yystate60 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate58 + } + +yystate59: + c = l.next() + goto yyrule22 + +yystate60: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate58 + } + + goto yystate61 // silence unused label error +yystate61: + c = l.next() +yystart61: + switch { + default: + goto yyabort + case c == ' ': + goto yystate63 + case c == '\n': + goto yystate62 + } + +yystate62: + c = l.next() + goto yyrule26 + +yystate63: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yystate64: + c = l.next() + switch { + default: + goto yyrule25 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yyrule1: // #{S} + { + l.state = sComment + goto yystate0 + } +yyrule2: // HELP{S} + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule3: // TYPE{S} + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule4: // UNIT{S} + { + l.state = sMeta1 + return tUnit + goto yystate0 + } +yyrule5: // "EOF"\n? 
+ { + l.state = sInit + return tEOFWord + goto yystate0 + } +yyrule6: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule7: // {S}{C}*\n + { + l.state = sInit + return tText + goto yystate0 + } +yyrule8: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule9: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule10: // {L}({L}|{D})* + { + return tLName + } +yyrule11: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule12: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule13: // , + { + return tComma + } +yyrule14: // \"(\\.|[^\\"\n])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule15: // {S}[^ \n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule16: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule17: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule18: // {S}#{S}\{ + { + l.state = sExemplar + return tComment + goto yystate0 + } +yyrule19: // {L}({L}|{D})* + { + return tLName + } +yyrule20: // \} + { + l.state = sEValue + return tBraceClose + goto yystate0 + } +yyrule21: // = + { + l.state = sEValue + return tEqual + goto yystate0 + } +yyrule22: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tLValue + goto yystate0 + } +yyrule23: // , + { + return tComma + } +yyrule24: // {S}[^ \n]+ + { + l.state = sETimestamp + return tValue + goto yystate0 + } +yyrule25: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule26: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + + goto yyabort // silence unused label error + +yyabort: // no lexem recognized + + return tInvalid +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go new file mode 100644 index 0000000000000000000000000000000000000000..6cfdd8391f5d1378530dd966aa8711ce8d6041ce --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go @@ -0,0 +1,477 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go get -u modernc.org/golex +//go:generate golex -o=openmetricslex.l.go openmetricslex.l + +package textparse + +import ( + "bytes" + "fmt" + "io" + "math" + "sort" + "strings" + "unicode/utf8" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" +) + +var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} + +type openMetricsLexer struct { + b []byte + i int + start int + err error + state int +} + +// buf returns the buffer of the current token. 
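+// The token spans l.b[l.start:l.i]: Lex sets start when it begins a token
+// and next() advances i one byte at a time.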
+func (l *openMetricsLexer) buf() []byte { + return l.b[l.start:l.i] +} + +func (l *openMetricsLexer) cur() byte { + if l.i < len(l.b) { + return l.b[l.i] + } + return byte(' ') +} + +// next advances the openMetricsLexer to the next character. +func (l *openMetricsLexer) next() byte { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + // Lex struggles with null bytes. If we are in a label value or help string, where + // they are allowed, consume them here immediately. + for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + } + return l.b[l.i] +} + +func (l *openMetricsLexer) Error(es string) { + l.err = errors.New(es) +} + +// OpenMetricsParser parses samples from a byte slice of samples in the official +// OpenMetrics text exposition format. +// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit +type OpenMetricsParser struct { + l *openMetricsLexer + series []byte + text []byte + mtype MetricType + val float64 + ts int64 + hasTS bool + start int + offsets []int + + eOffsets []int + exemplar []byte + exemplarVal float64 + exemplarTs int64 + hasExemplarTs bool +} + +// NewOpenMetricsParser returns a new parser of the byte slice. +func NewOpenMetricsParser(b []byte) Parser { + return &OpenMetricsParser{l: &openMetricsLexer{b: b}} +} + +// Series returns the bytes of the series, the timestamp if set, and the value +// of the current sample. +func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { + if p.hasTS { + ts := p.ts + return p.series, &ts, p.val + } + return p.series, nil, p.val +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Help() ([]byte, []byte) { + m := p.l.b[p.offsets[0]:p.offsets[1]] + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + // OpenMetrics always uses the Prometheus format label value escaping. + return m, []byte(lvalReplacer.Replace(string(p.text))) + } + return m, p.text +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Type() ([]byte, MetricType) { + return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype +} + +// Unit returns the metric name and unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Unit() ([]byte, []byte) { + // The Prometheus format does not have units. + return p.l.b[p.offsets[0]:p.offsets[1]], p.text +} + +// Comment returns the text of the current comment. +// Must only be called after Next returned a comment entry. +// The returned byte slice becomes invalid after the next call to Next. +func (p *OpenMetricsParser) Comment() []byte { + return p.text +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *OpenMetricsParser) Metric(l *labels.Labels) string { + // Allocate the full immutable string immediately, so we just + // have to create references on it below. 
+ s := string(p.series) + + *l = append(*l, labels.Label{ + Name: labels.MetricName, + Value: s[:p.offsets[0]-p.start], + }) + + for i := 1; i < len(p.offsets); i += 4 { + a := p.offsets[i] - p.start + b := p.offsets[i+1] - p.start + c := p.offsets[i+2] - p.start + d := p.offsets[i+3] - p.start + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(s[c:d], byte('\\')) >= 0 { + *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) + continue + } + *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + } + + // Sort labels. We can skip the first entry since the metric name is + // already at the right place. + sort.Sort((*l)[1:]) + + return s +} + +// Exemplar writes the exemplar of the current sample into the passed +// exemplar. It returns the whether an exemplar exists. +func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { + if len(p.exemplar) == 0 { + return false + } + + // Allocate the full immutable string immediately, so we just + // have to create references on it below. + s := string(p.exemplar) + + e.Value = p.exemplarVal + if p.hasExemplarTs { + e.HasTs = true + e.Ts = p.exemplarTs + } + + for i := 0; i < len(p.eOffsets); i += 4 { + a := p.eOffsets[i] - p.start + b := p.eOffsets[i+1] - p.start + c := p.eOffsets[i+2] - p.start + d := p.eOffsets[i+3] - p.start + + e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) + } + + // Sort the labels. + sort.Sort(e.Labels) + + return true +} + +// nextToken returns the next token from the openMetricsLexer. +func (p *OpenMetricsParser) nextToken() token { + tok := p.l.Lex() + return tok +} + +// Next advances the parser to the next sample. It returns false if no +// more samples were read or an error occurred. 
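+// OpenMetrics requires the input to end with a '# EOF' marker, so a plain
+// EOF is reported as an error here. The switch below dispatches on the first
+// token of each line: metadata (HELP/TYPE/UNIT), or a series with optional
+// labels, an optional timestamp, and a trailing '# {...}' exemplar comment.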
+func (p *OpenMetricsParser) Next() (Entry, error) { + var err error + + p.start = p.l.i + p.offsets = p.offsets[:0] + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + + switch t := p.nextToken(); t { + case tEOFWord: + if t := p.nextToken(); t != tEOF { + return EntryInvalid, errors.New("unexpected data after # EOF") + } + return EntryInvalid, io.EOF + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tHelp, tType, tUnit: + switch t := p.nextToken(); t { + case tMName: + p.offsets = append(p.offsets, p.l.start, p.l.i) + default: + return EntryInvalid, parseError("expected metric name after HELP", t) + } + switch t := p.nextToken(); t { + case tText: + if len(p.l.buf()) > 1 { + p.text = p.l.buf()[1 : len(p.l.buf())-1] + } else { + p.text = []byte{} + } + default: + return EntryInvalid, parseError("expected text in HELP", t) + } + switch t { + case tType: + switch s := yoloString(p.text); s { + case "counter": + p.mtype = MetricTypeCounter + case "gauge": + p.mtype = MetricTypeGauge + case "histogram": + p.mtype = MetricTypeHistogram + case "gaugehistogram": + p.mtype = MetricTypeGaugeHistogram + case "summary": + p.mtype = MetricTypeSummary + case "info": + p.mtype = MetricTypeInfo + case "stateset": + p.mtype = MetricTypeStateset + case "unknown": + p.mtype = MetricTypeUnknown + default: + return EntryInvalid, errors.Errorf("invalid metric type %q", s) + } + case tHelp: + if !utf8.Valid(p.text) { + return EntryInvalid, errors.New("help text is not a valid utf8 string") + } + } + switch t { + case tHelp: + return EntryHelp, nil + case tType: + return EntryType, nil + case tUnit: + m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]]) + u := yoloString(p.text) + if len(u) > 0 { + if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { + return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m) + } + } + return EntryUnit, nil + } + + case tMName: + p.offsets = append(p.offsets, p.l.i) + p.series = p.l.b[p.start:p.l.i] + + t2 := p.nextToken() + if t2 == tBraceOpen { + offsets, err := p.parseLVals() + if err != nil { + return EntryInvalid, err + } + p.offsets = append(p.offsets, offsets...) + p.series = p.l.b[p.start:p.l.i] + t2 = p.nextToken() + } + p.val, err = p.getFloatValue(t2, "metric") + if err != nil { + return EntryInvalid, err + } + + p.hasTS = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tLinebreak: + break + case tComment: + if err := p.parseComment(); err != nil { + return EntryInvalid, err + } + case tTimestamp: + p.hasTS = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { + return EntryInvalid, err + } + p.ts = int64(ts * 1000) + switch t3 := p.nextToken(); t3 { + case tLinebreak: + case tComment: + if err := p.parseComment(); err != nil { + return EntryInvalid, err + } + default: + return EntryInvalid, parseError("expected next entry after timestamp", t3) + } + default: + return EntryInvalid, parseError("expected timestamp or # symbol", t2) + } + return EntrySeries, nil + + default: + err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + } + return EntryInvalid, err +} + +func (p *OpenMetricsParser) parseComment() error { + // Validate the name of the metric. 
It must have _total or _bucket as + // suffix for exemplars to be supported. + if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil { + return err + } + + // Parse the labels. + offsets, err := p.parseLVals() + if err != nil { + return err + } + p.eOffsets = append(p.eOffsets, offsets...) + p.exemplar = p.l.b[p.start:p.l.i] + + // Get the value. + p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels") + if err != nil { + return err + } + + // Read the optional timestamp. + p.hasExemplarTs = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return errors.New("data does not end with # EOF") + case tLinebreak: + break + case tTimestamp: + p.hasExemplarTs = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { + return err + } + p.exemplarTs = int64(ts * 1000) + switch t3 := p.nextToken(); t3 { + case tLinebreak: + default: + return parseError("expected next entry after exemplar timestamp", t3) + } + default: + return parseError("expected timestamp or comment", t2) + } + return nil +} + +func (p *OpenMetricsParser) parseLVals() ([]int, error) { + var offsets []int + first := true + for { + t := p.nextToken() + switch t { + case tBraceClose: + return offsets, nil + case tComma: + if first { + return nil, parseError("expected label name or left brace", t) + } + t = p.nextToken() + if t != tLName { + return nil, parseError("expected label name", t) + } + case tLName: + if !first { + return nil, parseError("expected comma", t) + } + default: + if first { + return nil, parseError("expected label name or left brace", t) + } + return nil, parseError("expected comma or left brace", t) + + } + first = false + // t is now a label name. + + offsets = append(offsets, p.l.start, p.l.i) + + if t := p.nextToken(); t != tEqual { + return nil, parseError("expected equal", t) + } + if t := p.nextToken(); t != tLValue { + return nil, parseError("expected label value", t) + } + if !utf8.Valid(p.l.buf()) { + return nil, errors.New("invalid UTF-8 label value") + } + + // The openMetricsLexer ensures the value string is quoted. Strip first + // and last character. + offsets = append(offsets, p.l.start+1, p.l.i-1) + } +} + +func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { + if t != tValue { + return 0, parseError(fmt.Sprintf("expected value after %v", after), t) + } + val, err := parseFloat(yoloString(p.l.buf()[1:])) + if err != nil { + return 0, err + } + // Ensure canonical NaN value. + if math.IsNaN(p.exemplarVal) { + val = math.Float64frombits(value.NormalNaN) + } + return val, nil +} + +func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error { + for _, suffix := range allowedSuffixes { + if bytes.HasSuffix(name, suffix) { + return nil + } + } + return fmt.Errorf("metric name %v does not support exemplars", string(name)) +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l b/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l new file mode 100644 index 0000000000000000000000000000000000000000..c3c5c3bb0017422983543372e0d840c0d5a6671d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l @@ -0,0 +1,100 @@ +%{ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +const ( + sInit = iota + sComment + sMeta1 + sMeta2 + sLabels + sLValue + sValue + sTimestamp +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. +func (l *promlexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +%} + +D [0-9] +L [a-zA-Z_] +M [a-zA-Z_:] +C [^\n] + +%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp + +%yyc c +%yyn c = l.next() +%yyt l.state + + +%% + +\0 return tEOF +\n l.state = sInit; return tLinebreak +<*>[ \t]+ return tWhitespace + +#[ \t]+ l.state = sComment +# return l.consumeComment() +HELP[\t ]+ l.state = sMeta1; return tHelp +TYPE[\t ]+ l.state = sMeta1; return tType +{M}({M}|{D})* l.state = sMeta2; return tMName +{C}* l.state = sInit; return tText + +{M}({M}|{D})* l.state = sValue; return tMName +\{ l.state = sLabels; return tBraceOpen +{L}({L}|{D})* return tLName +\} l.state = sValue; return tBraceClose += l.state = sLValue; return tEqual +, return tComma +\"(\\.|[^\\"])*\" l.state = sLabels; return tLValue +[^{ \t\n]+ l.state = sTimestamp; return tValue +{D}+ return tTimestamp +\n l.state = sInit; return tLinebreak + +%% + // Workaround to gobble up comments that started with a HELP or TYPE + // prefix. We just consume all characters until we reach a newline. + // This saves us from adding disproportionate complexity to the parser. + if l.state == sComment { + return l.consumeComment() + } + return tInvalid +} + +func (l *promlexer) consumeComment() token { + for c := l.cur(); ; c = l.next() { + switch c { + case 0: + return tEOF + case '\n': + l.state = sInit + return tComment + } + } +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l.go new file mode 100644 index 0000000000000000000000000000000000000000..690ec4e05bb45940a20d3c043d1b7f6f3d45ae3e --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l.go @@ -0,0 +1,553 @@ +// CAUTION: Generated file - DO NOT EDIT. + +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "github.com/pkg/errors" +) + +const ( + sInit = iota + sComment + sMeta1 + sMeta2 + sLabels + sLValue + sValue + sTimestamp + sExemplar + sEValue + sETimestamp +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. 
The method is opened before the matching rules block and closed at +// the end of the file. +func (l *promlexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(errors.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart8 + case 2: // start condition: sMeta1 + goto yystart19 + case 3: // start condition: sMeta2 + goto yystart21 + case 4: // start condition: sLabels + goto yystart24 + case 5: // start condition: sLValue + goto yystart29 + case 6: // start condition: sValue + goto yystart33 + case 7: // start condition: sTimestamp + goto yystart36 + } + + goto yystate0 // silence unused label error + goto yystate1 // silence unused label error +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate5 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + case c == '\n': + goto yystate4 + case c == '\t' || c == ' ': + goto yystate3 + case c == '\x00': + goto yystate2 + } + +yystate2: + c = l.next() + goto yyrule1 + +yystate3: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate4: + c = l.next() + goto yyrule2 + +yystate5: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate6: + c = l.next() + switch { + default: + goto yyrule4 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate7: + c = l.next() + switch { + default: + goto yyrule10 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + } + + goto yystate8 // silence unused label error +yystate8: + c = l.next() +yystart8: + switch { + default: + goto yyabort + case c == 'H': + goto yystate9 + case c == 'T': + goto yystate14 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate9: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate10 + } + +yystate10: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate11 + } + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyrule6 + case c == '\t' || c == ' ': + goto yystate13 + } + +yystate14: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate15 + } + +yystate15: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate16 + } + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyrule7 + case c == '\t' || c == ' ': + goto yystate18 + } + + goto yystate19 // silence unused label error +yystate19: + c = l.next() +yystart19: + switch { + default: + goto yyabort + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate20 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate20: + c = l.next() + switch { + default: + goto yyrule8 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate20 + } + 
+ goto yystate21 // silence unused label error +yystate21: + c = l.next() +yystart21: + switch { + default: + goto yyrule9 + case c == '\t' || c == ' ': + goto yystate23 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate22 + } + +yystate22: + c = l.next() + switch { + default: + goto yyrule9 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate22 + } + +yystate23: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate23 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate22 + } + + goto yystate24 // silence unused label error +yystate24: + c = l.next() +yystart24: + switch { + default: + goto yyabort + case c == ',': + goto yystate25 + case c == '=': + goto yystate26 + case c == '\t' || c == ' ': + goto yystate3 + case c == '}': + goto yystate28 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate27 + } + +yystate25: + c = l.next() + goto yyrule15 + +yystate26: + c = l.next() + goto yyrule14 + +yystate27: + c = l.next() + switch { + default: + goto yyrule12 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate27 + } + +yystate28: + c = l.next() + goto yyrule13 + + goto yystate29 // silence unused label error +yystate29: + c = l.next() +yystart29: + switch { + default: + goto yyabort + case c == '"': + goto yystate30 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate30: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate31 + case c == '\\': + goto yystate32 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate30 + } + +yystate31: + c = l.next() + goto yyrule16 + +yystate32: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate30 + } + + goto yystate33 // silence unused label error +yystate33: + c = l.next() +yystart33: + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate3 + case c == '{': + goto yystate35 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate34 + } + +yystate34: + c = l.next() + switch { + default: + goto yyrule17 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate34 + } + +yystate35: + c = l.next() + goto yyrule11 + + goto yystate36 // silence unused label error +yystate36: + c = l.next() +yystart36: + switch { + default: + goto yyabort + case c == '\n': + goto yystate37 + case c == '\t' || c == ' ': + goto yystate3 + case c >= '0' && c <= '9': + goto yystate38 + } + +yystate37: + c = l.next() + goto yyrule19 + +yystate38: + c = l.next() + switch { + default: + goto yyrule18 + case c >= '0' && c <= '9': + goto yystate38 + } + +yyrule1: // \0 + { + return tEOF + } +yyrule2: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule3: // [ \t]+ + { + return tWhitespace + } +yyrule4: // #[ \t]+ + { + l.state = sComment + goto yystate0 + } +yyrule5: // # + { + return l.consumeComment() + } +yyrule6: // HELP[\t ]+ + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule7: // TYPE[\t ]+ + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule8: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule9: // {C}* + { + l.state = sInit + return tText + goto yystate0 + } +yyrule10: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule11: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule12: // {L}({L}|{D})* + { + return tLName + } +yyrule13: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule14: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule15: // , + { + return tComma + } +yyrule16: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule17: // [^{ \t\n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule18: // {D}+ + { + return tTimestamp + } +yyrule19: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + + goto yyabort // silence unused label error + +yyabort: // no lexem recognized + // Workaround to gobble up comments that started with a HELP or TYPE + // prefix. We just consume all characters until we reach a newline. + // This saves us from adding disproportionate complexity to the parser. + if l.state == sComment { + return l.consumeComment() + } + return tInvalid +} + +func (l *promlexer) consumeComment() token { + for c := l.cur(); ; c = l.next() { + switch c { + case 0: + return tEOF + case '\n': + l.state = sInit + return tComment + } + } +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go new file mode 100644 index 0000000000000000000000000000000000000000..3c885af0ba674b33ac8aaf7b6867b125a04e456d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go @@ -0,0 +1,426 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate go get -u modernc.org/golex +//go:generate golex -o=promlex.l.go promlex.l + +package textparse + +import ( + "fmt" + "io" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" + "unsafe" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" +) + +type promlexer struct { + b []byte + i int + start int + err error + state int +} + +type token int + +const ( + tInvalid token = -1 + tEOF token = 0 + tLinebreak token = iota + tWhitespace + tHelp + tType + tUnit + tEOFWord + tText + tComment + tBlank + tMName + tBraceOpen + tBraceClose + tLName + tLValue + tComma + tEqual + tTimestamp + tValue +) + +func (t token) String() string { + switch t { + case tInvalid: + return "INVALID" + case tEOF: + return "EOF" + case tLinebreak: + return "LINEBREAK" + case tWhitespace: + return "WHITESPACE" + case tHelp: + return "HELP" + case tType: + return "TYPE" + case tUnit: + return "UNIT" + case tEOFWord: + return "EOFWORD" + case tText: + return "TEXT" + case tComment: + return "COMMENT" + case tBlank: + return "BLANK" + case tMName: + return "MNAME" + case tBraceOpen: + return "BOPEN" + case tBraceClose: + return "BCLOSE" + case tLName: + return "LNAME" + case tLValue: + return "LVALUE" + case tEqual: + return "EQUAL" + case tComma: + return "COMMA" + case tTimestamp: + return "TIMESTAMP" + case tValue: + return "VALUE" + } + return fmt.Sprintf("", t) +} + +// buf returns the buffer of the current token. +func (l *promlexer) buf() []byte { + return l.b[l.start:l.i] +} + +func (l *promlexer) cur() byte { + return l.b[l.i] +} + +// next advances the promlexer to the next character. +func (l *promlexer) next() byte { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + // Lex struggles with null bytes. If we are in a label value or help string, where + // they are allowed, consume them here immediately. + for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) { + l.i++ + } + return l.b[l.i] +} + +func (l *promlexer) Error(es string) { + l.err = errors.New(es) +} + +// PromParser parses samples from a byte slice of samples in the official +// Prometheus text exposition format. +type PromParser struct { + l *promlexer + series []byte + text []byte + mtype MetricType + val float64 + ts int64 + hasTS bool + start int + offsets []int +} + +// NewPromParser returns a new parser of the byte slice. +func NewPromParser(b []byte) Parser { + return &PromParser{l: &promlexer{b: append(b, '\n')}} +} + +// Series returns the bytes of the series, the timestamp if set, and the value +// of the current sample. +func (p *PromParser) Series() ([]byte, *int64, float64) { + if p.hasTS { + return p.series, &p.ts, p.val + } + return p.series, nil, p.val +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *PromParser) Help() ([]byte, []byte) { + m := p.l.b[p.offsets[0]:p.offsets[1]] + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + return m, []byte(helpReplacer.Replace(string(p.text))) + } + return m, p.text +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. 
+
+// Type returns the metric name and type in the current entry.
+// Must only be called after Next returned a type entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Type() ([]byte, MetricType) {
+	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
+}
+
+// Unit returns the metric name and unit in the current entry.
+// Must only be called after Next returned a unit entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Unit() ([]byte, []byte) {
+	// The Prometheus format does not have units.
+	return nil, nil
+}
+
+// Comment returns the text of the current comment.
+// Must only be called after Next returned a comment entry.
+// The returned byte slice becomes invalid after the next call to Next.
+func (p *PromParser) Comment() []byte {
+	return p.text
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *PromParser) Metric(l *labels.Labels) string {
+	// Allocate the full immutable string immediately, so we just
+	// have to create references on it below.
+	s := string(p.series)
+
+	*l = append(*l, labels.Label{
+		Name:  labels.MetricName,
+		Value: s[:p.offsets[0]-p.start],
+	})
+
+	for i := 1; i < len(p.offsets); i += 4 {
+		a := p.offsets[i] - p.start
+		b := p.offsets[i+1] - p.start
+		c := p.offsets[i+2] - p.start
+		d := p.offsets[i+3] - p.start
+
+		// Replacer causes allocations. Replace only when necessary.
+		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
+			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
+			continue
+		}
+		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort labels to maintain the sorted labels invariant.
+	sort.Sort(*l)
+
+	return s
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns if an exemplar exists.
+func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
+	return false
+}
+
+// nextToken returns the next token from the promlexer. It skips over tabs
+// and spaces.
+func (p *PromParser) nextToken() token {
+	for {
+		if tok := p.l.Lex(); tok != tWhitespace {
+			return tok
+		}
+	}
+}
+
+func parseError(exp string, got token) error {
+	return errors.Errorf("%s, got %q", exp, got)
+}
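The "replace only when necessary" checks in `Metric` and `Help` above, together with the `yoloString` helper defined near the end of this file, are what keep the hot path allocation-free: byte slices are reinterpreted as strings without copying. A minimal standalone sketch of that zero-copy conversion and its main hazard:

```go
package main

import (
	"fmt"
	"unsafe"
)

// yoloString mirrors the helper in promparse.go: it reinterprets a byte
// slice as a string without copying. The caller must guarantee the bytes
// are not mutated while the string is in use.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

func main() {
	buf := []byte("http_requests_total")
	s := yoloString(buf)
	fmt.Println(s) // http_requests_total

	// Hazard: the string aliases the buffer, so mutating buf changes s,
	// violating Go's assumption that strings are immutable.
	buf[0] = 'X'
	fmt.Println(s) // Xttp_requests_total
}
```

This is why the parser only hands such strings to short-lived lookups (e.g. the `switch` over metric types in `Next` below) and copies into a real string before anything escapes the current entry.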
+
+// Next advances the parser to the next sample. It returns false if no
+// more samples were read or an error occurred.
+func (p *PromParser) Next() (Entry, error) {
+	var err error
+
+	p.start = p.l.i
+	p.offsets = p.offsets[:0]
+
+	switch t := p.nextToken(); t {
+	case tEOF:
+		return EntryInvalid, io.EOF
+	case tLinebreak:
+		// Allow full blank lines.
+		return p.Next()
+
+	case tHelp, tType:
+		switch t := p.nextToken(); t {
+		case tMName:
+			p.offsets = append(p.offsets, p.l.start, p.l.i)
+		default:
+			return EntryInvalid, parseError("expected metric name after HELP", t)
+		}
+		switch t := p.nextToken(); t {
+		case tText:
+			if len(p.l.buf()) > 1 {
+				p.text = p.l.buf()[1:]
+			} else {
+				p.text = []byte{}
+			}
+		default:
+			return EntryInvalid, parseError("expected text in HELP", t)
+		}
+		switch t {
+		case tType:
+			switch s := yoloString(p.text); s {
+			case "counter":
+				p.mtype = MetricTypeCounter
+			case "gauge":
+				p.mtype = MetricTypeGauge
+			case "histogram":
+				p.mtype = MetricTypeHistogram
+			case "summary":
+				p.mtype = MetricTypeSummary
+			case "untyped":
+				p.mtype = MetricTypeUnknown
+			default:
+				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
+			}
+		case tHelp:
+			if !utf8.Valid(p.text) {
+				return EntryInvalid, errors.Errorf("help text is not a valid utf8 string")
+			}
+		}
+		if t := p.nextToken(); t != tLinebreak {
+			return EntryInvalid, parseError("linebreak expected after metadata", t)
+		}
+		switch t {
+		case tHelp:
+			return EntryHelp, nil
+		case tType:
+			return EntryType, nil
+		}
+	case tComment:
+		p.text = p.l.buf()
+		if t := p.nextToken(); t != tLinebreak {
+			return EntryInvalid, parseError("linebreak expected after comment", t)
+		}
+		return EntryComment, nil
+
+	case tMName:
+		p.offsets = append(p.offsets, p.l.i)
+		p.series = p.l.b[p.start:p.l.i]
+
+		t2 := p.nextToken()
+		if t2 == tBraceOpen {
+			if err := p.parseLVals(); err != nil {
+				return EntryInvalid, err
+			}
+			p.series = p.l.b[p.start:p.l.i]
+			t2 = p.nextToken()
+		}
+		if t2 != tValue {
+			return EntryInvalid, parseError("expected value after metric", t)
+		}
+		if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
+			return EntryInvalid, err
+		}
+		// Ensure canonical NaN value.
+		if math.IsNaN(p.val) {
+			p.val = math.Float64frombits(value.NormalNaN)
+		}
+		p.hasTS = false
+		switch p.nextToken() {
+		case tLinebreak:
+			break
+		case tTimestamp:
+			p.hasTS = true
+			if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
+				return EntryInvalid, err
+			}
+			if t2 := p.nextToken(); t2 != tLinebreak {
+				return EntryInvalid, parseError("expected next entry after timestamp", t)
+			}
+		default:
+			return EntryInvalid, parseError("expected timestamp or new record", t)
+		}
+		return EntrySeries, nil
+
+	default:
+		err = errors.Errorf("%q is not a valid start token", t)
+	}
+	return EntryInvalid, err
+}
+
+func (p *PromParser) parseLVals() error {
+	t := p.nextToken()
+	for {
+		switch t {
+		case tBraceClose:
+			return nil
+		case tLName:
+		default:
+			return parseError("expected label name", t)
+		}
+		p.offsets = append(p.offsets, p.l.start, p.l.i)
+
+		if t := p.nextToken(); t != tEqual {
+			return parseError("expected equal", t)
+		}
+		if t := p.nextToken(); t != tLValue {
+			return parseError("expected label value", t)
+		}
+		if !utf8.Valid(p.l.buf()) {
+			return errors.Errorf("invalid UTF-8 label value")
+		}
+
+		// The promlexer ensures the value string is quoted. Strip first
+		// and last character.
+		p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)
+
+		// Free trailing commas are allowed.
+ if t = p.nextToken(); t == tComma { + t = p.nextToken() + } + } +} + +var lvalReplacer = strings.NewReplacer( + `\"`, "\"", + `\\`, "\\", + `\n`, "\n", +) + +var helpReplacer = strings.NewReplacer( + `\\`, "\\", + `\n`, "\n", +) + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func parseFloat(s string) (float64, error) { + // Keep to pre-Go 1.13 float formats. + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.nometa.txt b/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.nometa.txt new file mode 100644 index 0000000000000000000000000000000000000000..235f0aa464b9655514d30cd89ffeb6600b5783de --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.nometa.txt @@ -0,0 +1,411 @@ +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +go_goroutines 33 +go_memstats_alloc_bytes 1.7518624e+07 +go_memstats_alloc_bytes_total 8.3062296e+08 +go_memstats_buck_hash_sys_bytes 1.494637e+06 +go_memstats_frees_total 4.65658e+06 +go_memstats_gc_sys_bytes 1.107968e+06 +go_memstats_heap_alloc_bytes 1.7518624e+07 +go_memstats_heap_idle_bytes 6.668288e+06 +go_memstats_heap_inuse_bytes 1.8956288e+07 +go_memstats_heap_objects 72755 +go_memstats_heap_released_bytes_total 0 +go_memstats_heap_sys_bytes 2.5624576e+07 +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +go_memstats_lookups_total 2089 +go_memstats_mallocs_total 4.729335e+06 +go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_sys_bytes 16384 +go_memstats_mspan_inuse_bytes 211520 +go_memstats_mspan_sys_bytes 245760 +go_memstats_next_gc_bytes 2.033527e+07 +go_memstats_other_sys_bytes 2.077323e+06 +go_memstats_stack_inuse_bytes 1.6384e+06 +go_memstats_stack_sys_bytes 1.6384e+06 +go_memstats_sys_bytes 3.2205048e+07 +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 
0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 +http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN 
+http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN 
+http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 +http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN 
+http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN 
+http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 +http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +prometheus_config_last_reload_successful 1 +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 
+prometheus_evaluator_duration_seconds_count 1 +prometheus_evaluator_iterations_skipped_total 0 +prometheus_notifications_dropped_total 0 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +prometheus_sd_azure_refresh_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +prometheus_sd_ec2_refresh_failures_total 0 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +prometheus_sd_gce_refresh_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +prometheus_sd_marathon_refresh_failures_total 0 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +prometheus_target_skipped_scrapes_total 0 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 +# EOF diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.txt b/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.txt new file mode 100644 index 0000000000000000000000000000000000000000..174f383e911fff8c85e06ef297fd878f3ffddac9 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.txt @@ -0,0 +1,529 @@ +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 33 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 1.7518624e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 8.3062296e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.494637e+06 +# HELP go_memstats_frees_total Total number of frees. 
+# TYPE go_memstats_frees_total counter +go_memstats_frees_total 4.65658e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 1.107968e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 1.7518624e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 6.668288e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.8956288e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 72755 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.5624576e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 2089 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 4.729335e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 211520 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 245760 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 2.033527e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.077323e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.6384e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.6384e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 3.2205048e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 
+http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. 
+# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 
+http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 
+http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. +# TYPE prometheus_config_last_reload_successful gauge +prometheus_config_last_reload_successful 1 +# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
+# TYPE prometheus_evaluator_duration_seconds summary +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. +# TYPE prometheus_evaluator_iterations_skipped_total counter +prometheus_evaluator_iterations_skipped_total 0 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. +# TYPE prometheus_sd_azure_refresh_duration_seconds summary +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. +# TYPE prometheus_sd_azure_refresh_failures_total counter +prometheus_sd_azure_refresh_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. +# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
+# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. +# TYPE prometheus_sd_ec2_refresh_duration_seconds summary +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. +# TYPE prometheus_sd_ec2_refresh_failures_total counter +prometheus_sd_ec2_refresh_failures_total 0 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. +# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. +# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. +# TYPE prometheus_sd_gce_refresh_duration summary +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. +# TYPE prometheus_sd_gce_refresh_failures_total counter +prometheus_sd_gce_refresh_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. +# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. +# TYPE prometheus_sd_marathon_refresh_failures_total counter +prometheus_sd_marathon_refresh_failures_total 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. +# TYPE prometheus_target_skipped_scrapes_total counter +prometheus_target_skipped_scrapes_total 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. +# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. 
+# TYPE prometheus_treecache_zookeeper_failures_total counter +prometheus_treecache_zookeeper_failures_total 0 +# EOF diff --git a/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go new file mode 100644 index 0000000000000000000000000000000000000000..a7f03b0caf1b7b0295077c704a4209199fa96cea --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import "time" + +// FromTime returns a new millisecond timestamp from a time. +func FromTime(t time.Time) int64 { + return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) +} + +// Time returns a new time.Time object from a millisecond timestamp. +func Time(ts int64) time.Time { + return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() +} diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go new file mode 100644 index 0000000000000000000000000000000000000000..2360f34aca1bed67d7f8aef851c5c4b8df650d17 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -0,0 +1,2119 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "container/heap" + "context" + "fmt" + "math" + "regexp" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/uber/jaeger-client-go" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/stats" +) + +const ( + namespace = "prometheus" + subsystem = "engine" + queryTag = "query" + env = "query execution" + defaultLookbackDelta = 5 * time.Minute + + // The largest SampleValue that can be converted to an int64 without overflow. + maxInt64 = 9223372036854774784 + // The smallest SampleValue that can be converted to an int64 without underflow. 
+	minInt64 = -9223372036854775808 +) + +var ( + // DefaultEvaluationInterval is the default evaluation interval of + // a subquery in milliseconds. + DefaultEvaluationInterval int64 +) + +// SetDefaultEvaluationInterval sets DefaultEvaluationInterval. +func SetDefaultEvaluationInterval(ev time.Duration) { + atomic.StoreInt64(&DefaultEvaluationInterval, durationToInt64Millis(ev)) +} + +// GetDefaultEvaluationInterval returns the DefaultEvaluationInterval in milliseconds. +func GetDefaultEvaluationInterval() int64 { + return atomic.LoadInt64(&DefaultEvaluationInterval) +} + +type engineMetrics struct { + currentQueries prometheus.Gauge + maxConcurrentQueries prometheus.Gauge + queryLogEnabled prometheus.Gauge + queryLogFailures prometheus.Counter + queryQueueTime prometheus.Summary + queryPrepareTime prometheus.Summary + queryInnerEval prometheus.Summary + queryResultSort prometheus.Summary +} + +// convertibleToInt64 returns true if v does not over-/underflow an int64. +func convertibleToInt64(v float64) bool { + return v <= maxInt64 && v >= minInt64 +} + +type ( + // ErrQueryTimeout is returned if a query timed out during processing. + ErrQueryTimeout string + // ErrQueryCanceled is returned if a query was canceled during processing. + ErrQueryCanceled string + // ErrTooManySamples is returned if a query would load more than the maximum allowed samples into memory. + ErrTooManySamples string + // ErrStorage is returned if an error was encountered in the storage layer + // during query handling. + ErrStorage struct{ Err error } +) + +func (e ErrQueryTimeout) Error() string { + return fmt.Sprintf("query timed out in %s", string(e)) +} +func (e ErrQueryCanceled) Error() string { + return fmt.Sprintf("query was canceled in %s", string(e)) +} +func (e ErrTooManySamples) Error() string { + return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) +} +func (e ErrStorage) Error() string { + return e.Err.Error() +} + +// QueryLogger is an interface that can be used to log all the queries run +// by the engine. +type QueryLogger interface { + Log(...interface{}) error + Close() error +} + +// A Query is derived from a raw query string and can be run against an engine +// it is associated with. +type Query interface { + // Exec processes the query. Can only be called once. + Exec(ctx context.Context) *Result + // Close recovers memory used by the query result. + Close() + // Statement returns the parsed statement of the query. + Statement() parser.Statement + // Stats returns statistics about the lifetime of the query. + Stats() *stats.QueryTimers + // Cancel signals that a running query execution should be aborted. + Cancel() +} + +// query implements the Query interface. +type query struct { + // Underlying data provider. + queryable storage.Queryable + // The original query string. + q string + // Statement of the parsed query. + stmt parser.Statement + // Timer stats for the query execution. + stats *stats.QueryTimers + // Result matrix for reuse. + matrix Matrix + // Cancellation function for the query. + cancel func() + + // The engine against which the query is executed. + ng *Engine +} + +type queryOrigin struct{} + +// Statement implements the Query interface. +func (q *query) Statement() parser.Statement { + return q.stmt +} + +// Stats implements the Query interface. +func (q *query) Stats() *stats.QueryTimers { + return q.stats +} + +// Cancel implements the Query interface.
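+// It is safe to call Cancel before Exec has run: q.cancel stays nil until
+// exec installs the context's cancel function, and the nil case is handled
+// below.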
+func (q *query) Cancel() { + if q.cancel != nil { + q.cancel() + } +} + +// Close implements the Query interface. +func (q *query) Close() { + for _, s := range q.matrix { + putPointSlice(s.Points) + } +} + +// Exec implements the Query interface. +func (q *query) Exec(ctx context.Context) *Result { + if span := opentracing.SpanFromContext(ctx); span != nil { + span.SetTag(queryTag, q.stmt.String()) + } + + // Exec query. + res, warnings, err := q.ng.exec(ctx, q) + + return &Result{Err: err, Value: res, Warnings: warnings} +} + +// contextDone returns an error if the context was canceled or timed out. +func contextDone(ctx context.Context, env string) error { + if err := ctx.Err(); err != nil { + return contextErr(err, env) + } + return nil +} + +func contextErr(err error, env string) error { + switch err { + case context.Canceled: + return ErrQueryCanceled(env) + case context.DeadlineExceeded: + return ErrQueryTimeout(env) + default: + return err + } +} + +// EngineOpts contains configuration options used when creating a new Engine. +type EngineOpts struct { + Logger log.Logger + Reg prometheus.Registerer + MaxSamples int + Timeout time.Duration + ActiveQueryTracker *ActiveQueryTracker + // LookbackDelta determines the time since the last sample after which a time + // series is considered stale. + LookbackDelta time.Duration +} + +// Engine handles the lifetime of queries from beginning to end. +// It is connected to a querier. +type Engine struct { + logger log.Logger + metrics *engineMetrics + timeout time.Duration + maxSamplesPerQuery int + activeQueryTracker *ActiveQueryTracker + queryLogger QueryLogger + queryLoggerLock sync.RWMutex + lookbackDelta time.Duration +} + +// NewEngine returns a new engine. +func NewEngine(opts EngineOpts) *Engine { + if opts.Logger == nil { + opts.Logger = log.NewNopLogger() + } + + metrics := &engineMetrics{ + currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queries", + Help: "The current number of queries being executed or waiting.", + }), + queryLogEnabled: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_log_enabled", + Help: "State of the query log.", + }), + queryLogFailures: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_log_failures_total", + Help: "The number of query log failures.", + }), + maxConcurrentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queries_concurrent_max", + Help: "The max number of concurrent queries.", + }), + queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "queue_time"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "prepare_time"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "inner_eval"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 
0.01, 0.99: 0.001}, + }), + queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "result_sort"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + } + + if t := opts.ActiveQueryTracker; t != nil { + metrics.maxConcurrentQueries.Set(float64(t.GetMaxConcurrent())) + } else { + metrics.maxConcurrentQueries.Set(-1) + } + + if opts.LookbackDelta == 0 { + opts.LookbackDelta = defaultLookbackDelta + if l := opts.Logger; l != nil { + level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) + } + } + + if opts.Reg != nil { + opts.Reg.MustRegister( + metrics.currentQueries, + metrics.maxConcurrentQueries, + metrics.queryLogEnabled, + metrics.queryLogFailures, + metrics.queryQueueTime, + metrics.queryPrepareTime, + metrics.queryInnerEval, + metrics.queryResultSort, + ) + } + + return &Engine{ + timeout: opts.Timeout, + logger: opts.Logger, + metrics: metrics, + maxSamplesPerQuery: opts.MaxSamples, + activeQueryTracker: opts.ActiveQueryTracker, + lookbackDelta: opts.LookbackDelta, + } +} + +// SetQueryLogger sets the query logger. +func (ng *Engine) SetQueryLogger(l QueryLogger) { + ng.queryLoggerLock.Lock() + defer ng.queryLoggerLock.Unlock() + + if ng.queryLogger != nil { + // An error closing the old file descriptor should + // not make reload fail; only log a warning. + err := ng.queryLogger.Close() + if err != nil { + level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) + } + } + + ng.queryLogger = l + + if l != nil { + ng.metrics.queryLogEnabled.Set(1) + } else { + ng.metrics.queryLogEnabled.Set(0) + } +} + +// NewInstantQuery returns an evaluation query for the given expression at the given time. +func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) (Query, error) { + expr, err := parser.ParseExpr(qs) + if err != nil { + return nil, err + } + qry := ng.newQuery(q, expr, ts, ts, 0) + qry.q = qs + + return qry, nil +} + +// NewRangeQuery returns an evaluation query for the given time range and with +// the resolution set by the interval. +func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) { + expr, err := parser.ParseExpr(qs) + if err != nil { + return nil, err + } + if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { + return nil, errors.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) + } + qry := ng.newQuery(q, expr, start, end, interval) + qry.q = qs + + return qry, nil +} + +func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) *query { + es := &parser.EvalStmt{ + Expr: expr, + Start: start, + End: end, + Interval: interval, + } + qry := &query{ + stmt: es, + ng: ng, + stats: stats.NewQueryTimers(), + queryable: q, + } + return qry +} + +func (ng *Engine) newTestQuery(f func(context.Context) error) Query { + qry := &query{ + q: "test statement", + stmt: parser.TestStmt(f), + ng: ng, + stats: stats.NewQueryTimers(), + } + return qry +} + +// exec executes the query. +// +// At this point per query only one EvalStmt is evaluated. Alert and record +// statements are not handled by the Engine. 
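+// exec also enforces the engine-wide timeout and, when an ActiveQueryTracker
+// is configured, waits in the query queue until a concurrency slot is free;
+// cancellation in either phase surfaces as ErrQueryCanceled or
+// ErrQueryTimeout via contextErr.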
+func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, w storage.Warnings, err error) { + ng.metrics.currentQueries.Inc() + defer ng.metrics.currentQueries.Dec() + + ctx, cancel := context.WithTimeout(ctx, ng.timeout) + q.cancel = cancel + + defer func() { + ng.queryLoggerLock.RLock() + if l := ng.queryLogger; l != nil { + params := make(map[string]interface{}, 4) + params["query"] = q.q + if eq, ok := q.Statement().(*parser.EvalStmt); ok { + params["start"] = formatDate(eq.Start) + params["end"] = formatDate(eq.End) + // The step provided by the user is in seconds. + params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) + } + f := []interface{}{"params", params} + if err != nil { + f = append(f, "error", err) + } + f = append(f, "stats", stats.NewQueryStats(q.Stats())) + if span := opentracing.SpanFromContext(ctx); span != nil { + if spanCtx, ok := span.Context().(jaeger.SpanContext); ok { + f = append(f, "spanID", spanCtx.SpanID()) + } + } + if origin := ctx.Value(queryOrigin{}); origin != nil { + for k, v := range origin.(map[string]interface{}) { + f = append(f, k, v) + } + } + if err := l.Log(f...); err != nil { + ng.metrics.queryLogFailures.Inc() + level.Error(ng.logger).Log("msg", "can't log query", "err", err) + } + } + ng.queryLoggerLock.RUnlock() + }() + + execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) + defer execSpanTimer.Finish() + + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + // Log query in active log. The active log guarantees that we don't run over + // MaxConcurrent queries. + if ng.activeQueryTracker != nil { + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + if err != nil { + queueSpanTimer.Finish() + return nil, nil, contextErr(err, "query queue") + } + defer ng.activeQueryTracker.Delete(queryIndex) + } + queueSpanTimer.Finish() + + // Cancel when execution is done or an error was raised. + defer q.cancel() + + const env = "query execution" + + evalSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.EvalTotalTime) + defer evalSpanTimer.Finish() + + // The base context might already be canceled on the first iteration (e.g. during shutdown). + if err := contextDone(ctx, env); err != nil { + return nil, nil, err + } + + switch s := q.Statement().(type) { + case *parser.EvalStmt: + return ng.execEvalStmt(ctx, q, s) + case parser.TestStmt: + return nil, nil, s(ctx) + } + + panic(errors.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) +} + +func timeMilliseconds(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +func durationMilliseconds(d time.Duration) int64 { + return int64(d / (time.Millisecond / time.Nanosecond)) +} + +// execEvalStmt evaluates the expression of an evaluation statement for the given time range. 
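+// An instant query (Start == End, Interval == 0) is executed as a range
+// evaluation with a single step and a 1ms interval; the one-point Matrix it
+// produces is converted back to the expression's value type below.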
+func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { + prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) + mint := ng.findMinTime(s) + querier, err := query.queryable.Querier(ctxPrepare, timestamp.FromTime(mint), timestamp.FromTime(s.End)) + if err != nil { + prepareSpanTimer.Finish() + return nil, nil, err + } + defer querier.Close() + + warnings, err := ng.populateSeries(ctxPrepare, querier, s) + prepareSpanTimer.Finish() + + if err != nil { + return nil, warnings, err + } + + evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval) + // Instant evaluation. This is executed as a range evaluation with one step. + if s.Start == s.End && s.Interval == 0 { + start := timeMilliseconds(s.Start) + evaluator := &evaluator{ + startTimestamp: start, + endTimestamp: start, + interval: 1, + ctx: ctxInnerEval, + maxSamples: ng.maxSamplesPerQuery, + defaultEvalInterval: GetDefaultEvaluationInterval(), + logger: ng.logger, + lookbackDelta: ng.lookbackDelta, + } + + val, err := evaluator.Eval(s.Expr) + if err != nil { + return nil, warnings, err + } + + evalSpanTimer.Finish() + + var mat Matrix + + switch result := val.(type) { + case Matrix: + mat = result + case String: + return result, warnings, nil + default: + panic(errors.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) + } + + query.matrix = mat + switch s.Expr.Type() { + case parser.ValueTypeVector: + // Convert matrix with one value per series into vector. + vector := make(Vector, len(mat)) + for i, s := range mat { + // Point might have a different timestamp, force it to the evaluation + // timestamp as that is when we ran the evaluation. + vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} + } + return vector, warnings, nil + case parser.ValueTypeScalar: + return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil + case parser.ValueTypeMatrix: + return mat, warnings, nil + default: + panic(errors.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) + } + } + + // Range evaluation. + evaluator := &evaluator{ + startTimestamp: timeMilliseconds(s.Start), + endTimestamp: timeMilliseconds(s.End), + interval: durationMilliseconds(s.Interval), + ctx: ctxInnerEval, + maxSamples: ng.maxSamplesPerQuery, + defaultEvalInterval: GetDefaultEvaluationInterval(), + logger: ng.logger, + lookbackDelta: ng.lookbackDelta, + } + val, err := evaluator.Eval(s.Expr) + if err != nil { + return nil, warnings, err + } + evalSpanTimer.Finish() + + mat, ok := val.(Matrix) + if !ok { + panic(errors.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) + } + query.matrix = mat + + if err := contextDone(ctx, "expression evaluation"); err != nil { + return nil, warnings, err + } + + // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. + sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort) + sort.Sort(mat) + sortSpanTimer.Finish() + + return mat, warnings, nil +} + +// cumulativeSubqueryOffset returns the sum of range and offset of all subqueries in the path. 
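+// E.g. a selector nested inside foo[10m:1m] offset 5m contributes
+// 10m + 5m = 15m, pushing the selection start early enough to cover the
+// entire subquery window.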
+func (ng *Engine) cumulativeSubqueryOffset(path []parser.Node) time.Duration { + var subqOffset time.Duration + for _, node := range path { + switch n := node.(type) { + case *parser.SubqueryExpr: + subqOffset += n.Range + n.Offset + } + } + return subqOffset +} + +func (ng *Engine) findMinTime(s *parser.EvalStmt) time.Time { + var maxOffset time.Duration + parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { + subqOffset := ng.cumulativeSubqueryOffset(path) + switch n := node.(type) { + case *parser.VectorSelector: + if maxOffset < ng.lookbackDelta+subqOffset { + maxOffset = ng.lookbackDelta + subqOffset + } + if n.Offset+ng.lookbackDelta+subqOffset > maxOffset { + maxOffset = n.Offset + ng.lookbackDelta + subqOffset + } + case *parser.MatrixSelector: + if maxOffset < n.Range+subqOffset { + maxOffset = n.Range + subqOffset + } + if m := n.VectorSelector.(*parser.VectorSelector).Offset + n.Range + subqOffset; m > maxOffset { + maxOffset = m + } + } + return nil + }) + return s.Start.Add(-maxOffset) +} + +func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) (storage.Warnings, error) { + var ( + // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. + // The evaluation of the VectorSelector inside then evaluates the given range and unsets + // the variable. + evalRange time.Duration + warnings storage.Warnings + err error + ) + + parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { + var set storage.SeriesSet + var wrn storage.Warnings + hints := &storage.SelectHints{ + Start: timestamp.FromTime(s.Start), + End: timestamp.FromTime(s.End), + Step: durationToInt64Millis(s.Interval), + } + + // We need to make sure we select the timerange selected by the subquery. + // TODO(gouthamve): cumulativeSubqueryOffset gives the sum of range and the offset + // we can optimise it by separating out the range and offsets, and subtracting the offsets + // from end also. + subqOffset := ng.cumulativeSubqueryOffset(path) + offsetMilliseconds := durationMilliseconds(subqOffset) + hints.Start = hints.Start - offsetMilliseconds + + switch n := node.(type) { + case *parser.VectorSelector: + if evalRange == 0 { + hints.Start = hints.Start - durationMilliseconds(ng.lookbackDelta) + } else { + hints.Range = durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have (end-start) + range selected + // this way we have `range` data before the start time + hints.Start = hints.Start - durationMilliseconds(evalRange) + evalRange = 0 + } + + hints.Func = extractFuncFromPath(path) + hints.By, hints.Grouping = extractGroupsFromPath(path) + if n.Offset > 0 { + offsetMilliseconds := durationMilliseconds(n.Offset) + hints.Start = hints.Start - offsetMilliseconds + hints.End = hints.End - offsetMilliseconds + } + + set, wrn, err = querier.Select(false, hints, n.LabelMatchers...) + warnings = append(warnings, wrn...) + if err != nil { + level.Error(ng.logger).Log("msg", "error selecting series set", "err", err) + return err + } + n.UnexpandedSeriesSet = set + + case *parser.MatrixSelector: + evalRange = n.Range + } + return nil + }) + return warnings, err +} + +// extractFuncFromPath walks up the path and searches for the first instance of +// a function or aggregation. 
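+// E.g. for rate(foo[5m]) it reports "rate" and for sum(foo) it reports
+// "sum"; it stops at binary expressions, where a per-series function hint is
+// no longer meaningful.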
+func extractFuncFromPath(p []parser.Node) string { + if len(p) == 0 { + return "" + } + switch n := p[len(p)-1].(type) { + case *parser.AggregateExpr: + return n.Op.String() + case *parser.Call: + return n.Func.Name + case *parser.BinaryExpr: + // If we hit a binary expression we terminate since we only care about functions + // or aggregations over a single metric. + return "" + } + return extractFuncFromPath(p[:len(p)-1]) +} + +// extractGroupsFromPath parses vector outer function and extracts grouping information if by or without was used. +func extractGroupsFromPath(p []parser.Node) (bool, []string) { + if len(p) == 0 { + return false, nil + } + switch n := p[len(p)-1].(type) { + case *parser.AggregateExpr: + return !n.Without, n.Grouping + } + return false, nil +} + +func checkForSeriesSetExpansion(ctx context.Context, expr parser.Expr) { + switch e := expr.(type) { + case *parser.MatrixSelector: + checkForSeriesSetExpansion(ctx, e.VectorSelector) + case *parser.VectorSelector: + if e.Series == nil { + series, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) + if err != nil { + panic(err) + } else { + e.Series = series + } + } + } +} + +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, err error) { + for it.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + res = append(res, it.At()) + } + return res, it.Err() +} + +// An evaluator evaluates given expressions over given fixed timestamps. It +// is attached to an engine through which it connects to a querier and reports +// errors. On timeout or cancellation of its context it terminates. +type evaluator struct { + ctx context.Context + + startTimestamp int64 // Start time in milliseconds. + endTimestamp int64 // End time in milliseconds. + interval int64 // Interval in milliseconds. + + maxSamples int + currentSamples int + defaultEvalInterval int64 + logger log.Logger + lookbackDelta time.Duration +} + +// errorf causes a panic with the input formatted into an error. +func (ev *evaluator) errorf(format string, args ...interface{}) { + ev.error(errors.Errorf(format, args...)) +} + +// error causes a panic with the given error. +func (ev *evaluator) error(err error) { + panic(err) +} + +// recover is the handler that turns panics into returns from the top level of evaluation. +func (ev *evaluator) recover(errp *error) { + e := recover() + if e == nil { + return + } + if err, ok := e.(runtime.Error); ok { + // Print the stack trace but do not inhibit the running application. + buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf)) + *errp = errors.Wrap(err, "unexpected error") + } else { + *errp = e.(error) + } +} + +func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, err error) { + defer ev.recover(&err) + return ev.eval(expr), nil +} + +// EvalNodeHelper stores extra information and caches for evaluating a single node across steps. +type EvalNodeHelper struct { + // Evaluation timestamp. + ts int64 + // Vector that can be used for output. + out Vector + + // Caches. + // dropMetricName and label_*. + dmn map[uint64]labels.Labels + // signatureFunc. + sigf map[uint64]uint64 + // funcHistogramQuantile. + signatureToMetricWithBuckets map[uint64]*metricWithBuckets + // label_replace. + regex *regexp.Regexp + + // For binary vector matching. 
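+	// The two signature maps are allocated lazily and cleared rather than
+	// reallocated between steps; resultMetric is kept for the whole
+	// evaluation, since op and matching never change for a given node.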
+ rightSigs map[uint64]Sample + matchedSigs map[uint64]map[uint64]struct{} + resultMetric map[uint64]labels.Labels +} + +// dropMetricName is a cached version of dropMetricName. +func (enh *EvalNodeHelper) dropMetricName(l labels.Labels) labels.Labels { + if enh.dmn == nil { + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + h := l.Hash() + ret, ok := enh.dmn[h] + if ok { + return ret + } + ret = dropMetricName(l) + enh.dmn[h] = ret + return ret +} + +// signatureFunc is a cached version of signatureFunc. +func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { + if enh.sigf == nil { + enh.sigf = make(map[uint64]uint64, len(enh.out)) + } + f := signatureFunc(on, names...) + return func(l labels.Labels) uint64 { + h := l.Hash() + ret, ok := enh.sigf[h] + if ok { + return ret + } + ret = f(l) + enh.sigf[h] = ret + return ret + } +} + +// rangeEval evaluates the given expressions, and then for each step calls +// the given function with the values computed for each expression at that +// step. The return value is the combination into time series of all the +// function call results. +func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) Vector, exprs ...parser.Expr) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + matrixes := make([]Matrix, len(exprs)) + origMatrixes := make([]Matrix, len(exprs)) + originalNumSamples := ev.currentSamples + + for i, e := range exprs { + // Functions will take string arguments from the expressions, not the values. + if e != nil && e.Type() != parser.ValueTypeString { + // ev.currentSamples will be updated to the correct value within the ev.eval call. + matrixes[i] = ev.eval(e).(Matrix) + + // Keep a copy of the original point slices so that they + // can be returned to the pool. + origMatrixes[i] = make(Matrix, len(matrixes[i])) + copy(origMatrixes[i], matrixes[i]) + } + } + + vectors := make([]Vector, len(exprs)) // Input vectors for the function. + args := make([]parser.Value, len(exprs)) // Argument to function. + // Create an output vector that is as big as the input matrix with + // the most time series. + biggestLen := 1 + for i := range exprs { + vectors[i] = make(Vector, 0, len(matrixes[i])) + if len(matrixes[i]) > biggestLen { + biggestLen = len(matrixes[i]) + } + } + enh := &EvalNodeHelper{out: make(Vector, 0, biggestLen)} + seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + tempNumSamples := ev.currentSamples + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } + // Reset number of samples in memory after each timestamp. + ev.currentSamples = tempNumSamples + // Gather input vectors for this timestamp. + for i := range exprs { + vectors[i] = vectors[i][:0] + for si, series := range matrixes[i] { + for _, point := range series.Points { + if point.T == ts { + if ev.currentSamples < ev.maxSamples { + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Points = series.Points[1:] + ev.currentSamples++ + } else { + ev.error(ErrTooManySamples(env)) + } + } + break + } + } + args[i] = vectors[i] + } + // Make the function call. 
+ enh.ts = ts + result := f(args, enh) + if result.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + enh.out = result[:0] // Reuse result vector. + + ev.currentSamples += len(result) + // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also + // needs to include the samples from the result here, as they're still in memory. + tempNumSamples += len(result) + + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + + // If this could be an instant query, shortcut so as not to change sort order. + if ev.endTimestamp == ev.startTimestamp { + mat := make(Matrix, len(result)) + for i, s := range result { + s.Point.T = ts + mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + } + ev.currentSamples = originalNumSamples + mat.TotalSamples() + return mat + } + + // Add samples in output vector to output series. + for _, sample := range result { + h := sample.Metric.Hash() + ss, ok := seriess[h] + if !ok { + ss = Series{ + Metric: sample.Metric, + Points: getPointSlice(numSteps), + } + } + sample.Point.T = ts + ss.Points = append(ss.Points, sample.Point) + seriess[h] = ss + + } + } + + // Reuse the original point slices. + for _, m := range origMatrixes { + for _, s := range m { + putPointSlice(s.Points) + } + } + // Assemble the output matrix. By the time we get here we know we don't have too many samples. + mat := make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + mat = append(mat, ss) + } + ev.currentSamples = originalNumSamples + mat.TotalSamples() + return mat +} + +// evalSubquery evaluates given SubqueryExpr and returns an equivalent +// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. +func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) *parser.MatrixSelector { + val := ev.eval(subq).(Matrix) + vs := &parser.VectorSelector{ + Offset: subq.Offset, + Series: make([]storage.Series, 0, len(val)), + } + ms := &parser.MatrixSelector{ + Range: subq.Range, + VectorSelector: vs, + } + for _, s := range val { + vs.Series = append(vs.Series, NewStorageSeries(s)) + } + return ms +} + +// eval evaluates the given expression as the given AST expression node requires. +func (ev *evaluator) eval(expr parser.Expr) parser.Value { + // This is the top-level evaluation method. + // Thus, we check for timeout/cancellation here. + if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + switch e := expr.(type) { + case *parser.AggregateExpr: + unwrapParenExpr(&e.Param) + if s, ok := e.Param.(*parser.StringLiteral); ok { + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh) + }, e.Expr) + } + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + var param float64 + if e.Param != nil { + param = v[0].(Vector)[0].V + } + return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh) + }, e.Param, e.Expr) + + case *parser.Call: + call := FunctionCalls[e.Func.Name] + + if e.Func.Name == "timestamp" { + // Matrix evaluation always returns the evaluation time, + // so this function needs special handling when given + // a vector selector. 
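+			// (timestamp() must report each sample's own timestamp, which the
+			// step-stamped points produced by rangeEval would mask, so the
+			// selector is evaluated directly at every step instead.)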
+ vs, ok := e.Args[0].(*parser.VectorSelector) + if ok { + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return call([]parser.Value{ev.vectorSelector(vs, enh.ts)}, e.Args, enh) + }) + } + } + + // Check if the function has a matrix argument. + var matrixArgIndex int + var matrixArg bool + for i := range e.Args { + unwrapParenExpr(&e.Args[i]) + a := e.Args[i] + if _, ok := a.(*parser.MatrixSelector); ok { + matrixArgIndex = i + matrixArg = true + break + } + // parser.SubqueryExpr can be used in place of parser.MatrixSelector. + if subq, ok := a.(*parser.SubqueryExpr); ok { + matrixArgIndex = i + matrixArg = true + // Replacing parser.SubqueryExpr with parser.MatrixSelector. + e.Args[i] = ev.evalSubquery(subq) + break + } + } + if !matrixArg { + // Does not have a matrix argument. + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return call(v, e.Args, enh) + }, e.Args...) + } + + inArgs := make([]parser.Value, len(e.Args)) + // Evaluate any non-matrix arguments. + otherArgs := make([]Matrix, len(e.Args)) + otherInArgs := make([]Vector, len(e.Args)) + for i, e := range e.Args { + if i != matrixArgIndex { + otherArgs[i] = ev.eval(e).(Matrix) + otherInArgs[i] = Vector{Sample{}} + inArgs[i] = otherInArgs[i] + } + } + + sel := e.Args[matrixArgIndex].(*parser.MatrixSelector) + selVS := sel.VectorSelector.(*parser.VectorSelector) + + checkForSeriesSetExpansion(ev.ctx, sel) + mat := make(Matrix, 0, len(selVS.Series)) // Output matrix. + offset := durationMilliseconds(selVS.Offset) + selRange := durationMilliseconds(sel.Range) + stepRange := selRange + if stepRange > ev.interval { + stepRange = ev.interval + } + // Reuse objects across steps to save memory allocations. + points := getPointSlice(16) + inMatrix := make(Matrix, 1) + inArgs[matrixArgIndex] = inMatrix + enh := &EvalNodeHelper{out: make(Vector, 0, 1)} + // Process all the calls for one time series at a time. + it := storage.NewBuffer(selRange) + for i, s := range selVS.Series { + points = points[:0] + it.Reset(s.Iterator()) + ss := Series{ + // For all range vector functions, the only change to the + // output labels is dropping the metric name so just do + // it once here. + Metric: dropMetricName(selVS.Series[i].Labels()), + Points: getPointSlice(numSteps), + } + inMatrix[0].Metric = selVS.Series[i].Labels() + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + // Set the non-matrix arguments. + // They are scalar, so it is safe to use the step number + // when looking up the argument, as there will be no gaps. + for j := range e.Args { + if j != matrixArgIndex { + otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + } + } + maxt := ts - offset + mint := maxt - selRange + // Evaluate the matrix selector for this series for this step. + points = ev.matrixIterSlice(it, mint, maxt, points) + if len(points) == 0 { + continue + } + inMatrix[0].Points = points + enh.ts = ts + // Make the function call. + outVec := call(inArgs, e.Args, enh) + enh.out = outVec[:0] + if len(outVec) > 0 { + ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) + } + // Only buffer stepRange milliseconds from the second step on. 
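+				// (stepRange is min(range, interval): the overlap with the
+				// previous window is retained by matrixIterSlice, so only the
+				// non-overlapping tail of each new window must stay buffered.)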
+ it.ReduceDelta(stepRange) + } + if len(ss.Points) > 0 { + if ev.currentSamples < ev.maxSamples { + mat = append(mat, ss) + ev.currentSamples += len(ss.Points) + } else { + ev.error(ErrTooManySamples(env)) + } + } else { + putPointSlice(ss.Points) + } + } + + putPointSlice(points) + + // The absent_over_time function returns 0 or 1 series. So far, the matrix + // contains multiple series. The following code will create a new series + // with values of 1 for the timestamps where no series has value. + if e.Func.Name == "absent_over_time" { + steps := int(1 + (ev.endTimestamp-ev.startTimestamp)/ev.interval) + // Iterate once to look for a complete series. + for _, s := range mat { + if len(s.Points) == steps { + return Matrix{} + } + } + + found := map[int64]struct{}{} + + for i, s := range mat { + for _, p := range s.Points { + found[p.T] = struct{}{} + } + if i > 0 && len(found) == steps { + return Matrix{} + } + } + + newp := make([]Point, 0, steps-len(found)) + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if _, ok := found[ts]; !ok { + newp = append(newp, Point{T: ts, V: 1}) + } + } + + return Matrix{ + Series{ + Metric: createLabelsForAbsentFunction(e.Args[0]), + Points: newp, + }, + } + } + + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + + return mat + + case *parser.ParenExpr: + return ev.eval(e.Expr) + + case *parser.UnaryExpr: + mat := ev.eval(e.Expr).(Matrix) + if e.Op == parser.SUB { + for i := range mat { + mat[i].Metric = dropMetricName(mat[i].Metric) + for j := range mat[i].Points { + mat[i].Points[j].V = -mat[i].Points[j].V + } + } + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } + return mat + + case *parser.BinaryExpr: + switch lt, rt := e.LHS.Type(), e.RHS.Type(); { + case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) + return append(enh.out, Sample{Point: Point{V: val}}) + }, e.LHS, e.RHS) + case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: + switch e.Op { + case parser.LAND: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + case parser.LOR: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + case parser.LUNLESS: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + default: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh) + }, e.LHS, e.RHS) + } + + case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh) + }, e.LHS, e.RHS) + + case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, 
enh) + }, e.LHS, e.RHS) + } + + case *parser.NumberLiteral: + return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { + return append(enh.out, Sample{Point: Point{V: e.Val}}) + }) + + case *parser.VectorSelector: + checkForSeriesSetExpansion(ev.ctx, e) + mat := make(Matrix, 0, len(e.Series)) + it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta)) + for i, s := range e.Series { + it.Reset(s.Iterator()) + ss := Series{ + Metric: e.Series[i].Labels(), + Points: getPointSlice(numSteps), + } + + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + _, v, ok := ev.vectorSelectorSingle(it, e, ts) + if ok { + if ev.currentSamples < ev.maxSamples { + ss.Points = append(ss.Points, Point{V: v, T: ts}) + ev.currentSamples++ + } else { + ev.error(ErrTooManySamples(env)) + } + } + } + + if len(ss.Points) > 0 { + mat = append(mat, ss) + } else { + putPointSlice(ss.Points) + } + + } + return mat + + case *parser.MatrixSelector: + if ev.startTimestamp != ev.endTimestamp { + panic(errors.New("cannot do range evaluation of matrix selector")) + } + return ev.matrixSelector(e) + + case *parser.SubqueryExpr: + offsetMillis := durationToInt64Millis(e.Offset) + rangeMillis := durationToInt64Millis(e.Range) + newEv := &evaluator{ + endTimestamp: ev.endTimestamp - offsetMillis, + interval: ev.defaultEvalInterval, + ctx: ev.ctx, + currentSamples: ev.currentSamples, + maxSamples: ev.maxSamples, + defaultEvalInterval: ev.defaultEvalInterval, + logger: ev.logger, + lookbackDelta: ev.lookbackDelta, + } + + if e.Step != 0 { + newEv.interval = durationToInt64Millis(e.Step) + } + + // Start with the first timestamp after (ev.startTimestamp - offset - range) + // that is aligned with the step (multiple of 'newEv.interval'). + newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval) + if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) { + newEv.startTimestamp += newEv.interval + } + + res := newEv.eval(e.Expr) + ev.currentSamples = newEv.currentSamples + return res + case *parser.StringLiteral: + return String{V: e.Val, T: ev.startTimestamp} + } + + panic(errors.Errorf("unhandled expression of type: %T", expr)) +} + +func durationToInt64Millis(d time.Duration) int64 { + return int64(d / time.Millisecond) +} + +// vectorSelector evaluates a *parser.VectorSelector expression. +func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) Vector { + checkForSeriesSetExpansion(ev.ctx, node) + + var ( + vec = make(Vector, 0, len(node.Series)) + ) + + it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta)) + for i, s := range node.Series { + it.Reset(s.Iterator()) + + t, v, ok := ev.vectorSelectorSingle(it, node, ts) + if ok { + vec = append(vec, Sample{ + Metric: node.Series[i].Labels(), + Point: Point{V: v, T: t}, + }) + ev.currentSamples++ + } + + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + return vec +} + +// vectorSelectorSingle evaluates a instant vector for the iterator of one time series. 
+func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) { + refTime := ts - durationMilliseconds(node.Offset) + var t int64 + var v float64 + + ok := it.Seek(refTime) + if !ok { + if it.Err() != nil { + ev.error(it.Err()) + } + } + + if ok { + t, v = it.Values() + } + + if !ok || t > refTime { + t, v, ok = it.PeekBack(1) + if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + return 0, 0, false + } + } + if value.IsStaleNaN(v) { + return 0, 0, false + } + return t, v, true +} + +var pointPool = sync.Pool{} + +func getPointSlice(sz int) []Point { + p := pointPool.Get() + if p != nil { + return p.([]Point) + } + return make([]Point, 0, sz) +} + +func putPointSlice(p []Point) { + //lint:ignore SA6002 relax staticcheck verification. + pointPool.Put(p[:0]) +} + +// matrixSelector evaluates a *parser.MatrixSelector expression. +func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) Matrix { + checkForSeriesSetExpansion(ev.ctx, node) + + vs := node.VectorSelector.(*parser.VectorSelector) + + var ( + offset = durationMilliseconds(vs.Offset) + maxt = ev.startTimestamp - offset + mint = maxt - durationMilliseconds(node.Range) + matrix = make(Matrix, 0, len(vs.Series)) + ) + + it := storage.NewBuffer(durationMilliseconds(node.Range)) + series := vs.Series + + for i, s := range series { + if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } + it.Reset(s.Iterator()) + ss := Series{ + Metric: series[i].Labels(), + } + + ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) + + if len(ss.Points) > 0 { + matrix = append(matrix, ss) + } else { + putPointSlice(ss.Points) + } + } + return matrix +} + +// matrixIterSlice populates a matrix vector covering the requested range for a +// single time series, with points retrieved from an iterator. +// +// As an optimization, the matrix vector may already contain points of the same +// time series from the evaluation of an earlier step (with lower mint and maxt +// values). Any such points falling before mint are discarded; points that fall +// into the [mint, maxt] range are retained; only points with later timestamps +// are populated from the iterator. +func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { + if len(out) > 0 && out[len(out)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; out[drop].T < mint; drop++ { + } + copy(out, out[drop:]) + out = out[:len(out)-drop] + // Only append points with timestamps after the last timestamp we have. + mint = out[len(out)-1].T + 1 + } else { + out = out[:0] + } + + ok := it.Seek(maxt) + if !ok { + if it.Err() != nil { + ev.error(it.Err()) + } + } + + buf := it.Buffer() + for buf.Next() { + t, v := buf.At() + if value.IsStaleNaN(v) { + continue + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + out = append(out, Point{T: t, V: v}) + ev.currentSamples++ + } + } + // The seeked sample might also be in the range. 
+ if ok { + t, v := it.Values() + if t == maxt && !value.IsStaleNaN(v) { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + out = append(out, Point{T: t, V: v}) + ev.currentSamples++ + } + } + return out +} + +func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != parser.CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + // The set of signatures for the right-hand side Vector. + rightSigs := map[uint64]struct{}{} + // Add all rhs samples to a map so we can easily find matches later. + for _, rs := range rhs { + rightSigs[sigf(rs.Metric)] = struct{}{} + } + + for _, ls := range lhs { + // If there's a matching entry in the right-hand side Vector, add the sample. + if _, ok := rightSigs[sigf(ls.Metric)]; ok { + enh.out = append(enh.out, ls) + } + } + return enh.out +} + +func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != parser.CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + leftSigs := map[uint64]struct{}{} + // Add everything from the left-hand-side Vector. + for _, ls := range lhs { + leftSigs[sigf(ls.Metric)] = struct{}{} + enh.out = append(enh.out, ls) + } + // Add all right-hand side elements which have not been added from the left-hand side. + for _, rs := range rhs { + if _, ok := leftSigs[sigf(rs.Metric)]; !ok { + enh.out = append(enh.out, rs) + } + } + return enh.out +} + +func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != parser.CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + rightSigs := map[uint64]struct{}{} + for _, rs := range rhs { + rightSigs[sigf(rs.Metric)] = struct{}{} + } + + for _, ls := range lhs { + if _, ok := rightSigs[sigf(ls.Metric)]; !ok { + enh.out = append(enh.out, ls) + } + } + return enh.out +} + +// VectorBinop evaluates a binary operation between two Vectors, excluding set operators. +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, enh *EvalNodeHelper) Vector { + if matching.Card == parser.CardManyToMany { + panic("many-to-many only allowed for set operators") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + // The control flow below handles one-to-one or many-to-one matching. + // For one-to-many, swap sidedness and account for the swap when calculating + // values. + if matching.Card == parser.CardOneToMany { + lhs, rhs = rhs, lhs + } + + // All samples from the rhs hashed by the matching label/values. + if enh.rightSigs == nil { + enh.rightSigs = make(map[uint64]Sample, len(enh.out)) + } else { + for k := range enh.rightSigs { + delete(enh.rightSigs, k) + } + } + rightSigs := enh.rightSigs + + // Add all rhs samples to a map so we can easily find matches later. + for _, rs := range rhs { + sig := sigf(rs.Metric) + // The rhs is guaranteed to be the 'one' side. Having multiple samples + // with the same signature means that the matching is many-to-many. 
+ if duplSample, found := rightSigs[sig]; found { + // oneSide represents which side of the vector represents the 'one' in the many-to-one relationship. + oneSide := "right" + if matching.Card == parser.CardOneToMany { + oneSide = "left" + } + matchedLabels := rs.Metric.MatchLabels(matching.On, matching.MatchingLabels...) + // Many-to-many matching not allowed. + ev.errorf("found duplicate series for the match group %s on the %s hand-side of the operation: [%s, %s]"+ + ";many-to-many matching not allowed: matching labels must be unique on one side", matchedLabels.String(), oneSide, rs.Metric.String(), duplSample.Metric.String()) + } + rightSigs[sig] = rs + } + + // Tracks the match-signature. For one-to-one operations the value is nil. For many-to-one + // the value is a set of signatures to detect duplicated result elements. + if enh.matchedSigs == nil { + enh.matchedSigs = make(map[uint64]map[uint64]struct{}, len(rightSigs)) + } else { + for k := range enh.matchedSigs { + delete(enh.matchedSigs, k) + } + } + matchedSigs := enh.matchedSigs + + // For all lhs samples find a respective rhs sample and perform + // the binary operation. + for _, ls := range lhs { + sig := sigf(ls.Metric) + + rs, found := rightSigs[sig] // Look for a match in the rhs Vector. + if !found { + continue + } + + // Account for potentially swapped sidedness. + vl, vr := ls.V, rs.V + if matching.Card == parser.CardOneToMany { + vl, vr = vr, vl + } + value, keep := vectorElemBinop(op, vl, vr) + if returnBool { + if keep { + value = 1.0 + } else { + value = 0.0 + } + } else if !keep { + continue + } + metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) + + insertedSigs, exists := matchedSigs[sig] + if matching.Card == parser.CardOneToOne { + if exists { + ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)") + } + matchedSigs[sig] = nil // Set existence to true. + } else { + // In many-to-one matching the grouping labels have to ensure a unique metric + // for the result Vector. Check whether those labels have already been added for + // the same matching labels. + insertSig := metric.Hash() + + if !exists { + insertedSigs = map[uint64]struct{}{} + matchedSigs[sig] = insertedSigs + } else if _, duplicate := insertedSigs[insertSig]; duplicate { + ev.errorf("multiple matches for labels: grouping labels must ensure unique matches") + } + insertedSigs[insertSig] = struct{}{} + } + + enh.out = append(enh.out, Sample{ + Metric: metric, + Point: Point{V: value}, + }) + } + return enh.out +} + +// signatureFunc returns a function that calculates the signature for a metric +// ignoring the provided labels. If on, then the given labels are only used instead. +func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { + sort.Strings(names) + if on { + return func(lset labels.Labels) uint64 { + h, _ := lset.HashForLabels(make([]byte, 0, 1024), names...) + return h + } + } + return func(lset labels.Labels) uint64 { + h, _ := lset.HashWithoutLabels(make([]byte, 0, 1024), names...) + return h + } +} + +// resultMetric returns the metric for the given sample(s) based on the Vector +// binary operation and the matching options. 
+func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, enh *EvalNodeHelper) labels.Labels { + if enh.resultMetric == nil { + enh.resultMetric = make(map[uint64]labels.Labels, len(enh.out)) + } + // op and matching are always the same for a given node, so + // there's no need to include them in the hash key. + // If the lhs and rhs are the same then the xor would be 0, + // so add in one side to protect against that. + lh := lhs.Hash() + h := (lh ^ rhs.Hash()) + lh + if ret, ok := enh.resultMetric[h]; ok { + return ret + } + + lb := labels.NewBuilder(lhs) + + if shouldDropMetricName(op) { + lb.Del(labels.MetricName) + } + + if matching.Card == parser.CardOneToOne { + if matching.On { + Outer: + for _, l := range lhs { + for _, n := range matching.MatchingLabels { + if l.Name == n { + continue Outer + } + } + lb.Del(l.Name) + } + } else { + lb.Del(matching.MatchingLabels...) + } + } + for _, ln := range matching.Include { + // Included labels from the `group_x` modifier are taken from the "one"-side. + if v := rhs.Get(ln); v != "" { + lb.Set(ln, v) + } else { + lb.Del(ln) + } + } + + ret := lb.Labels() + enh.resultMetric[h] = ret + return ret +} + +// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { + for _, lhsSample := range lhs { + lv, rv := lhsSample.V, rhs.V + // lhs always contains the Vector. If the original position was different + // swap for calculating the value. + if swap { + lv, rv = rv, lv + } + value, keep := vectorElemBinop(op, lv, rv) + // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. + // We want to always keep the vector element value as the output value, even if it's on the RHS. + if op.IsComparisonOperator() && swap { + value = rv + } + if returnBool { + if keep { + value = 1.0 + } else { + value = 0.0 + } + keep = true + } + if keep { + lhsSample.V = value + if shouldDropMetricName(op) || returnBool { + lhsSample.Metric = enh.dropMetricName(lhsSample.Metric) + } + enh.out = append(enh.out, lhsSample) + } + } + return enh.out +} + +func dropMetricName(l labels.Labels) labels.Labels { + return labels.NewBuilder(l).Del(labels.MetricName).Labels() +} + +// scalarBinop evaluates a binary operation between two Scalars. +func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { + switch op { + case parser.ADD: + return lhs + rhs + case parser.SUB: + return lhs - rhs + case parser.MUL: + return lhs * rhs + case parser.DIV: + return lhs / rhs + case parser.POW: + return math.Pow(lhs, rhs) + case parser.MOD: + return math.Mod(lhs, rhs) + case parser.EQL: + return btos(lhs == rhs) + case parser.NEQ: + return btos(lhs != rhs) + case parser.GTR: + return btos(lhs > rhs) + case parser.LSS: + return btos(lhs < rhs) + case parser.GTE: + return btos(lhs >= rhs) + case parser.LTE: + return btos(lhs <= rhs) + } + panic(errors.Errorf("operator %q not allowed for Scalar operations", op)) +} + +// vectorElemBinop evaluates a binary operation between two Vector elements. 
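VectorscalarBinop above also shows how the `bool` modifier changes comparison semantics: instead of filtering samples, the outcome of the comparison is mapped to 1 or 0 and the sample is always kept. A small self-contained sketch of that (value, keep) contract; `cmpGreater` is a made-up helper for illustration, not engine code:

package main

import "fmt"

// cmpGreater mirrors the (value, keep) contract of vectorElemBinop for ">":
// without the bool modifier the comparison filters samples; with it, the
// result is mapped to 1 or 0 and the sample is always kept.
func cmpGreater(lhs, rhs float64, returnBool bool) (float64, bool) {
	value, keep := lhs, lhs > rhs
	if returnBool {
		if keep {
			return 1, true
		}
		return 0, true
	}
	return value, keep
}

func main() {
	fmt.Println(cmpGreater(3, 5, false)) // 3 false: the sample is dropped
	fmt.Println(cmpGreater(3, 5, true))  // 0 true: kept, with value 0
}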
+func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) { + switch op { + case parser.ADD: + return lhs + rhs, true + case parser.SUB: + return lhs - rhs, true + case parser.MUL: + return lhs * rhs, true + case parser.DIV: + return lhs / rhs, true + case parser.POW: + return math.Pow(lhs, rhs), true + case parser.MOD: + return math.Mod(lhs, rhs), true + case parser.EQL: + return lhs, lhs == rhs + case parser.NEQ: + return lhs, lhs != rhs + case parser.GTR: + return lhs, lhs > rhs + case parser.LSS: + return lhs, lhs < rhs + case parser.GTE: + return lhs, lhs >= rhs + case parser.LTE: + return lhs, lhs <= rhs + } + panic(errors.Errorf("operator %q not allowed for operations between Vectors", op)) +} + +type groupedAggregation struct { + labels labels.Labels + value float64 + mean float64 + groupCount int + heap vectorByValueHeap + reverseHeap vectorByReverseValueHeap +} + +// aggregation evaluates an aggregation operation on a Vector. +func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, enh *EvalNodeHelper) Vector { + + result := map[uint64]*groupedAggregation{} + var k int64 + if op == parser.TOPK || op == parser.BOTTOMK { + f := param.(float64) + if !convertibleToInt64(f) { + ev.errorf("Scalar value %v overflows int64", f) + } + k = int64(f) + if k < 1 { + return Vector{} + } + } + var q float64 + if op == parser.QUANTILE { + q = param.(float64) + } + var valueLabel string + if op == parser.COUNT_VALUES { + valueLabel = param.(string) + if !model.LabelName(valueLabel).IsValid() { + ev.errorf("invalid label name %q", valueLabel) + } + if !without { + grouping = append(grouping, valueLabel) + } + } + + sort.Strings(grouping) + lb := labels.NewBuilder(nil) + buf := make([]byte, 0, 1024) + for _, s := range vec { + metric := s.Metric + + if op == parser.COUNT_VALUES { + lb.Reset(metric) + lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + metric = lb.Labels() + } + + var ( + groupingKey uint64 + ) + if without { + groupingKey, buf = metric.HashWithoutLabels(buf, grouping...) + } else { + groupingKey, buf = metric.HashForLabels(buf, grouping...) + } + + group, ok := result[groupingKey] + // Add a new group if it doesn't exist. + if !ok { + var m labels.Labels + + if without { + lb.Reset(metric) + lb.Del(grouping...) 
+ lb.Del(labels.MetricName) + m = lb.Labels() + } else { + m = make(labels.Labels, 0, len(grouping)) + for _, l := range metric { + for _, n := range grouping { + if l.Name == n { + m = append(m, l) + break + } + } + } + sort.Sort(m) + } + result[groupingKey] = &groupedAggregation{ + labels: m, + value: s.V, + mean: s.V, + groupCount: 1, + } + inputVecLen := int64(len(vec)) + resultSize := k + if k > inputVecLen { + resultSize = inputVecLen + } + if op == parser.STDVAR || op == parser.STDDEV { + result[groupingKey].value = 0.0 + } else if op == parser.TOPK || op == parser.QUANTILE { + result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize) + heap.Push(&result[groupingKey].heap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } else if op == parser.BOTTOMK { + result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize) + heap.Push(&result[groupingKey].reverseHeap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + continue + } + + switch op { + case parser.SUM: + group.value += s.V + + case parser.AVG: + group.groupCount++ + group.mean += (s.V - group.mean) / float64(group.groupCount) + + case parser.MAX: + if group.value < s.V || math.IsNaN(group.value) { + group.value = s.V + } + + case parser.MIN: + if group.value > s.V || math.IsNaN(group.value) { + group.value = s.V + } + + case parser.COUNT, parser.COUNT_VALUES: + group.groupCount++ + + case parser.STDVAR, parser.STDDEV: + group.groupCount++ + delta := s.V - group.mean + group.mean += delta / float64(group.groupCount) + group.value += delta * (s.V - group.mean) + + case parser.TOPK: + if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { + if int64(len(group.heap)) == k { + heap.Pop(&group.heap) + } + heap.Push(&group.heap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + + case parser.BOTTOMK: + if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { + if int64(len(group.reverseHeap)) == k { + heap.Pop(&group.reverseHeap) + } + heap.Push(&group.reverseHeap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + + case parser.QUANTILE: + group.heap = append(group.heap, s) + + default: + panic(errors.Errorf("expected aggregation operator but got %q", op)) + } + } + + // Construct the result Vector from the aggregated groups. + for _, aggr := range result { + switch op { + case parser.AVG: + aggr.value = aggr.mean + + case parser.COUNT, parser.COUNT_VALUES: + aggr.value = float64(aggr.groupCount) + + case parser.STDVAR: + aggr.value = aggr.value / float64(aggr.groupCount) + + case parser.STDDEV: + aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) + + case parser.TOPK: + // The heap keeps the lowest value on top, so reverse it. + sort.Sort(sort.Reverse(aggr.heap)) + for _, v := range aggr.heap { + enh.out = append(enh.out, Sample{ + Metric: v.Metric, + Point: Point{V: v.V}, + }) + } + continue // Bypass default append. + + case parser.BOTTOMK: + // The heap keeps the lowest value on top, so reverse it. + sort.Sort(sort.Reverse(aggr.reverseHeap)) + for _, v := range aggr.reverseHeap { + enh.out = append(enh.out, Sample{ + Metric: v.Metric, + Point: Point{V: v.V}, + }) + } + continue // Bypass default append. + + case parser.QUANTILE: + aggr.value = quantile(q, aggr.heap) + + default: + // For other aggregations, we already have the right value. 
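+			// (sum, max and min need no finalization; their running
+			// result is already in aggr.value.)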
+		}
+
+		enh.out = append(enh.out, Sample{
+			Metric: aggr.labels,
+			Point:  Point{V: aggr.value},
+		})
+	}
+	return enh.out
+}
+
+// btos returns 1 if b is true, 0 otherwise.
+func btos(b bool) float64 {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// shouldDropMetricName returns whether the metric name should be dropped in the
+// result of the op operation.
+func shouldDropMetricName(op parser.ItemType) bool {
+	switch op {
+	case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD:
+		return true
+	default:
+		return false
+	}
+}
+
+// NewOriginContext returns a new context with data about the origin attached.
+func NewOriginContext(ctx context.Context, data map[string]interface{}) context.Context {
+	return context.WithValue(ctx, queryOrigin{}, data)
+}
+
+func formatDate(t time.Time) string {
+	return t.UTC().Format("2006-01-02T15:04:05.000Z07:00")
+}
+
+// unwrapParenExpr does the AST equivalent of removing parentheses around an expression.
+func unwrapParenExpr(e *parser.Expr) {
+	for {
+		if p, ok := (*e).(*parser.ParenExpr); ok {
+			*e = p.Expr
+		} else {
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
new file mode 100644
index 0000000000000000000000000000000000000000..49efadaf3451c8a7d430a212103c5c62fa99d03f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -0,0 +1,1013 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"math"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+)
+
+// FunctionCall is the type of a PromQL function implementation.
+//
+// vals is a list of the evaluated arguments for the function call.
+// For range vectors it will be a Matrix with one series, for instant vectors a
+// Vector, for scalars a Vector with one series whose value is the scalar
+// value, and nil for strings.
+// args are the original arguments to the function, where you can access
+// matrixSelectors, vectorSelectors, and StringLiterals.
+// enh.out is a pre-allocated empty vector that you may use to accumulate
+// output before returning it. The vectors in vals should not be returned.
+// Range vector functions need only return a vector with the right value;
+// the metric and timestamp are not needed.
+// Instant vector functions need only return a vector with the right values and
+// metrics; the timestamps are not needed.
+// Scalar results should be returned as the value of a sample in a Vector.
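To make that calling convention concrete, here is a hypothetical instant-vector function written against it. `funcDouble` is invented for illustration and is not part of PromQL; it would only compile inside this package, where Vector, Sample, Point and EvalNodeHelper are visible:

func funcDouble(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	for _, el := range vals[0].(Vector) {
		enh.out = append(enh.out, Sample{
			// Arithmetic changes the value, so __name__ no longer applies.
			Metric: enh.dropMetricName(el.Metric),
			Point:  Point{V: 2 * el.V},
		})
	}
	return enh.out
}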
+type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector + +// === time() float64 === +func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return Vector{Sample{Point: Point{ + V: float64(enh.ts) / 1000, + }}} +} + +// extrapolatedRate is a utility function for rate/increase/delta. +// It calculates the rate (allowing for counter resets if isCounter is true), +// extrapolates if the first/last sample is close to the boundary, and returns +// the result as either per-second (if isRate is true) or overall. +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { + ms := args[0].(*parser.MatrixSelector) + vs := ms.VectorSelector.(*parser.VectorSelector) + + var ( + samples = vals[0].(Matrix)[0] + rangeStart = enh.ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.ts - durationMilliseconds(vs.Offset) + ) + + // No sense in trying to compute a rate without at least two points. Drop + // this Vector element. + if len(samples.Points) < 2 { + return enh.out + } + var ( + counterCorrection float64 + lastValue float64 + ) + for _, sample := range samples.Points { + if isCounter && sample.V < lastValue { + counterCorrection += lastValue + } + lastValue = sample.V + } + resultValue := lastValue - samples.Points[0].V + counterCorrection + + // Duration between first/last samples and boundary of range. + durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 + + sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + + if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { + // Counters cannot be negative. If we have any slope at + // all (i.e. resultValue went up), we can extrapolate + // the zero point of the counter. If the duration to the + // zero point is shorter than the durationToStart, we + // take the zero point as the start of the series, + // thereby avoiding extrapolation to negative counter + // values. + durationToZero := sampledInterval * (samples.Points[0].V / resultValue) + if durationToZero < durationToStart { + durationToStart = durationToZero + } + } + + // If the first/last samples are close to the boundaries of the range, + // extrapolate the result. This is as we expect that another sample + // will exist given the spacing between samples we've seen thus far, + // with an allowance for noise. 
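+	// For example: over a 60s window with samples every 10s,
+	// averageDurationBetweenSamples is 10s and the threshold below is 11s.
+	// A first sample 5s after the window start is inside the threshold, so
+	// the rate is extrapolated across that 5s gap; a first sample 20s in is
+	// not, and only half an average interval is assumed instead.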
+ extrapolationThreshold := averageDurationBetweenSamples * 1.1 + extrapolateToInterval := sampledInterval + + if durationToStart < extrapolationThreshold { + extrapolateToInterval += durationToStart + } else { + extrapolateToInterval += averageDurationBetweenSamples / 2 + } + if durationToEnd < extrapolationThreshold { + extrapolateToInterval += durationToEnd + } else { + extrapolateToInterval += averageDurationBetweenSamples / 2 + } + resultValue = resultValue * (extrapolateToInterval / sampledInterval) + if isRate { + resultValue = resultValue / ms.Range.Seconds() + } + + return append(enh.out, Sample{ + Point: Point{V: resultValue}, + }) +} + +// === delta(Matrix parser.ValueTypeMatrix) Vector === +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return extrapolatedRate(vals, args, enh, false, false) +} + +// === rate(node parser.ValueTypeMatrix) Vector === +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return extrapolatedRate(vals, args, enh, true, true) +} + +// === increase(node parser.ValueTypeMatrix) Vector === +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return extrapolatedRate(vals, args, enh, true, false) +} + +// === irate(node parser.ValueTypeMatrix) Vector === +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return instantValue(vals, enh.out, true) +} + +// === idelta(node model.ValMatrix) Vector === +func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return instantValue(vals, enh.out, false) +} + +func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { + samples := vals[0].(Matrix)[0] + // No sense in trying to compute a rate without at least two points. Drop + // this Vector element. + if len(samples.Points) < 2 { + return out + } + + lastSample := samples.Points[len(samples.Points)-1] + previousSample := samples.Points[len(samples.Points)-2] + + var resultValue float64 + if isRate && lastSample.V < previousSample.V { + // Counter reset. + resultValue = lastSample.V + } else { + resultValue = lastSample.V - previousSample.V + } + + sampledInterval := lastSample.T - previousSample.T + if sampledInterval == 0 { + // Avoid dividing by 0. + return out + } + + if isRate { + // Convert to per-second. + resultValue /= float64(sampledInterval) / 1000 + } + + return append(out, Sample{ + Point: Point{V: resultValue}, + }) +} + +// Calculate the trend value at the given index i in raw data d. +// This is somewhat analogous to the slope of the trend at the given index. +// The argument "tf" is the trend factor. +// The argument "s0" is the computed smoothed value. +// The argument "s1" is the computed trend factor. +// The argument "b" is the raw input value. +func calcTrendValue(i int, tf, s0, s1, b float64) float64 { + if i == 0 { + return b + } + + x := tf * (s1 - s0) + y := (1 - tf) * b + + return x + y +} + +// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data. +// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current +// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects +// how trends in historical data will affect the current data. A higher trend factor increases the influence. +// of trends. 
+// Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
+func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	samples := vals[0].(Matrix)[0]
+
+	// The smoothing factor argument.
+	sf := vals[1].(Vector)[0].V
+
+	// The trend factor argument.
+	tf := vals[2].(Vector)[0].V
+
+	// Sanity check the input.
+	if sf <= 0 || sf >= 1 {
+		panic(errors.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
+	}
+	if tf <= 0 || tf >= 1 {
+		panic(errors.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
+	}
+
+	l := len(samples.Points)
+
+	// Can't do the smoothing operation with less than two points.
+	if l < 2 {
+		return enh.out
+	}
+
+	var s0, s1, b float64
+	// Set initial values.
+	s1 = samples.Points[0].V
+	b = samples.Points[1].V - samples.Points[0].V
+
+	// Run the smoothing operation.
+	var x, y float64
+	for i := 1; i < l; i++ {
+
+		// Scale the raw value against the smoothing factor.
+		x = sf * samples.Points[i].V
+
+		// Scale the last smoothed value with the trend at this point.
+		b = calcTrendValue(i-1, tf, s0, s1, b)
+		y = (1 - sf) * (s1 + b)
+
+		s0, s1 = s1, x+y
+	}
+
+	return append(enh.out, Sample{
+		Point: Point{V: s1},
+	})
+}
+
+// === sort(node parser.ValueTypeVector) Vector ===
+func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	// NaN should sort to the bottom, so take descending sort with NaN first and
+	// reverse it.
+	byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
+	sort.Sort(sort.Reverse(byValueSorter))
+	return Vector(byValueSorter)
+}
+
+// === sortDesc(node parser.ValueTypeVector) Vector ===
+func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	// NaN should sort to the bottom, so take ascending sort with NaN first and
+	// reverse it.
+	byValueSorter := vectorByValueHeap(vals[0].(Vector))
+	sort.Sort(sort.Reverse(byValueSorter))
+	return Vector(byValueSorter)
+}
+
+// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector ===
+func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	vec := vals[0].(Vector)
+	max := vals[1].(Vector)[0].Point.V
+	for _, el := range vec {
+		enh.out = append(enh.out, Sample{
+			Metric: enh.dropMetricName(el.Metric),
+			Point:  Point{V: math.Min(max, el.V)},
+		})
+	}
+	return enh.out
+}
+
+// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector ===
+func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	vec := vals[0].(Vector)
+	min := vals[1].(Vector)[0].Point.V
+	for _, el := range vec {
+		enh.out = append(enh.out, Sample{
+			Metric: enh.dropMetricName(el.Metric),
+			Point:  Point{V: math.Max(min, el.V)},
+		})
+	}
+	return enh.out
+}
+
+// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) Vector ===
+func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	vec := vals[0].(Vector)
+	// round returns a number rounded to toNearest.
+	// Ties are solved by rounding up.
+	toNearest := float64(1)
+	if len(args) >= 2 {
+		toNearest = vals[1].(Vector)[0].Point.V
+	}
+	// Invert as it seems to cause fewer floating point accuracy issues.
+ toNearestInverse := 1.0 / toNearest + + for _, el := range vec { + v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: v}, + }) + } + return enh.out +} + +// === Scalar(node parser.ValueTypeVector) Scalar === +func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + v := vals[0].(Vector) + if len(v) != 1 { + return append(enh.out, Sample{ + Point: Point{V: math.NaN()}, + }) + } + return append(enh.out, Sample{ + Point: Point{V: v[0].V}, + }) +} + +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.out, Sample{ + Point: Point{V: aggrFn(el.Points)}, + }) +} + +// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + var mean, count float64 + for _, v := range values { + count++ + mean += (v.V - mean) / count + } + return mean + }) +} + +// === count_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + return float64(len(values)) + }) +} + +// === floor(Vector parser.ValueTypeVector) Vector === +// === max_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + max := values[0].V + for _, v := range values { + if v.V > max || math.IsNaN(max) { + max = v.V + } + } + return max + }) +} + +// === min_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + min := values[0].V + for _, v := range values { + if v.V < min || math.IsNaN(min) { + min = v.V + } + } + return min + }) +} + +// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + var sum float64 + for _, v := range values { + sum += v.V + } + return sum + }) +} + +// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + q := vals[0].(Vector)[0].V + el := vals[1].(Matrix)[0] + + values := make(vectorByValueHeap, 0, len(el.Points)) + for _, v := range el.Points { + values = append(values, Sample{Point: Point{V: v.V}}) + } + return append(enh.out, Sample{ + Point: Point{V: quantile(q, values)}, + }) +} + +// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + var aux, count, mean float64 + for _, v := range values { + count++ + delta := v.V - mean + mean += delta / count + aux += delta * (v.V - mean) + } + return math.Sqrt(aux / count) + }) +} + +// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) 
Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + var aux, count, mean float64 + for _, v := range values { + count++ + delta := v.V - mean + mean += delta / count + aux += delta * (v.V - mean) + } + return aux / count + }) +} + +// === absent(Vector parser.ValueTypeVector) Vector === +func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + if len(vals[0].(Vector)) > 0 { + return enh.out + } + return append(enh.out, + Sample{ + Metric: createLabelsForAbsentFunction(args[0]), + Point: Point{V: 1}, + }) +} + +// === absent_over_time(Vector parser.ValueTypeMatrix) Vector === +// As this function has a matrix as argument, it does not get all the Series. +// This function will return 1 if the matrix has at least one element. +// Due to engine optimization, this function is only called when this condition is true. +// Then, the engine post-processes the results to get the expected output. +func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return append(enh.out, + Sample{ + Point: Point{V: 1}, + }) +} + +func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { + for _, el := range vals[0].(Vector) { + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: f(el.V)}, + }) + } + return enh.out +} + +// === abs(Vector parser.ValueTypeVector) Vector === +func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Abs) +} + +// === ceil(Vector parser.ValueTypeVector) Vector === +func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Ceil) +} + +// === floor(Vector parser.ValueTypeVector) Vector === +func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Floor) +} + +// === exp(Vector parser.ValueTypeVector) Vector === +func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Exp) +} + +// === sqrt(Vector VectorNode) Vector === +func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Sqrt) +} + +// === ln(Vector parser.ValueTypeVector) Vector === +func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log) +} + +// === log2(Vector parser.ValueTypeVector) Vector === +func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log2) +} + +// === log10(Vector parser.ValueTypeVector) Vector === +func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log10) +} + +// === timestamp(Vector parser.ValueTypeVector) Vector === +func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + for _, el := range vec { + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: float64(el.T) / 1000}, + }) + } + return enh.out +} + +// linearRegression performs a least-square linear regression analysis on the +// provided SamplePairs. It returns the slope, and the intercept value at the +// provided time. 
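funcStddevOverTime and funcStdvarOverTime above accumulate variance with Welford's online algorithm, which needs only one pass over the points and stays numerically stable. A self-contained sketch of the same update rule (standalone program, not engine code):

package main

import (
	"fmt"
	"math"
)

// welford folds each value into a running mean and sum of squared
// deviations in a single pass, the same update rule used by the
// *_over_time aggregators above.
func welford(values []float64) (mean, stddev float64) {
	var count, aux float64
	for _, v := range values {
		count++
		delta := v - mean
		mean += delta / count
		aux += delta * (v - mean)
	}
	return mean, math.Sqrt(aux / count)
}

func main() {
	m, s := welford([]float64{2, 4, 4, 4, 5, 5, 7, 9})
	fmt.Printf("mean=%.1f stddev=%.1f\n", m, s) // mean=5.0 stddev=2.0
}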
+func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { + var ( + n float64 + sumX, sumY float64 + sumXY, sumX2 float64 + ) + for _, sample := range samples { + x := float64(sample.T-interceptTime) / 1e3 + n += 1.0 + sumY += sample.V + sumX += x + sumXY += x * sample.V + sumX2 += x * x + } + covXY := sumXY - sumX*sumY/n + varX := sumX2 - sumX*sumX/n + + slope = covXY / varX + intercept = sumY/n - slope*sumX/n + return slope, intercept +} + +// === deriv(node parser.ValueTypeMatrix) Vector === +func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + samples := vals[0].(Matrix)[0] + + // No sense in trying to compute a derivative without at least two points. + // Drop this Vector element. + if len(samples.Points) < 2 { + return enh.out + } + + // We pass in an arbitrary timestamp that is near the values in use + // to avoid floating point accuracy issues, see + // https://github.com/prometheus/prometheus/issues/2674 + slope, _ := linearRegression(samples.Points, samples.Points[0].T) + return append(enh.out, Sample{ + Point: Point{V: slope}, + }) +} + +// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === +func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + samples := vals[0].(Matrix)[0] + duration := vals[1].(Vector)[0].V + + // No sense in trying to predict anything without at least two points. + // Drop this Vector element. + if len(samples.Points) < 2 { + return enh.out + } + slope, intercept := linearRegression(samples.Points, enh.ts) + + return append(enh.out, Sample{ + Point: Point{V: slope*duration + intercept}, + }) +} + +// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === +func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + q := vals[0].(Vector)[0].V + inVec := vals[1].(Vector) + sigf := enh.signatureFunc(false, excludedLabels...) + + if enh.signatureToMetricWithBuckets == nil { + enh.signatureToMetricWithBuckets = map[uint64]*metricWithBuckets{} + } else { + for _, v := range enh.signatureToMetricWithBuckets { + v.buckets = v.buckets[:0] + } + } + for _, el := range inVec { + upperBound, err := strconv.ParseFloat( + el.Metric.Get(model.BucketLabel), 64, + ) + if err != nil { + // Oops, no bucket label or malformed label value. Skip. + // TODO(beorn7): Issue a warning somehow. + continue + } + hash := sigf(el.Metric) + + mb, ok := enh.signatureToMetricWithBuckets[hash] + if !ok { + el.Metric = labels.NewBuilder(el.Metric). + Del(labels.BucketLabel, labels.MetricName). 
+ Labels() + + mb = &metricWithBuckets{el.Metric, nil} + enh.signatureToMetricWithBuckets[hash] = mb + } + mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) + } + + for _, mb := range enh.signatureToMetricWithBuckets { + if len(mb.buckets) > 0 { + enh.out = append(enh.out, Sample{ + Metric: mb.metric, + Point: Point{V: bucketQuantile(q, mb.buckets)}, + }) + } + } + + return enh.out +} + +// === resets(Matrix parser.ValueTypeMatrix) Vector === +func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + samples := vals[0].(Matrix)[0] + + resets := 0 + prev := samples.Points[0].V + for _, sample := range samples.Points[1:] { + current := sample.V + if current < prev { + resets++ + } + prev = current + } + + return append(enh.out, Sample{ + Point: Point{V: float64(resets)}, + }) +} + +// === changes(Matrix parser.ValueTypeMatrix) Vector === +func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + samples := vals[0].(Matrix)[0] + + changes := 0 + prev := samples.Points[0].V + for _, sample := range samples.Points[1:] { + current := sample.V + if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { + changes++ + } + prev = current + } + + return append(enh.out, Sample{ + Point: Point{V: float64(changes)}, + }) +} + +// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === +func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + var ( + vector = vals[0].(Vector) + dst = args[1].(*parser.StringLiteral).Val + repl = args[2].(*parser.StringLiteral).Val + src = args[3].(*parser.StringLiteral).Val + regexStr = args[4].(*parser.StringLiteral).Val + ) + + if enh.regex == nil { + var err error + enh.regex, err = regexp.Compile("^(?:" + regexStr + ")$") + if err != nil { + panic(errors.Errorf("invalid regular expression in label_replace(): %s", regexStr)) + } + if !model.LabelNameRE.MatchString(dst) { + panic(errors.Errorf("invalid destination label name in label_replace(): %s", dst)) + } + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + + for _, el := range vector { + h := el.Metric.Hash() + var outMetric labels.Labels + if l, ok := enh.dmn[h]; ok { + outMetric = l + } else { + srcVal := el.Metric.Get(src) + indexes := enh.regex.FindStringSubmatchIndex(srcVal) + if indexes == nil { + // If there is no match, no replacement should take place. + outMetric = el.Metric + enh.dmn[h] = outMetric + } else { + res := enh.regex.ExpandString([]byte{}, repl, srcVal, indexes) + + lb := labels.NewBuilder(el.Metric).Del(dst) + if len(res) > 0 { + lb.Set(dst, string(res)) + } + outMetric = lb.Labels() + enh.dmn[h] = outMetric + } + } + + enh.out = append(enh.out, Sample{ + Metric: outMetric, + Point: Point{V: el.Point.V}, + }) + } + return enh.out +} + +// === Vector(s Scalar) Vector === +func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return append(enh.out, + Sample{ + Metric: labels.Labels{}, + Point: Point{V: vals[0].(Vector)[0].V}, + }) +} + +// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) 
Vector === +func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + var ( + vector = vals[0].(Vector) + dst = args[1].(*parser.StringLiteral).Val + sep = args[2].(*parser.StringLiteral).Val + srcLabels = make([]string, len(args)-3) + ) + + if enh.dmn == nil { + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + + for i := 3; i < len(args); i++ { + src := args[i].(*parser.StringLiteral).Val + if !model.LabelName(src).IsValid() { + panic(errors.Errorf("invalid source label name in label_join(): %s", src)) + } + srcLabels[i-3] = src + } + + if !model.LabelName(dst).IsValid() { + panic(errors.Errorf("invalid destination label name in label_join(): %s", dst)) + } + + srcVals := make([]string, len(srcLabels)) + for _, el := range vector { + h := el.Metric.Hash() + var outMetric labels.Labels + if l, ok := enh.dmn[h]; ok { + outMetric = l + } else { + + for i, src := range srcLabels { + srcVals[i] = el.Metric.Get(src) + } + + lb := labels.NewBuilder(el.Metric) + + strval := strings.Join(srcVals, sep) + if strval == "" { + lb.Del(dst) + } else { + lb.Set(dst, strval) + } + + outMetric = lb.Labels() + enh.dmn[h] = outMetric + } + + enh.out = append(enh.out, Sample{ + Metric: outMetric, + Point: Point{V: el.Point.V}, + }) + } + return enh.out +} + +// Common code for date related functions. +func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { + if len(vals) == 0 { + return append(enh.out, + Sample{ + Metric: labels.Labels{}, + Point: Point{V: f(time.Unix(enh.ts/1000, 0).UTC())}, + }) + } + + for _, el := range vals[0].(Vector) { + t := time.Unix(int64(el.V), 0).UTC() + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: f(t)}, + }) + } + return enh.out +} + +// === days_in_month(v Vector) Scalar === +func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) + }) +} + +// === day_of_month(v Vector) Scalar === +func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Day()) + }) +} + +// === day_of_week(v Vector) Scalar === +func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Weekday()) + }) +} + +// === hour(v Vector) Scalar === +func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Hour()) + }) +} + +// === minute(v Vector) Scalar === +func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Minute()) + }) +} + +// === month(v Vector) Scalar === +func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Month()) + }) +} + +// === year(v Vector) Scalar === +func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Year()) + }) +} + +// FunctionCalls is a list of all functions supported by PromQL, including 
their types. +var FunctionCalls = map[string]FunctionCall{ + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_quantile": funcHistogramQuantile, + "holt_winters": funcHoltWinters, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "irate": funcIrate, + "label_replace": funcLabelReplace, + "label_join": funcLabelJoin, + "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "max_over_time": funcMaxOverTime, + "min_over_time": funcMinOverTime, + "minute": funcMinute, + "month": funcMonth, + "predict_linear": funcPredictLinear, + "quantile_over_time": funcQuantileOverTime, + "rate": funcRate, + "resets": funcResets, + "round": funcRound, + "scalar": funcScalar, + "sort": funcSort, + "sort_desc": funcSortDesc, + "sqrt": funcSqrt, + "stddev_over_time": funcStddevOverTime, + "stdvar_over_time": funcStdvarOverTime, + "sum_over_time": funcSumOverTime, + "time": funcTime, + "timestamp": funcTimestamp, + "vector": funcVector, + "year": funcYear, +} + +type vectorByValueHeap Vector + +func (s vectorByValueHeap) Len() int { + return len(s) +} + +func (s vectorByValueHeap) Less(i, j int) bool { + if math.IsNaN(s[i].V) { + return true + } + return s[i].V < s[j].V +} + +func (s vectorByValueHeap) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s *vectorByValueHeap) Push(x interface{}) { + *s = append(*s, *(x.(*Sample))) +} + +func (s *vectorByValueHeap) Pop() interface{} { + old := *s + n := len(old) + el := old[n-1] + *s = old[0 : n-1] + return el +} + +type vectorByReverseValueHeap Vector + +func (s vectorByReverseValueHeap) Len() int { + return len(s) +} + +func (s vectorByReverseValueHeap) Less(i, j int) bool { + if math.IsNaN(s[i].V) { + return true + } + return s[i].V > s[j].V +} + +func (s vectorByReverseValueHeap) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s *vectorByReverseValueHeap) Push(x interface{}) { + *s = append(*s, *(x.(*Sample))) +} + +func (s *vectorByReverseValueHeap) Pop() interface{} { + old := *s + n := len(old) + el := old[n-1] + *s = old[0 : n-1] + return el +} + +// createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched +// in a given expression. It is used in the absent functions. 
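vectorByValueHeap above keeps the smallest element on top so that topk can evict the weakest member in O(log k) whenever a larger sample arrives; aggregation() relies on exactly this. A standalone sketch of the same pattern over plain float64s (illustrative only, not vendored code):

package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// floatMinHeap keeps its smallest element at index 0, so a full heap can
// evict the weakest member in O(log k) when a larger value arrives.
type floatMinHeap []float64

func (h floatMinHeap) Len() int           { return len(h) }
func (h floatMinHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h floatMinHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *floatMinHeap) Push(x interface{}) { *h = append(*h, x.(float64)) }
func (h *floatMinHeap) Pop() interface{} {
	old := *h
	n := len(old)
	el := old[n-1]
	*h = old[:n-1]
	return el
}

func topk(k int, vals []float64) []float64 {
	h := make(floatMinHeap, 0, k)
	for _, v := range vals {
		if h.Len() < k {
			heap.Push(&h, v)
		} else if h[0] < v {
			heap.Pop(&h)
			heap.Push(&h, v)
		}
	}
	// The heap keeps the lowest value on top, so reverse for display,
	// just as aggregation() does before emitting topk results.
	sort.Sort(sort.Reverse(h))
	return []float64(h)
}

func main() {
	fmt.Println(topk(3, []float64{5, 1, 9, 3, 7})) // [9 7 5]
}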
+func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { + m := labels.Labels{} + + var lm []*labels.Matcher + switch n := expr.(type) { + case *parser.VectorSelector: + lm = n.LabelMatchers + case *parser.MatrixSelector: + lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers + default: + return m + } + + empty := []string{} + for _, ma := range lm { + if ma.Name == labels.MetricName { + continue + } + if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { + m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels() + } else { + empty = append(empty, ma.Name) + } + } + + for _, v := range empty { + m = labels.NewBuilder(m).Del(v).Labels() + } + return m +} diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go new file mode 100644 index 0000000000000000000000000000000000000000..066700d8c044a8749b41c2b8264f7803f843fed8 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go @@ -0,0 +1,103 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Only build when go-fuzz is in use +// +build gofuzz + +package promql + +import ( + "io" + + "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/promql/parser" +) + +// PromQL parser fuzzing instrumentation for use with +// https://github.com/dvyukov/go-fuzz. +// +// Fuzz each parser by building appropriately instrumented parser, ex. +// FuzzParseMetric and execute it with it's +// +// go-fuzz-build -func FuzzParseMetric -o FuzzParseMetric.zip github.com/prometheus/prometheus/promql +// +// And then run the tests with the appropriate inputs +// +// go-fuzz -bin FuzzParseMetric.zip -workdir fuzz-data/ParseMetric +// +// Further input samples should go in the folders fuzz-data/ParseMetric/corpus. +// +// Repeat for FuzzParseOpenMetric, FuzzParseMetricSelector and FuzzParseExpr. + +// Tuning which value is returned from Fuzz*-functions has a strong influence +// on how quick the fuzzer converges on "interesting" cases. At least try +// switching between fuzzMeh (= included in corpus, but not a priority) and +// fuzzDiscard (=don't use this input for re-building later inputs) when +// experimenting. +const ( + fuzzInteresting = 1 + fuzzMeh = 0 + fuzzDiscard = -1 +) + +func fuzzParseMetricWithContentType(in []byte, contentType string) int { + p := textparse.New(in, contentType) + var err error + for { + _, err = p.Next() + if err != nil { + break + } + } + if err == io.EOF { + err = nil + } + + if err == nil { + return fuzzInteresting + } + + return fuzzMeh +} + +// Fuzz the metric parser. +// +// Note that this is not the parser for the text-based exposition-format; that +// lives in github.com/prometheus/client_golang/text. +func FuzzParseMetric(in []byte) int { + return fuzzParseMetricWithContentType(in, "") +} + +func FuzzParseOpenMetric(in []byte) int { + return fuzzParseMetricWithContentType(in, "application/openmetrics-text") +} + +// Fuzz the metric selector parser. 
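The fuzz entry points classify each input by its return code: a parse without error is fuzzInteresting, a parse error is fuzzMeh. A quick way to sanity-check them outside go-fuzz is to call them directly; this is a hypothetical smoke test, not part of the vendored file, and it assumes a _test.go file built with the gofuzz build tag and the `testing` package imported:

func TestFuzzSmoke(t *testing.T) {
	// A selector the parser accepts is flagged as interesting corpus input.
	if got := FuzzParseMetricSelector([]byte(`up{job="api"}`)); got != fuzzInteresting {
		t.Fatalf("valid selector: want %d, got %d", fuzzInteresting, got)
	}
	// A broken selector parses with an error and is merely kept (fuzzMeh).
	if got := FuzzParseMetricSelector([]byte(`up{job=`)); got != fuzzMeh {
		t.Fatalf("invalid selector: want %d, got %d", fuzzMeh, got)
	}
}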
+func FuzzParseMetricSelector(in []byte) int { + _, err := parser.ParseMetricSelector(string(in)) + if err == nil { + return fuzzInteresting + } + + return fuzzMeh +} + +// Fuzz the expression parser. +func FuzzParseExpr(in []byte) int { + _, err := parser.ParseExpr(string(in)) + if err == nil { + return fuzzInteresting + } + + return fuzzMeh +} diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go new file mode 100644 index 0000000000000000000000000000000000000000..6fef2c4f64a427944ed5939fec5f358172996be1 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -0,0 +1,204 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "math" + "sort" + + "github.com/prometheus/prometheus/pkg/labels" +) + +// Helpers to calculate quantiles. + +// excludedLabels are the labels to exclude from signature calculation for +// quantiles. +var excludedLabels = []string{ + labels.MetricName, + labels.BucketLabel, +} + +type bucket struct { + upperBound float64 + count float64 +} + +// buckets implements sort.Interface. +type buckets []bucket + +func (b buckets) Len() int { return len(b) } +func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound } + +type metricWithBuckets struct { + metric labels.Labels + buckets buckets +} + +// bucketQuantile calculates the quantile 'q' based on the given buckets. The +// buckets will be sorted by upperBound by this function (i.e. no sorting +// needed before calling this function). The quantile value is interpolated +// assuming a linear distribution within a bucket. However, if the quantile +// falls into the highest bucket, the upper bound of the 2nd highest bucket is +// returned. A natural lower bound of 0 is assumed if the upper bound of the +// lowest bucket is greater 0. In that case, interpolation in the lowest bucket +// happens linearly between 0 and the upper bound of the lowest bucket. +// However, if the lowest bucket has an upper bound less or equal 0, this upper +// bound is returned if the quantile falls into the lowest bucket. +// +// There are a number of special cases (once we have a way to report errors +// happening during evaluations of AST functions, we should report those +// explicitly): +// +// If 'buckets' has fewer than 2 elements, NaN is returned. +// +// If the highest bucket is not +Inf, NaN is returned. +// +// If q<0, -Inf is returned. +// +// If q>1, +Inf is returned. 
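A worked numeric example of the interpolation the doc comment describes, written as a standalone program. The bucket values are invented for illustration; the arithmetic mirrors the rank computation in bucketQuantile below:

package main

import "fmt"

func main() {
	// Cumulative histogram: 30 observations <= 0.1, 80 <= 0.5, and the
	// mandatory +Inf bucket holding all 100.
	uppers := []float64{0.1, 0.5}
	counts := []float64{30, 80}
	total := 100.0

	// q=0.9: rank 90 exceeds the last finite bucket's count (80), so the
	// quantile falls into +Inf and the second-highest upper bound wins.
	fmt.Println(0.9*total > counts[1], "->", uppers[1]) // true -> 0.5

	// q=0.5: rank 50 lands in the (0.1, 0.5] bucket and is interpolated.
	rank := 0.5 * total
	bucketStart, bucketEnd := uppers[0], uppers[1]
	count := counts[1] - counts[0]   // 50 observations inside the bucket
	rankInBucket := rank - counts[0] // the 20th of those 50
	fmt.Println(bucketStart + (bucketEnd-bucketStart)*(rankInBucket/count)) // 0.26
}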
+func bucketQuantile(q float64, buckets buckets) float64 { + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + sort.Sort(buckets) + if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { + return math.NaN() + } + + buckets = coalesceBuckets(buckets) + ensureMonotonic(buckets) + + if len(buckets) < 2 { + return math.NaN() + } + + rank := q * buckets[len(buckets)-1].count + b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) + + if b == len(buckets)-1 { + return buckets[len(buckets)-2].upperBound + } + if b == 0 && buckets[0].upperBound <= 0 { + return buckets[0].upperBound + } + var ( + bucketStart float64 + bucketEnd = buckets[b].upperBound + count = buckets[b].count + ) + if b > 0 { + bucketStart = buckets[b-1].upperBound + count -= buckets[b-1].count + rank -= buckets[b-1].count + } + return bucketStart + (bucketEnd-bucketStart)*(rank/count) +} + +// coalesceBuckets merges buckets with the same upper bound. +// +// The input buckets must be sorted. +func coalesceBuckets(buckets buckets) buckets { + last := buckets[0] + i := 0 + for _, b := range buckets[1:] { + if b.upperBound == last.upperBound { + last.count += b.count + } else { + buckets[i] = last + last = b + i++ + } + } + buckets[i] = last + return buckets[:i+1] +} + +// The assumption that bucket counts increase monotonically with increasing +// upperBound may be violated during: +// +// * Recording rule evaluation of histogram_quantile, especially when rate() +// has been applied to the underlying bucket timeseries. +// * Evaluation of histogram_quantile computed over federated bucket +// timeseries, especially when rate() has been applied. +// +// This is because scraped data is not made available to rule evaluation or +// federation atomically, so some buckets are computed with data from the +// most recent scrapes, but the other buckets are missing data from the most +// recent scrape. +// +// Monotonicity is usually guaranteed because if a bucket with upper bound +// u1 has count c1, then any bucket with a higher upper bound u > u1 must +// have counted all c1 observations and perhaps more, so that c >= c1. +// +// Randomly interspersed partial sampling breaks that guarantee, and rate() +// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from +// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The +// monotonicity is broken. It is exacerbated by rate() because under normal +// operation, cumulative counting of buckets will cause the bucket counts to +// diverge such that small differences from missing samples are not a problem. +// rate() removes this divergence.) +// +// bucketQuantile depends on that monotonicity to do a binary search for the +// bucket with the φ-quantile count, so breaking the monotonicity +// guarantee causes bucketQuantile() to return undefined (nonsense) results. +// +// As a somewhat hacky solution until ingestion is atomic per scrape, we +// calculate the "envelope" of the histogram buckets, essentially removing +// any decreases in the count between successive buckets. + +func ensureMonotonic(buckets buckets) { + max := buckets[0].count + for i := range buckets[1:] { + switch { + case buckets[i].count > max: + max = buckets[i].count + case buckets[i].count < max: + buckets[i].count = max + } + } +} + +// quantile calculates the given quantile of a vector of samples. +// +// The Vector will be sorted. +// If 'values' has zero elements, NaN is returned. +// If q<0, -Inf is returned. 
+// If q>1, +Inf is returned. +func quantile(q float64, values vectorByValueHeap) float64 { + if len(values) == 0 { + return math.NaN() + } + if q < 0 { + return math.Inf(-1) + } + if q > 1 { + return math.Inf(+1) + } + sort.Sort(values) + + n := float64(len(values)) + // When the quantile lies between two samples, + // we use a weighted average of the two samples. + rank := q * (n - 1) + + lowerIndex := math.Max(0, math.Floor(rank)) + upperIndex := math.Min(n-1, lowerIndex+1) + + weight := rank - math.Floor(rank) + return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight +} diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go new file mode 100644 index 0000000000000000000000000000000000000000..1ae1bec5f3222c5f0d7381e1dc086e9dc2d578e7 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -0,0 +1,197 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + "unicode/utf8" + + "github.com/edsrzf/mmap-go" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +type ActiveQueryTracker struct { + mmapedFile []byte + getNextIndex chan int + logger log.Logger + maxConcurrent int +} + +type Entry struct { + Query string `json:"query"` + Timestamp int64 `json:"timestamp_sec"` +} + +const ( + entrySize int = 1000 +) + +func parseBrokenJSON(brokenJSON []byte) (bool, string) { + queries := strings.ReplaceAll(string(brokenJSON), "\x00", "") + if len(queries) > 0 { + queries = queries[:len(queries)-1] + "]" + } + + // Conditional because of implementation detail: len() = 1 implies file consisted of a single char: '['. 
+ if len(queries) <= 1 { + return false, "[]" + } + + return true, queries +} + +func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { + if _, err := os.Stat(filename); err == nil { + fd, err := os.Open(filename) + if err != nil { + level.Error(logger).Log("msg", "Failed to open query log file", "err", err) + return + } + + brokenJSON := make([]byte, filesize) + _, err = fd.Read(brokenJSON) + if err != nil { + level.Error(logger).Log("msg", "Failed to read query log file", "err", err) + return + } + + queriesExist, queries := parseBrokenJSON(brokenJSON) + if !queriesExist { + return + } + level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) + } +} + +func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { + + file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666) + if err != nil { + level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) + return nil, err + } + + err = file.Truncate(int64(filesize)) + if err != nil { + level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) + return nil, err + } + + fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) + if err != nil { + level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) + return nil, err + } + + return fileAsBytes, err +} + +func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { + err := os.MkdirAll(localStoragePath, 0777) + if err != nil { + level.Error(logger).Log("msg", "Failed to create directory for logging active queries") + } + + filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize + logUnfinishedQueries(filename, filesize, logger) + + fileAsBytes, err := getMMapedFile(filename, filesize, logger) + if err != nil { + panic("Unable to create mmap-ed active query log") + } + + copy(fileAsBytes, "[") + activeQueryTracker := ActiveQueryTracker{ + mmapedFile: fileAsBytes, + getNextIndex: make(chan int, maxConcurrent), + logger: logger, + maxConcurrent: maxConcurrent, + } + + activeQueryTracker.generateIndices(maxConcurrent) + + return &activeQueryTracker +} + +func trimStringByBytes(str string, size int) string { + bytesStr := []byte(str) + + trimIndex := len(bytesStr) + if size < len(bytesStr) { + for !utf8.RuneStart(bytesStr[size]) { + size-- + } + trimIndex = size + } + + return string(bytesStr[:trimIndex]) +} + +func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { + entry := Entry{query, timestamp} + jsonEntry, err := json.Marshal(entry) + + if err != nil { + level.Error(logger).Log("msg", "Cannot create json of query", "query", query) + return []byte{} + } + + return jsonEntry +} + +func newJSONEntry(query string, logger log.Logger) []byte { + timestamp := time.Now().Unix() + minEntryJSON := _newJSONEntry("", timestamp, logger) + + query = trimStringByBytes(query, entrySize-(len(minEntryJSON)+1)) + jsonEntry := _newJSONEntry(query, timestamp, logger) + + return jsonEntry +} + +func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) { + for i := 0; i < maxConcurrent; i++ { + tracker.getNextIndex <- 1 + (i * entrySize) + } +} + +func (tracker ActiveQueryTracker) GetMaxConcurrent() int { + return tracker.maxConcurrent +} + +func (tracker ActiveQueryTracker) Delete(insertIndex int) { + copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", 
entrySize)) + tracker.getNextIndex <- insertIndex +} + +func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) { + select { + case i := <-tracker.getNextIndex: + fileBytes := tracker.mmapedFile + entry := newJSONEntry(query, tracker.logger) + start, end := i, i+entrySize + + copy(fileBytes[start:], entry) + copy(fileBytes[end-1:], ",") + return i, nil + case <-ctx.Done(): + return 0, ctx.Err() + } +} diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go new file mode 100644 index 0000000000000000000000000000000000000000..e3d99e83f8cc9e871883ce1a1ef0866c1b2410b6 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -0,0 +1,701 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "regexp" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/util/teststorage" + "github.com/prometheus/prometheus/util/testutil" +) + +var ( + minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64. + + patSpace = regexp.MustCompile("[\t ]+") + patLoad = regexp.MustCompile(`^load\s+(.+?)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) +) + +const ( + epsilon = 0.000001 // Relative error allowed for sample values. +) + +var testStartTime = time.Unix(0, 0).UTC() + +// Test is a sequence of read and write commands that are run +// against a test storage. +type Test struct { + testutil.T + + cmds []testCommand + + storage *teststorage.TestStorage + + queryEngine *Engine + context context.Context + cancelCtx context.CancelFunc +} + +// NewTest returns an initialized empty Test. +func NewTest(t testutil.T, input string) (*Test, error) { + test := &Test{ + T: t, + cmds: []testCommand{}, + } + err := test.parse(input) + test.clear() + + return test, err +} + +func newTestFromFile(t testutil.T, filename string) (*Test, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return NewTest(t, string(content)) +} + +// QueryEngine returns the test's query engine. +func (t *Test) QueryEngine() *Engine { + return t.queryEngine +} + +// Queryable allows querying the test data. +func (t *Test) Queryable() storage.Queryable { + return t.storage +} + +// Context returns the test's context. +func (t *Test) Context() context.Context { + return t.context +} + +// Storage returns the test's storage. +func (t *Test) Storage() storage.Storage { + return t.storage +} + +// TSDB returns test's TSDB. 
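Back in query_logger.go, the ActiveQueryTracker hands out fixed byte offsets over a channel: byte 0 of the mmapped file holds '[', and slot i starts at 1+i*entrySize, so concurrent Insert and Delete calls never contend on a position. A standalone sketch of that slot arithmetic, with a plain byte slice standing in for the mmap (illustrative only):

package main

import "fmt"

const entrySize = 1000 // the tracker's fixed slot width

func main() {
	maxConcurrent := 4
	// Byte 0 holds '['; slot i starts at 1+i*entrySize, as in generateIndices.
	buf := make([]byte, 1+maxConcurrent*entrySize)
	buf[0] = '['

	slots := make(chan int, maxConcurrent)
	for i := 0; i < maxConcurrent; i++ {
		slots <- 1 + i*entrySize
	}

	// Insert: claim a slot, write the JSON entry, terminate the slot with ','.
	i := <-slots
	copy(buf[i:], `{"query":"up","timestamp_sec":0}`)
	buf[i+entrySize-1] = ','

	// Delete: zero the slot and recycle its offset.
	for j := i; j < i+entrySize; j++ {
		buf[j] = 0
	}
	slots <- i
	fmt.Println("recycled offset", i) // recycled offset 1
}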
+func (t *Test) TSDB() *tsdb.DB {
+	return t.storage.DB
+}
+
+func raise(line int, format string, v ...interface{}) error {
+	return &parser.ParseErr{
+		LineOffset: line,
+		Err:        errors.Errorf(format, v...),
+	}
+}
+
+func parseLoad(lines []string, i int) (int, *loadCmd, error) {
+	if !patLoad.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid load command. (load <step:duration>)")
+	}
+	parts := patLoad.FindStringSubmatch(lines[i])
+
+	gap, err := model.ParseDuration(parts[1])
+	if err != nil {
+		return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+	}
+	cmd := newLoadCmd(time.Duration(gap))
+	for i+1 < len(lines) {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*parser.ParseErr); ok {
+				perr.LineOffset = i
+			}
+			return i, nil, err
+		}
+		cmd.set(metric, vals...)
+	}
+	return i, cmd, nil
+}
+
+func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
+	if !patEvalInstant.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
+	}
+	parts := patEvalInstant.FindStringSubmatch(lines[i])
+	var (
+		mod  = parts[1]
+		at   = parts[2]
+		expr = parts[3]
+	)
+	_, err := parser.ParseExpr(expr)
+	if err != nil {
+		if perr, ok := err.(*parser.ParseErr); ok {
+			perr.LineOffset = i
+			posOffset := parser.Pos(strings.Index(lines[i], expr))
+			perr.PositionRange.Start += posOffset
+			perr.PositionRange.End += posOffset
+			perr.Query = lines[i]
+		}
+		return i, nil, err
+	}
+
+	offset, err := model.ParseDuration(at)
+	if err != nil {
+		return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+	}
+	ts := testStartTime.Add(time.Duration(offset))
+
+	cmd := newEvalCmd(expr, ts, i+1)
+	switch mod {
+	case "ordered":
+		cmd.ordered = true
+	case "fail":
+		cmd.fail = true
+	}
+
+	for j := 1; i+1 < len(lines); j++ {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		if f, err := parseNumber(defLine); err == nil {
+			cmd.expect(0, nil, parser.SequenceValue{Value: f})
+			break
+		}
+		metric, vals, err := parser.ParseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*parser.ParseErr); ok {
+				perr.LineOffset = i
+			}
+			return i, nil, err
+		}
+
+		// Currently, we are not expecting any matrices.
+		if len(vals) > 1 {
+			return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
+		}
+		cmd.expect(j, metric, vals...)
+	}
+	return i, cmd, nil
+}
+
+// getLines returns trimmed lines after removing the comments.
+func getLines(input string) []string {
+	lines := strings.Split(input, "\n")
+	for i, l := range lines {
+		l = strings.TrimSpace(l)
+		if strings.HasPrefix(l, "#") {
+			l = ""
+		}
+		lines[i] = l
+	}
+	return lines
+}
+
+// parse parses the given command sequence and appends it to the test.
+func (t *Test) parse(input string) error {
+	lines := getLines(input)
+	var err error
+	// Scan for steps line by line.
+ for i := 0; i < len(lines); i++ { + l := lines[i] + if len(l) == 0 { + continue + } + var cmd testCommand + + switch c := strings.ToLower(patSpace.Split(l, 2)[0]); { + case c == "clear": + cmd = &clearCmd{} + case c == "load": + i, cmd, err = parseLoad(lines, i) + case strings.HasPrefix(c, "eval"): + i, cmd, err = t.parseEval(lines, i) + default: + return raise(i, "invalid command %q", l) + } + if err != nil { + return err + } + t.cmds = append(t.cmds, cmd) + } + return nil +} + +// testCommand is an interface that ensures that only the package internal +// types can be a valid command for a test. +type testCommand interface { + testCmd() +} + +func (*clearCmd) testCmd() {} +func (*loadCmd) testCmd() {} +func (*evalCmd) testCmd() {} + +// loadCmd is a command that loads sequences of sample values for specific +// metrics into the storage. +type loadCmd struct { + gap time.Duration + metrics map[uint64]labels.Labels + defs map[uint64][]Point +} + +func newLoadCmd(gap time.Duration) *loadCmd { + return &loadCmd{ + gap: gap, + metrics: map[uint64]labels.Labels{}, + defs: map[uint64][]Point{}, + } +} + +func (cmd loadCmd) String() string { + return "load" +} + +// set a sequence of sample values for the given metric. +func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) { + h := m.Hash() + + samples := make([]Point, 0, len(vals)) + ts := testStartTime + for _, v := range vals { + if !v.Omitted { + samples = append(samples, Point{ + T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond), + V: v.Value, + }) + } + ts = ts.Add(cmd.gap) + } + cmd.defs[h] = samples + cmd.metrics[h] = m +} + +// append the defined time series to the storage. +func (cmd *loadCmd) append(a storage.Appender) error { + for h, smpls := range cmd.defs { + m := cmd.metrics[h] + + for _, s := range smpls { + if _, err := a.Add(m, s.T, s.V); err != nil { + return err + } + } + } + return nil +} + +// evalCmd is a command that evaluates an expression for the given time (range) +// and expects a specific result. +type evalCmd struct { + expr string + start time.Time + line int + + fail, ordered bool + + metrics map[uint64]labels.Labels + expected map[uint64]entry +} + +type entry struct { + pos int + vals []parser.SequenceValue +} + +func (e entry) String() string { + return fmt.Sprintf("%d: %s", e.pos, e.vals) +} + +func newEvalCmd(expr string, start time.Time, line int) *evalCmd { + return &evalCmd{ + expr: expr, + start: start, + line: line, + + metrics: map[uint64]labels.Labels{}, + expected: map[uint64]entry{}, + } +} + +func (ev *evalCmd) String() string { + return "eval" +} + +// expect adds a new metric with a sequence of values to the set of expected +// results for the query. +func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) { + if m == nil { + ev.expected[0] = entry{pos: pos, vals: vals} + return + } + h := m.Hash() + ev.metrics[h] = m + ev.expected[h] = entry{pos: pos, vals: vals} +} + +// compareResult compares the result value with the defined expectation. 
+func (ev *evalCmd) compareResult(result parser.Value) error {
+	switch val := result.(type) {
+	case Matrix:
+		return errors.New("received range result on instant evaluation")
+
+	case Vector:
+		seen := map[uint64]bool{}
+		for pos, v := range val {
+			fp := v.Metric.Hash()
+			if _, ok := ev.metrics[fp]; !ok {
+				return errors.Errorf("unexpected metric %s in result", v.Metric)
+			}
+			exp := ev.expected[fp]
+			if ev.ordered && exp.pos != pos+1 {
+				return errors.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
+			}
+			if !almostEqual(exp.vals[0].Value, v.V) {
+				return errors.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.V)
+			}
+
+			seen[fp] = true
+		}
+		for fp, expVals := range ev.expected {
+			if !seen[fp] {
+				fmt.Println("vector result", len(val), ev.expr)
+				for _, ss := range val {
+					fmt.Println("    ", ss.Metric, ss.Point)
+				}
+				return errors.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
+			}
+		}
+
+	case Scalar:
+		if !almostEqual(ev.expected[0].vals[0].Value, val.V) {
+			return errors.Errorf("expected Scalar %v but got %v", ev.expected[0].vals[0].Value, val.V)
+		}
+
+	default:
+		panic(errors.Errorf("promql.Test.compareResult: unexpected result type %T", result))
+	}
+	return nil
+}
+
+// clearCmd is a command that wipes the test's storage state.
+type clearCmd struct{}
+
+func (cmd clearCmd) String() string {
+	return "clear"
+}
+
+// Run executes the command sequence of the test. Until the maximum error number
+// is reached, evaluation errors do not terminate execution.
+func (t *Test) Run() error {
+	for _, cmd := range t.cmds {
+		// TODO(fabxc): aggregate command errors, yield diffs for result
+		// comparison errors.
+		if err := t.exec(cmd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// exec processes a single step of the test.
+func (t *Test) exec(tc testCommand) error {
+	switch cmd := tc.(type) {
+	case *clearCmd:
+		t.clear()
+
+	case *loadCmd:
+		app := t.storage.Appender()
+		if err := cmd.append(app); err != nil {
+			app.Rollback()
+			return err
+		}
+
+		if err := app.Commit(); err != nil {
+			return err
+		}
+
+	case *evalCmd:
+		q, err := t.QueryEngine().NewInstantQuery(t.storage, cmd.expr, cmd.start)
+		if err != nil {
+			return err
+		}
+		defer q.Close()
+		res := q.Exec(t.context)
+		if res.Err != nil {
+			if cmd.fail {
+				return nil
+			}
+			return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", cmd.expr, cmd.line)
+		}
+		if res.Err == nil && cmd.fail {
+			return errors.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
+		}
+
+		err = cmd.compareResult(res.Value)
+		if err != nil {
+			return errors.Wrapf(err, "error in %s %s", cmd, cmd.expr)
+		}
+
+		// Check query returns same result in range mode,
+		// by checking against the middle step.
+		q, err = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute)
+		if err != nil {
+			return err
+		}
+		rangeRes := q.Exec(t.context)
+		if rangeRes.Err != nil {
+			return errors.Wrapf(rangeRes.Err, "error evaluating query %q (line %d) in range mode", cmd.expr, cmd.line)
+		}
+		defer q.Close()
+		if cmd.ordered {
+			// Ordering isn't defined for range queries.
+			return nil
+		}
+		mat := rangeRes.Value.(Matrix)
+		vec := make(Vector, 0, len(mat))
+		for _, series := range mat {
+			for _, point := range series.Points {
+				if point.T == timeMilliseconds(cmd.start) {
+					vec = append(vec, Sample{Metric: series.Metric, Point: point})
+					break
+				}
+			}
+		}
+		if _, ok := res.Value.(Scalar); ok {
+			err = cmd.compareResult(Scalar{V: vec[0].Point.V})
+		} else {
+			err = cmd.compareResult(vec)
+		}
+		if err != nil {
+			return errors.Wrapf(err, "error in %s %s (line %d) range mode", cmd, cmd.expr, cmd.line)
+		}
+
+	default:
+		panic("promql.Test.exec: unknown test command type")
+	}
+	return nil
+}
+
+// clear the current test storage of all inserted samples.
+func (t *Test) clear() {
+	if t.storage != nil {
+		if err := t.storage.Close(); err != nil {
+			t.T.Fatalf("closing test storage: %s", err)
+		}
+	}
+	if t.cancelCtx != nil {
+		t.cancelCtx()
+	}
+	t.storage = teststorage.New(t)
+
+	opts := EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10000,
+		Timeout:    100 * time.Second,
+	}
+
+	t.queryEngine = NewEngine(opts)
+	t.context, t.cancelCtx = context.WithCancel(context.Background())
+}
+
+// Close closes resources associated with the Test.
+func (t *Test) Close() {
+	t.cancelCtx()
+
+	if err := t.storage.Close(); err != nil {
+		t.T.Fatalf("closing test storage: %s", err)
+	}
+}
+
+// almostEqual returns true if the two sample values only differ by a
+// small relative error.
+func almostEqual(a, b float64) bool {
+	// NaN has no equality but for testing we still want to know whether both values
+	// are NaN.
+	if math.IsNaN(a) && math.IsNaN(b) {
+		return true
+	}
+
+	// Cf. http://floating-point-gui.de/errors/comparison/
+	if a == b {
+		return true
+	}
+
+	diff := math.Abs(a - b)
+
+	if a == 0 || b == 0 || diff < minNormal {
+		return diff < epsilon*minNormal
+	}
+	return diff/(math.Abs(a)+math.Abs(b)) < epsilon
+}
+
+func parseNumber(s string) (float64, error) {
+	n, err := strconv.ParseInt(s, 0, 64)
+	f := float64(n)
+	if err != nil {
+		f, err = strconv.ParseFloat(s, 64)
+	}
+	if err != nil {
+		return 0, errors.Wrap(err, "error parsing number")
+	}
+	return f, nil
+}
+
+// LazyLoader lazily loads samples into storage.
+// This is specifically implemented for unit testing of rules.
+type LazyLoader struct {
+	testutil.T
+
+	loadCmd *loadCmd
+
+	storage storage.Storage
+
+	queryEngine *Engine
+	context     context.Context
+	cancelCtx   context.CancelFunc
+}
+
+// NewLazyLoader returns an initialized empty LazyLoader.
+func NewLazyLoader(t testutil.T, input string) (*LazyLoader, error) {
+	ll := &LazyLoader{
+		T: t,
+	}
+	err := ll.parse(input)
+	ll.clear()
+	return ll, err
+}
+
+// parse the given load command.
+func (ll *LazyLoader) parse(input string) error {
+	lines := getLines(input)
+	// Accepts only 'load' command.
+	for i := 0; i < len(lines); i++ {
+		l := lines[i]
+		if len(l) == 0 {
+			continue
+		}
+		if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" {
+			_, cmd, err := parseLoad(lines, i)
+			if err != nil {
+				return err
+			}
+			ll.loadCmd = cmd
+			return nil
+		}
+
+		return raise(i, "invalid command %q", l)
+	}
+	return errors.New("no \"load\" command found")
+}
+
+// clear the current test storage of all inserted samples.
+func (ll *LazyLoader) clear() { + if ll.storage != nil { + if err := ll.storage.Close(); err != nil { + ll.T.Fatalf("closing test storage: %s", err) + } + } + if ll.cancelCtx != nil { + ll.cancelCtx() + } + ll.storage = teststorage.New(ll) + + opts := EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + } + + ll.queryEngine = NewEngine(opts) + ll.context, ll.cancelCtx = context.WithCancel(context.Background()) +} + +// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds). +func (ll *LazyLoader) appendTill(ts int64) error { + app := ll.storage.Appender() + for h, smpls := range ll.loadCmd.defs { + m := ll.loadCmd.metrics[h] + for i, s := range smpls { + if s.T > ts { + // Removing the already added samples. + ll.loadCmd.defs[h] = smpls[i:] + break + } + if _, err := app.Add(m, s.T, s.V); err != nil { + return err + } + if i == len(smpls)-1 { + ll.loadCmd.defs[h] = nil + } + } + } + return app.Commit() +} + +// WithSamplesTill loads the samples till given timestamp and executes the given function. +func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) { + tsMilli := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond + fn(ll.appendTill(int64(tsMilli))) +} + +// QueryEngine returns the LazyLoader's query engine. +func (ll *LazyLoader) QueryEngine() *Engine { + return ll.queryEngine +} + +// Queryable allows querying the LazyLoader's data. +// Note: only the samples till the max timestamp used +// in `WithSamplesTill` can be queried. +func (ll *LazyLoader) Queryable() storage.Queryable { + return ll.storage +} + +// Context returns the LazyLoader's context. +func (ll *LazyLoader) Context() context.Context { + return ll.context +} + +// Storage returns the LazyLoader's storage. +func (ll *LazyLoader) Storage() storage.Storage { + return ll.storage +} + +// Close closes resources associated with the LazyLoader. +func (ll *LazyLoader) Close() { + ll.cancelCtx() + + if err := ll.storage.Close(); err != nil { + ll.T.Fatalf("closing test storage: %s", err) + } +} diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go new file mode 100644 index 0000000000000000000000000000000000000000..a28f06bbae863d4cb90d32c632e79db2bacf321f --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -0,0 +1,306 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promql + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/tsdb/chunkenc" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" +) + +func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix } +func (Vector) Type() parser.ValueType { return parser.ValueTypeVector } +func (Scalar) Type() parser.ValueType { return parser.ValueTypeScalar } +func (String) Type() parser.ValueType { return parser.ValueTypeString } + +// String represents a string value. +type String struct { + T int64 + V string +} + +func (s String) String() string { + return s.V +} + +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) +} + +// Scalar is a data point that's explicitly not associated with a metric. +type Scalar struct { + T int64 + V float64 +} + +func (s Scalar) String() string { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return fmt.Sprintf("scalar: %v @[%v]", v, s.T) +} + +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(s.T) / 1000, v}) +} + +// Series is a stream of data points belonging to a metric. +type Series struct { + Metric labels.Labels `json:"metric"` + Points []Point `json:"values"` +} + +func (s Series) String() string { + vals := make([]string, len(s.Points)) + for i, v := range s.Points { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) +} + +// Point represents a single data point for a given timestamp. +type Point struct { + T int64 + V float64 +} + +func (p Point) String() string { + v := strconv.FormatFloat(p.V, 'f', -1, 64) + return fmt.Sprintf("%v @[%v]", v, p.T) +} + +// MarshalJSON implements json.Marshaler. +func (p Point) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(p.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) +} + +// Sample is a single sample belonging to a metric. +type Sample struct { + Point + + Metric labels.Labels +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, s.Point) +} + +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + M labels.Labels `json:"metric"` + V Point `json:"value"` + }{ + M: s.Metric, + V: s.Point, + } + return json.Marshal(v) +} + +// Vector is basically only an alias for model.Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +// ContainsSameLabelset checks if a vector has samples with the same labelset +// Such a behavior is semantically undefined +// https://github.com/prometheus/prometheus/issues/4562 +func (vec Vector) ContainsSameLabelset() bool { + l := make(map[uint64]struct{}, len(vec)) + for _, s := range vec { + hash := s.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} + } + return false +} + +// Matrix is a slice of Series that implements sort.Interface and +// has a String method. +type Matrix []Series + +func (m Matrix) String() string { + // TODO(fabxc): sort, or can we rely on order from the querier? 
+ strs := make([]string, len(m)) + + for i, ss := range m { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} + +// TotalSamples returns the total number of samples in the series within a matrix. +func (m Matrix) TotalSamples() int { + numSamples := 0 + for _, series := range m { + numSamples += len(series.Points) + } + return numSamples +} + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +// ContainsSameLabelset checks if a matrix has samples with the same labelset +// Such a behavior is semantically undefined +// https://github.com/prometheus/prometheus/issues/4562 +func (m Matrix) ContainsSameLabelset() bool { + l := make(map[uint64]struct{}, len(m)) + for _, ss := range m { + hash := ss.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} + } + return false +} + +// Result holds the resulting value of an execution or an error +// if any occurred. +type Result struct { + Err error + Value parser.Value + Warnings storage.Warnings +} + +// Vector returns a Vector if the result value is one. An error is returned if +// the result was an error or the result value is not a Vector. +func (r *Result) Vector() (Vector, error) { + if r.Err != nil { + return nil, r.Err + } + v, ok := r.Value.(Vector) + if !ok { + return nil, errors.New("query result is not a Vector") + } + return v, nil +} + +// Matrix returns a Matrix. An error is returned if +// the result was an error or the result value is not a Matrix. +func (r *Result) Matrix() (Matrix, error) { + if r.Err != nil { + return nil, r.Err + } + v, ok := r.Value.(Matrix) + if !ok { + return nil, errors.New("query result is not a range Vector") + } + return v, nil +} + +// Scalar returns a Scalar value. An error is returned if +// the result was an error or the result value is not a Scalar. +func (r *Result) Scalar() (Scalar, error) { + if r.Err != nil { + return Scalar{}, r.Err + } + v, ok := r.Value.(Scalar) + if !ok { + return Scalar{}, errors.New("query result is not a Scalar") + } + return v, nil +} + +func (r *Result) String() string { + if r.Err != nil { + return r.Err.Error() + } + if r.Value == nil { + return "" + } + return r.Value.String() +} + +// StorageSeries simulates promql.Series as storage.Series. +type StorageSeries struct { + series Series +} + +// NewStorageSeries returns a StorageSeries from a Series. +func NewStorageSeries(series Series) *StorageSeries { + return &StorageSeries{ + series: series, + } +} + +func (ss *StorageSeries) Labels() labels.Labels { + return ss.series.Metric +} + +// Iterator returns a new iterator of the data of the series. 
+func (ss *StorageSeries) Iterator() chunkenc.Iterator { + return newStorageSeriesIterator(ss.series) +} + +type storageSeriesIterator struct { + points []Point + curr int +} + +func newStorageSeriesIterator(series Series) *storageSeriesIterator { + return &storageSeriesIterator{ + points: series.Points, + curr: -1, + } +} + +func (ssi *storageSeriesIterator) Seek(t int64) bool { + i := ssi.curr + if i < 0 { + i = 0 + } + for ; i < len(ssi.points); i++ { + if ssi.points[i].T >= t { + ssi.curr = i + return true + } + } + ssi.curr = len(ssi.points) - 1 + return false +} + +func (ssi *storageSeriesIterator) At() (t int64, v float64) { + p := ssi.points[ssi.curr] + return p.T, p.V +} + +func (ssi *storageSeriesIterator) Next() bool { + ssi.curr++ + return ssi.curr < len(ssi.points) +} + +func (ssi *storageSeriesIterator) Err() error { + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go new file mode 100644 index 0000000000000000000000000000000000000000..dc55e8a1e26a9c4279fc1f3a833fe163c4c61a6b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -0,0 +1,550 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "context" + "fmt" + "net/url" + "strings" + "sync" + "time" + + html_template "html/template" + + yaml "gopkg.in/yaml.v2" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/template" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + // AlertMetricName is the metric name for synthetic alert timeseries. + alertMetricName = "ALERTS" + // AlertForStateMetricName is the metric name for 'for' state of alert. + alertForStateMetricName = "ALERTS_FOR_STATE" + + // AlertNameLabel is the label name indicating the name of an alert. + alertNameLabel = "alertname" + // AlertStateLabel is the label name indicating the state of an alert. + alertStateLabel = "alertstate" +) + +// AlertState denotes the state of an active alert. +type AlertState int + +const ( + // StateInactive is the state of an alert that is neither firing nor pending. + StateInactive AlertState = iota + // StatePending is the state of an alert that has been active for less than + // the configured threshold duration. + StatePending + // StateFiring is the state of an alert that has been active for longer than + // the configured threshold duration. 
+	StateFiring
+)
+
+func (s AlertState) String() string {
+	switch s {
+	case StateInactive:
+		return "inactive"
+	case StatePending:
+		return "pending"
+	case StateFiring:
+		return "firing"
+	}
+	panic(errors.Errorf("unknown alert state: %d", s))
+}
+
+// Alert is the user-level representation of a single instance of an alerting rule.
+type Alert struct {
+	State AlertState
+
+	Labels      labels.Labels
+	Annotations labels.Labels
+
+	// The value at the last evaluation of the alerting expression.
+	Value float64
+	// The interval during which the condition of this alert held true.
+	// ResolvedAt will be 0 to indicate a still active alert.
+	ActiveAt   time.Time
+	FiredAt    time.Time
+	ResolvedAt time.Time
+	LastSentAt time.Time
+	ValidUntil time.Time
+}
+
+func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
+	if a.State == StatePending {
+		return false
+	}
+
+	// if an alert has been resolved since the last send, resend it
+	if a.ResolvedAt.After(a.LastSentAt) {
+		return true
+	}
+
+	return a.LastSentAt.Add(resendDelay).Before(ts)
+}
+
+// An AlertingRule generates alerts from its vector expression.
+type AlertingRule struct {
+	// The name of the alert.
+	name string
+	// The vector expression from which to generate alerts.
+	vector parser.Expr
+	// The duration for which a labelset needs to persist in the expression
+	// output vector before an alert transitions from Pending to Firing state.
+	holdDuration time.Duration
+	// Extra labels to attach to the resulting alert sample vectors.
+	labels labels.Labels
+	// Non-identifying key/value pairs.
+	annotations labels.Labels
+	// External labels from the global config.
+	externalLabels map[string]string
+	// true if old state has been restored. We start persisting samples for ALERT_FOR_STATE
+	// only after the restoration.
+	restored bool
+	// Protects the below.
+	mtx sync.Mutex
+	// Time in seconds taken to evaluate rule.
+	evaluationDuration time.Duration
+	// Timestamp of last evaluation of rule.
+	evaluationTimestamp time.Time
+	// The health of the alerting rule.
+	health RuleHealth
+	// The last error seen by the alerting rule.
+	lastError error
+	// A map of alerts which are currently active (Pending or Firing), keyed by
+	// the fingerprint of the labelset they correspond to.
+	active map[uint64]*Alert
+
+	logger log.Logger
+}
+
+// NewAlertingRule constructs a new AlertingRule.
+func NewAlertingRule(
+	name string, vec parser.Expr, hold time.Duration,
+	labels, annotations, externalLabels labels.Labels,
+	restored bool, logger log.Logger,
+) *AlertingRule {
+	el := make(map[string]string, len(externalLabels))
+	for _, lbl := range externalLabels {
+		el[lbl.Name] = lbl.Value
+	}
+
+	return &AlertingRule{
+		name:           name,
+		vector:         vec,
+		holdDuration:   hold,
+		labels:         labels,
+		annotations:    annotations,
+		externalLabels: el,
+		health:         HealthUnknown,
+		active:         map[uint64]*Alert{},
+		logger:         logger,
+		restored:       restored,
+	}
+}
+
+// Name returns the name of the alerting rule.
+func (r *AlertingRule) Name() string {
+	return r.name
+}
+
+// SetLastError sets the current error seen by the alerting rule.
+func (r *AlertingRule) SetLastError(err error) {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	r.lastError = err
+}
+
+// LastError returns the last error seen by the alerting rule.
+func (r *AlertingRule) LastError() error {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	return r.lastError
+}
+
+// SetHealth sets the current health of the alerting rule.
+func (r *AlertingRule) SetHealth(health RuleHealth) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.health = health +} + +// Health returns the current health of the alerting rule. +func (r *AlertingRule) Health() RuleHealth { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.health +} + +// Query returns the query expression of the alerting rule. +func (r *AlertingRule) Query() parser.Expr { + return r.vector +} + +// HoldDuration returns the hold duration of the alerting rule. +func (r *AlertingRule) HoldDuration() time.Duration { + return r.holdDuration +} + +// Labels returns the labels of the alerting rule. +func (r *AlertingRule) Labels() labels.Labels { + return r.labels +} + +// Annotations returns the annotations of the alerting rule. +func (r *AlertingRule) Annotations() labels.Labels { + return r.annotations +} + +func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { + lb := labels.NewBuilder(r.labels) + + for _, l := range alert.Labels { + lb.Set(l.Name, l.Value) + } + + lb.Set(labels.MetricName, alertMetricName) + lb.Set(labels.AlertName, r.name) + lb.Set(alertStateLabel, alert.State.String()) + + s := promql.Sample{ + Metric: lb.Labels(), + Point: promql.Point{T: timestamp.FromTime(ts), V: 1}, + } + return s +} + +// forStateSample returns the sample for ALERTS_FOR_STATE. +func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample { + lb := labels.NewBuilder(r.labels) + + for _, l := range alert.Labels { + lb.Set(l.Name, l.Value) + } + + lb.Set(labels.MetricName, alertForStateMetricName) + lb.Set(labels.AlertName, r.name) + + s := promql.Sample{ + Metric: lb.Labels(), + Point: promql.Point{T: timestamp.FromTime(ts), V: v}, + } + return s +} + +// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. +func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.evaluationDuration = dur +} + +// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule. +func (r *AlertingRule) GetEvaluationDuration() time.Duration { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.evaluationDuration +} + +// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. +func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.evaluationTimestamp = ts +} + +// GetEvaluationTimestamp returns the time the evaluation took place. +func (r *AlertingRule) GetEvaluationTimestamp() time.Time { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.evaluationTimestamp +} + +// SetRestored updates the restoration state of the alerting rule. +func (r *AlertingRule) SetRestored(restored bool) { + r.restored = restored +} + +// resolvedRetention is the duration for which a resolved alert instance +// is kept in memory state and consequently repeatedly sent to the AlertManager. +const resolvedRetention = 15 * time.Minute + +// Eval evaluates the rule expression and then creates pending alerts and fires +// or removes previously pending alerts accordingly. 
+func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL) (promql.Vector, error) {
+	res, err := query(ctx, r.vector.String(), ts)
+	if err != nil {
+		r.SetHealth(HealthBad)
+		r.SetLastError(err)
+		return nil, err
+	}
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	// Create pending alerts for any new vector elements in the alert expression
+	// or update the expression value for existing elements.
+	resultFPs := map[uint64]struct{}{}
+
+	var vec promql.Vector
+	var alerts = make(map[uint64]*Alert, len(res))
+	for _, smpl := range res {
+		// Provide the alert information to the template.
+		l := make(map[string]string, len(smpl.Metric))
+		for _, lbl := range smpl.Metric {
+			l[lbl.Name] = lbl.Value
+		}
+
+		tmplData := template.AlertTemplateData(l, r.externalLabels, smpl.V)
+		// Inject some convenience variables that are easier to remember for users
+		// who are not used to Go's templating system.
+		defs := []string{
+			"{{$labels := .Labels}}",
+			"{{$externalLabels := .ExternalLabels}}",
+			"{{$value := .Value}}",
+		}
+
+		expand := func(text string) string {
+			tmpl := template.NewTemplateExpander(
+				ctx,
+				strings.Join(append(defs, text), ""),
+				"__alert_"+r.Name(),
+				tmplData,
+				model.Time(timestamp.FromTime(ts)),
+				template.QueryFunc(query),
+				externalURL,
+			)
+			result, err := tmpl.Expand()
+			if err != nil {
+				result = fmt.Sprintf("<error expanding template: %s>", err)
+				level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData)
+			}
+			return result
+		}
+
+		lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName)
+
+		for _, l := range r.labels {
+			lb.Set(l.Name, expand(l.Value))
+		}
+		lb.Set(labels.AlertName, r.Name())
+
+		annotations := make(labels.Labels, 0, len(r.annotations))
+		for _, a := range r.annotations {
+			annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
+		}
+
+		lbs := lb.Labels()
+		h := lbs.Hash()
+		resultFPs[h] = struct{}{}
+
+		if _, ok := alerts[h]; ok {
+			err = fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
+			// We have already acquired the lock above hence using SetHealth and
+			// SetLastError will deadlock.
+			r.health = HealthBad
+			r.lastError = err
+			return nil, err
+		}
+
+		alerts[h] = &Alert{
+			Labels:      lbs,
+			Annotations: annotations,
+			ActiveAt:    ts,
+			State:       StatePending,
+			Value:       smpl.V,
+		}
+	}
+
+	for h, a := range alerts {
+		// Check whether we already have alerting state for the identifying label set.
+		// Update the last value and annotations if so, create a new alert entry otherwise.
+		if alert, ok := r.active[h]; ok && alert.State != StateInactive {
+			alert.Value = a.Value
+			alert.Annotations = a.Annotations
+			continue
+		}
+
+		r.active[h] = a
+	}
+
+	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
+	for fp, a := range r.active {
+		if _, ok := resultFPs[fp]; !ok {
+			// If the alert was previously firing, keep it around for a given
+			// retention time so it is reported as resolved to the AlertManager.
+ if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { + delete(r.active, fp) + } + if a.State != StateInactive { + a.State = StateInactive + a.ResolvedAt = ts + } + continue + } + + if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration { + a.State = StateFiring + a.FiredAt = ts + } + + if r.restored { + vec = append(vec, r.sample(a, ts)) + vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) + } + } + + // We have already acquired the lock above hence using SetHealth and + // SetLastError will deadlock. + r.health = HealthGood + r.lastError = err + return vec, nil +} + +// State returns the maximum state of alert instances for this rule. +// StateFiring > StatePending > StateInactive +func (r *AlertingRule) State() AlertState { + r.mtx.Lock() + defer r.mtx.Unlock() + + maxState := StateInactive + for _, a := range r.active { + if a.State > maxState { + maxState = a.State + } + } + return maxState +} + +// ActiveAlerts returns a slice of active alerts. +func (r *AlertingRule) ActiveAlerts() []*Alert { + var res []*Alert + for _, a := range r.currentAlerts() { + if a.ResolvedAt.IsZero() { + res = append(res, a) + } + } + return res +} + +// currentAlerts returns all instances of alerts for this rule. This may include +// inactive alerts that were previously firing. +func (r *AlertingRule) currentAlerts() []*Alert { + r.mtx.Lock() + defer r.mtx.Unlock() + + alerts := make([]*Alert, 0, len(r.active)) + + for _, a := range r.active { + anew := *a + alerts = append(alerts, &anew) + } + return alerts +} + +// ForEachActiveAlert runs the given function on each alert. +// This should be used when you want to use the actual alerts from the AlertingRule +// and not on its copy. +// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'. +func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { + r.mtx.Lock() + defer r.mtx.Unlock() + + for _, a := range r.active { + f(a) + } +} + +func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { + alerts := []*Alert{} + r.ForEachActiveAlert(func(alert *Alert) { + if alert.needsSending(ts, resendDelay) { + alert.LastSentAt = ts + // Allow for a couple Eval or Alertmanager send failures + delta := resendDelay + if interval > resendDelay { + delta = interval + } + alert.ValidUntil = ts.Add(3 * delta) + anew := *alert + alerts = append(alerts, &anew) + } + }) + notifyFunc(ctx, r.vector.String(), alerts...) +} + +func (r *AlertingRule) String() string { + ar := rulefmt.Rule{ + Alert: r.name, + Expr: r.vector.String(), + For: model.Duration(r.holdDuration), + Labels: r.labels.Map(), + Annotations: r.annotations.Map(), + } + + byt, err := yaml.Marshal(ar) + if err != nil { + return fmt.Sprintf("error marshaling alerting rule: %s", err.Error()) + } + + return string(byt) +} + +// HTMLSnippet returns an HTML snippet representing this alerting rule. The +// resulting snippet is expected to be presented in a
<pre> element, so that
+// line breaks and other returned whitespace is respected.
+func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
+	alertMetric := model.Metric{
+		model.MetricNameLabel: alertMetricName,
+		alertNameLabel:        model.LabelValue(r.name),
+	}
+
+	labelsMap := make(map[string]string, len(r.labels))
+	for _, l := range r.labels {
+		labelsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
+	}
+
+	annotationsMap := make(map[string]string, len(r.annotations))
+	for _, l := range r.annotations {
+		annotationsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
+	}
+
+	ar := rulefmt.Rule{
+		Alert:       fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
+		Expr:        fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
+		For:         model.Duration(r.holdDuration),
+		Labels:      labelsMap,
+		Annotations: annotationsMap,
+	}
+
+	byt, err := yaml.Marshal(ar)
+	if err != nil {
+		return html_template.HTML(fmt.Sprintf("error marshaling alerting rule: %q", html_template.HTMLEscapeString(err.Error())))
+	}
+	return html_template.HTML(byt)
+}
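+
+// A rough usage sketch: an AlertingRule is typically built from a parsed
+// expression and evaluated with a QueryFunc (for example, one returned by
+// EngineQueryFunc in this package). queryFunc below is assumed to be wired
+// up by the caller, and the names and values are illustrative only.
+//
+//	expr, _ := parser.ParseExpr(`up == 0`)
+//	rule := NewAlertingRule(
+//		"InstanceDown", expr, 5*time.Minute,
+//		labels.FromStrings("severity", "critical"), // extra labels
+//		nil, nil, // no annotations or external labels
+//		true, log.NewNopLogger(),
+//	)
+//	vec, err := rule.Eval(context.Background(), time.Now(), queryFunc, nil)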
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e549fef6778372e4b3fa29e2d76a34a2d730dde
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/rules/manager.go
@@ -0,0 +1,1079 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+	"context"
+	html_template "html/template"
+	"math"
+	"net/url"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/pkg/value"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/storage"
+)
+
+// RuleHealth describes the health state of a rule.
+type RuleHealth string
+
+// The possible health states of a rule based on the last execution.
+const (
+	HealthUnknown RuleHealth = "unknown"
+	HealthGood    RuleHealth = "ok"
+	HealthBad     RuleHealth = "err"
+)
+
+// Constants for instrumentation.
+const namespace = "prometheus"
+
+// Metrics for rule evaluation.
+type Metrics struct {
+	evalDuration        prometheus.Summary
+	iterationDuration   prometheus.Summary
+	iterationsMissed    prometheus.Counter
+	iterationsScheduled prometheus.Counter
+	evalTotal           *prometheus.CounterVec
+	evalFailures        *prometheus.CounterVec
+	groupInterval       *prometheus.GaugeVec
+	groupLastEvalTime   *prometheus.GaugeVec
+	groupLastDuration   *prometheus.GaugeVec
+	groupRules          *prometheus.GaugeVec
+}
+
+// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
+// if not nil.
+func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
+	m := &Metrics{
+		evalDuration: prometheus.NewSummary(
+			prometheus.SummaryOpts{
+				Namespace:  namespace,
+				Name:       "rule_evaluation_duration_seconds",
+				Help:       "The duration for a rule to execute.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			}),
+		iterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
+			Namespace:  namespace,
+			Name:       "rule_group_duration_seconds",
+			Help:       "The duration of rule group evaluations.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		}),
+		iterationsMissed: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_group_iterations_missed_total",
+			Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
+		}),
+		iterationsScheduled: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_group_iterations_total",
+			Help:      "The total number of scheduled rule group evaluations, whether executed or missed.",
+		}),
+		evalTotal: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "rule_evaluations_total",
+				Help:      "The total number of rule evaluations.",
+			},
+			[]string{"rule_group"},
+		),
+		evalFailures: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "rule_evaluation_failures_total",
+				Help:      "The total number of rule evaluation failures.",
+			},
+			[]string{"rule_group"},
+		),
+		groupInterval: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_interval_seconds",
+				Help:      "The interval of a rule group.",
+			},
+			[]string{"rule_group"},
+		),
+		groupLastEvalTime: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_last_evaluation_timestamp_seconds",
+				Help:      "The timestamp of the last rule group evaluation in seconds.",
+			},
+			[]string{"rule_group"},
+		),
+		groupLastDuration: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_last_duration_seconds",
+				Help:      "The duration of the last rule group evaluation.",
+			},
+			[]string{"rule_group"},
+		),
+		groupRules: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_rules",
+				Help:      "The number of rules.",
+			},
+			[]string{"rule_group"},
+		),
+	}
+
+	if reg != nil {
+		reg.MustRegister(
+			m.evalDuration,
+			m.iterationDuration,
+			m.iterationsMissed,
+			m.iterationsScheduled,
+			m.evalTotal,
+			m.evalFailures,
+			m.groupInterval,
+			m.groupLastEvalTime,
+			m.groupLastDuration,
+			m.groupRules,
+		)
+	}
+
+	return m
+}
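+
+// A minimal wiring sketch, assuming the caller owns a Prometheus registry;
+// sharing the returned *Metrics across groups via ManagerOptions avoids
+// registering the same collectors twice:
+//
+//	reg := prometheus.NewRegistry()
+//	metrics := NewGroupMetrics(reg)
+//	opts := &ManagerOptions{Registerer: reg, Metrics: metrics /* ... */}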
+
+// QueryFunc processes PromQL queries.
+type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)
+
+// EngineQueryFunc returns a new query function that executes instant queries against
+// the given engine.
+// It converts scalar into vector results.
+func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
+	return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		q, err := engine.NewInstantQuery(q, qs, t)
+		if err != nil {
+			return nil, err
+		}
+		res := q.Exec(ctx)
+		if res.Err != nil {
+			return nil, res.Err
+		}
+		switch v := res.Value.(type) {
+		case promql.Vector:
+			return v, nil
+		case promql.Scalar:
+			return promql.Vector{promql.Sample{
+				Point:  promql.Point(v),
+				Metric: labels.Labels{},
+			}}, nil
+		default:
+			return nil, errors.New("rule result is not a vector or scalar")
+		}
+	}
+}
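+
+// For example, a sketch assuming an engine and a queryable storage already
+// exist in the caller's code:
+//
+//	queryFunc := EngineQueryFunc(engine, db)
+//	vec, err := queryFunc(ctx, `sum(rate(http_requests_total[5m]))`, time.Now())
+//	// vec is always a promql.Vector; a scalar result is wrapped into a
+//	// single metric-less sample.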
+
+// A Rule encapsulates a vector expression which is evaluated at a specified
+// interval and acted upon (currently either recorded or used for alerting).
+type Rule interface {
+	Name() string
+	// Labels of the rule.
+	Labels() labels.Labels
+	// eval evaluates the rule, including any associated recording or alerting actions.
+	Eval(context.Context, time.Time, QueryFunc, *url.URL) (promql.Vector, error)
+	// String returns a human-readable string representation of the rule.
+	String() string
+	// SetLastError sets the current error experienced by the rule.
+	SetLastError(error)
+	// LastError returns the last error experienced by the rule.
+	LastError() error
+	// SetHealth sets the current health of the rule.
+	SetHealth(RuleHealth)
+	// Health returns the current health of the rule.
+	Health() RuleHealth
+	SetEvaluationDuration(time.Duration)
+	// GetEvaluationDuration returns last evaluation duration.
+	// NOTE: Used dynamically by rules.html template.
+	GetEvaluationDuration() time.Duration
+	SetEvaluationTimestamp(time.Time)
+	// GetEvaluationTimestamp returns last evaluation timestamp.
+	// NOTE: Used dynamically by rules.html template.
+	GetEvaluationTimestamp() time.Time
+	// HTMLSnippet returns a human-readable string representation of the rule,
+	// decorated with HTML elements for use in the web frontend.
+	HTMLSnippet(pathPrefix string) html_template.HTML
+}
+
+// Group is a set of rules that have a logical relation.
+type Group struct {
+	name                 string
+	file                 string
+	interval             time.Duration
+	rules                []Rule
+	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
+	staleSeries          []labels.Labels
+	opts                 *ManagerOptions
+	mtx                  sync.Mutex
+	evaluationDuration   time.Duration
+	evaluationTimestamp  time.Time
+
+	shouldRestore bool
+
+	markStale   bool
+	done        chan struct{}
+	terminated  chan struct{}
+	managerDone chan struct{}
+
+	logger log.Logger
+
+	metrics *Metrics
+}
+
+type GroupOptions struct {
+	Name, File    string
+	Interval      time.Duration
+	Rules         []Rule
+	ShouldRestore bool
+	Opts          *ManagerOptions
+	done          chan struct{}
+}
+
+// NewGroup makes a new Group with the given name, options, and rules.
+func NewGroup(o GroupOptions) *Group {
+	metrics := o.Opts.Metrics
+	if metrics == nil {
+		metrics = NewGroupMetrics(o.Opts.Registerer)
+	}
+
+	key := groupKey(o.File, o.Name)
+	metrics.evalTotal.WithLabelValues(key)
+	metrics.evalFailures.WithLabelValues(key)
+	metrics.groupLastEvalTime.WithLabelValues(key)
+	metrics.groupLastDuration.WithLabelValues(key)
+	metrics.groupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
+	metrics.groupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
+
+	return &Group{
+		name:                 o.Name,
+		file:                 o.File,
+		interval:             o.Interval,
+		rules:                o.Rules,
+		shouldRestore:        o.ShouldRestore,
+		opts:                 o.Opts,
+		seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
+		done:                 make(chan struct{}),
+		managerDone:          o.done,
+		terminated:           make(chan struct{}),
+		logger:               log.With(o.Opts.Logger, "group", o.Name),
+		metrics:              metrics,
+	}
+}
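+
+// A construction sketch; rs and opts stand in for a rule slice and
+// *ManagerOptions the caller has already assembled:
+//
+//	g := NewGroup(GroupOptions{
+//		Name:          "example.rules",
+//		File:          "/etc/prometheus/example.rules.yml",
+//		Interval:      time.Minute,
+//		Rules:         rs,
+//		ShouldRestore: true,
+//		Opts:          opts,
+//	})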
+
+// Name returns the group name.
+func (g *Group) Name() string { return g.name }
+
+// File returns the group's file.
+func (g *Group) File() string { return g.file }
+
+// Rules returns the group's rules.
+func (g *Group) Rules() []Rule { return g.rules }
+
+// Interval returns the group's interval.
+func (g *Group) Interval() time.Duration { return g.interval }
+
+func (g *Group) run(ctx context.Context) {
+	defer close(g.terminated)
+
+	// Wait an initial amount to have consistently slotted intervals.
+	evalTimestamp := g.evalTimestamp().Add(g.interval)
+	select {
+	case <-time.After(time.Until(evalTimestamp)):
+	case <-g.done:
+		return
+	}
+
+	ctx = promql.NewOriginContext(ctx, map[string]interface{}{
+		"ruleGroup": map[string]string{
+			"file": g.File(),
+			"name": g.Name(),
+		},
+	})
+
+	iter := func() {
+		g.metrics.iterationsScheduled.Inc()
+
+		start := time.Now()
+		g.Eval(ctx, evalTimestamp)
+		timeSinceStart := time.Since(start)
+
+		g.metrics.iterationDuration.Observe(timeSinceStart.Seconds())
+		g.setEvaluationDuration(timeSinceStart)
+		g.setEvaluationTimestamp(start)
+	}
+
+	// The assumption here is that since the ticker was started after having
+	// waited for `evalTimestamp` to pass, the ticks will trigger soon
+	// after each `evalTimestamp + N * g.interval` occurrence.
+	tick := time.NewTicker(g.interval)
+	defer tick.Stop()
+
+	defer func() {
+		if !g.markStale {
+			return
+		}
+		go func(now time.Time) {
+			for _, rule := range g.seriesInPreviousEval {
+				for _, r := range rule {
+					g.staleSeries = append(g.staleSeries, r)
+				}
+			}
+			// seriesInPreviousEval is no longer needed and can be garbage collected.
+			g.seriesInPreviousEval = nil
+			// Wait for 2 intervals to give renamed rules the opportunity
+			// to insert new series into the TSDB. At this point, any
+			// renamed rule should already have been started.
+			select {
+			case <-g.managerDone:
+			case <-time.After(2 * g.interval):
+				g.cleanupStaleSeries(now)
+			}
+		}(time.Now())
+	}()
+
+	iter()
+	if g.shouldRestore {
+		// If we have to restore, we wait for another Eval to finish.
+		// The reason is that during the first eval (or before it)
+		// we might not have enough data scraped, and recording rules would not
+		// have updated the latest values, on which some alerts might depend.
+		select {
+		case <-g.done:
+			return
+		case <-tick.C:
+			missed := (time.Since(evalTimestamp) / g.interval) - 1
+			if missed > 0 {
+				g.metrics.iterationsMissed.Add(float64(missed))
+				g.metrics.iterationsScheduled.Add(float64(missed))
+			}
+			evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
+			iter()
+		}
+
+		g.RestoreForState(time.Now())
+		g.shouldRestore = false
+	}
+
+	for {
+		select {
+		case <-g.done:
+			return
+		default:
+			select {
+			case <-g.done:
+				return
+			case <-tick.C:
+				missed := (time.Since(evalTimestamp) / g.interval) - 1
+				if missed > 0 {
+					g.metrics.iterationsMissed.Add(float64(missed))
+					g.metrics.iterationsScheduled.Add(float64(missed))
+				}
+				evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
+				iter()
+			}
+		}
+	}
+}
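+
+// To make the missed-iteration arithmetic above concrete: with a 1m interval,
+// if 3m30s have elapsed since evalTimestamp when the tick fires, then
+// missed = (3m30s / 1m) - 1 = 2, both counters are incremented by 2, and
+// evalTimestamp advances by (2+1)*1m so it stays on the group's slotted grid.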
+
+func (g *Group) stop() {
+	close(g.done)
+	<-g.terminated
+}
+
+func (g *Group) hash() uint64 {
+	l := labels.New(
+		labels.Label{Name: "name", Value: g.name},
+		labels.Label{Name: "file", Value: g.file},
+	)
+	return l.Hash()
+}
+
+// AlertingRules returns the list of the group's alerting rules.
+func (g *Group) AlertingRules() []*AlertingRule {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+
+	var alerts []*AlertingRule
+	for _, rule := range g.rules {
+		if alertingRule, ok := rule.(*AlertingRule); ok {
+			alerts = append(alerts, alertingRule)
+		}
+	}
+	sort.Slice(alerts, func(i, j int) bool {
+		return alerts[i].State() > alerts[j].State() ||
+			(alerts[i].State() == alerts[j].State() &&
+				alerts[i].Name() < alerts[j].Name())
+	})
+	return alerts
+}
+
+// HasAlertingRules returns true if the group contains at least one AlertingRule.
+func (g *Group) HasAlertingRules() bool {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+
+	for _, rule := range g.rules {
+		if _, ok := rule.(*AlertingRule); ok {
+			return true
+		}
+	}
+	return false
+}
+
+// GetEvaluationDuration returns the time in seconds it took to evaluate the rule group.
+func (g *Group) GetEvaluationDuration() time.Duration {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	return g.evaluationDuration
+}
+
+// setEvaluationDuration sets the time in seconds the last evaluation took.
+func (g *Group) setEvaluationDuration(dur time.Duration) {
+	g.metrics.groupLastDuration.WithLabelValues(groupKey(g.file, g.name)).Set(dur.Seconds())
+
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	g.evaluationDuration = dur
+}
+
+// GetEvaluationTimestamp returns the time the last evaluation of the rule group took place.
+func (g *Group) GetEvaluationTimestamp() time.Time {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	return g.evaluationTimestamp
+}
+
+// setEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
+func (g *Group) setEvaluationTimestamp(ts time.Time) {
+	g.metrics.groupLastEvalTime.WithLabelValues(groupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
+
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	g.evaluationTimestamp = ts
+}
+
+// evalTimestamp returns the immediately preceding consistently slotted evaluation time.
+func (g *Group) evalTimestamp() time.Time {
+	var (
+		offset = int64(g.hash() % uint64(g.interval))
+		now    = time.Now().UnixNano()
+		adjNow = now - offset
+		base   = adjNow - (adjNow % int64(g.interval))
+	)
+
+	return time.Unix(0, base+offset).UTC()
+}
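+
+// A worked example of the slotting: with interval = 1m and an offset of
+// g.hash() % uint64(interval) = 13s, a call at 10:05:42 gives
+// adjNow = 10:05:29, base = 10:05:00, and so evalTimestamp = 10:05:13,
+// the most recent 13s-offset slot at or before now.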
+
+func nameAndLabels(rule Rule) string {
+	return rule.Name() + rule.Labels().String()
+}
+
+// CopyState copies the alerting rule and staleness related state from the given group.
+//
+// Rules are matched based on their name and labels. If there are duplicates, the
+// first is matched with the first, second with the second etc.
+func (g *Group) CopyState(from *Group) {
+	g.evaluationDuration = from.evaluationDuration
+
+	ruleMap := make(map[string][]int, len(from.rules))
+
+	for fi, fromRule := range from.rules {
+		nameAndLabels := nameAndLabels(fromRule)
+		l := ruleMap[nameAndLabels]
+		ruleMap[nameAndLabels] = append(l, fi)
+	}
+
+	for i, rule := range g.rules {
+		nameAndLabels := nameAndLabels(rule)
+		indexes := ruleMap[nameAndLabels]
+		if len(indexes) == 0 {
+			continue
+		}
+		fi := indexes[0]
+		g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
+		ruleMap[nameAndLabels] = indexes[1:]
+
+		ar, ok := rule.(*AlertingRule)
+		if !ok {
+			continue
+		}
+		far, ok := from.rules[fi].(*AlertingRule)
+		if !ok {
+			continue
+		}
+
+		for fp, a := range far.active {
+			ar.active[fp] = a
+		}
+	}
+
+	// Handle deleted and unmatched duplicate rules.
+	g.staleSeries = from.staleSeries
+	for fi, fromRule := range from.rules {
+		nameAndLabels := nameAndLabels(fromRule)
+		l := ruleMap[nameAndLabels]
+		if len(l) != 0 {
+			for _, series := range from.seriesInPreviousEval[fi] {
+				g.staleSeries = append(g.staleSeries, series)
+			}
+		}
+	}
+}
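+
+// For instance, if the old and new configuration both contain two rules named
+// "HighLatency" with identical labels, the first old instance hands its
+// active alerts and previous-eval series to the first new instance and the
+// second to the second; old instances left unmatched only contribute their
+// previous-eval series to staleSeries.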
+
+// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
+func (g *Group) Eval(ctx context.Context, ts time.Time) {
+	for i, rule := range g.rules {
+		select {
+		case <-g.done:
+			return
+		default:
+		}
+
+		func(i int, rule Rule) {
+			sp, ctx := opentracing.StartSpanFromContext(ctx, "rule")
+			sp.SetTag("name", rule.Name())
+			defer func(t time.Time) {
+				sp.Finish()
+
+				since := time.Since(t)
+				g.metrics.evalDuration.Observe(since.Seconds())
+				rule.SetEvaluationDuration(since)
+				rule.SetEvaluationTimestamp(t)
+			}(time.Now())
+
+			g.metrics.evalTotal.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
+
+			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
+			if err != nil {
+				// Canceled queries are intentional termination of queries. This normally
+				// happens on shutdown and thus we skip logging of any errors here.
+				if _, ok := err.(promql.ErrQueryCanceled); !ok {
+					level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
+				}
+				g.metrics.evalFailures.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
+				return
+			}
+
+			if ar, ok := rule.(*AlertingRule); ok {
+				ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
+			}
+			var (
+				numOutOfOrder = 0
+				numDuplicates = 0
+			)
+
+			app := g.opts.Appendable.Appender()
+			seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
+			defer func() {
+				if err := app.Commit(); err != nil {
+					level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
+					return
+				}
+				g.seriesInPreviousEval[i] = seriesReturned
+			}()
+			for _, s := range vector {
+				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
+					switch errors.Cause(err) {
+					case storage.ErrOutOfOrderSample:
+						numOutOfOrder++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					case storage.ErrDuplicateSampleForTimestamp:
+						numDuplicates++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					default:
+						level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					}
+				} else {
+					seriesReturned[s.Metric.String()] = s.Metric
+				}
+			}
+			if numOutOfOrder > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
+			}
+			if numDuplicates > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
+			}
+
+			for metric, lset := range g.seriesInPreviousEval[i] {
+				if _, ok := seriesReturned[metric]; !ok {
+					// Series no longer exposed, mark it stale.
+					_, err = app.Add(lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+					switch errors.Cause(err) {
+					case nil:
+					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+						// Do not count these in logging, as this is expected if series
+						// is exposed from a different rule.
+					default:
+						level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", metric, "err", err)
+					}
+				}
+			}
+		}(i, rule)
+	}
+	g.cleanupStaleSeries(ts)
+}
+
+func (g *Group) cleanupStaleSeries(ts time.Time) {
+	if len(g.staleSeries) == 0 {
+		return
+	}
+	app := g.opts.Appendable.Appender()
+	for _, s := range g.staleSeries {
+		// Rule that produced series no longer configured, mark it stale.
+		_, err := app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+		switch errors.Cause(err) {
+		case nil:
+		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+			// Do not count these in logging, as this is expected if series
+			// is exposed from a different rule.
+		default:
+			level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
+		}
+	}
+	if err := app.Commit(); err != nil {
+		level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
+	} else {
+		g.staleSeries = nil
+	}
+}
+
+// RestoreForState restores the 'for' state of the alerts
+// by looking up last ActiveAt from storage.
+func (g *Group) RestoreForState(ts time.Time) {
+	maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
+	// We allow restoration only if alerts were active within the outage tolerance window before this restart.
+	mint := ts.Add(-g.opts.OutageTolerance)
+	mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
+	q, err := g.opts.TSDB.Querier(g.opts.Context, mintMS, maxtMS)
+	if err != nil {
+		level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
+		return
+	}
+	defer func() {
+		if err := q.Close(); err != nil {
+			level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
+		}
+	}()
+
+	for _, rule := range g.Rules() {
+		alertRule, ok := rule.(*AlertingRule)
+		if !ok {
+			continue
+		}
+
+		alertHoldDuration := alertRule.HoldDuration()
+		if alertHoldDuration < g.opts.ForGracePeriod {
+			// If alertHoldDuration is already less than grace period, we would not
+			// like to make it wait for `g.opts.ForGracePeriod` time before firing.
+			// Hence we skip restoration, which will make it wait for alertHoldDuration.
+			alertRule.SetRestored(true)
+			continue
+		}
+
+		alertRule.ForEachActiveAlert(func(a *Alert) {
+			smpl := alertRule.forStateSample(a, time.Now(), 0)
+			var matchers []*labels.Matcher
+			for _, l := range smpl.Metric {
+				mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
+				if err != nil {
+					panic(err)
+				}
+				matchers = append(matchers, mt)
+			}
+
+			sset, _, err := q.Select(false, nil, matchers...)
+			if err != nil {
+				level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
+					labels.AlertName, alertRule.Name(), "stage", "Select", "err", err)
+				return
+			}
+
+			seriesFound := false
+			var s storage.Series
+			for sset.Next() {
+				// Query assures that smpl.Metric is included in sset.At().Labels(),
+				// hence just checking the length would act like equality.
+				// (This is faster than calling labels.Compare again as we already have some info).
+				if len(sset.At().Labels()) == len(smpl.Metric) {
+					s = sset.At()
+					seriesFound = true
+					break
+				}
+			}
+
+			if !seriesFound {
+				return
+			}
+
+			// Series found for the 'for' state.
+			var t int64
+			var v float64
+			it := s.Iterator()
+			for it.Next() {
+				t, v = it.At()
+			}
+			if it.Err() != nil {
+				level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
+					labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
+				return
+			}
+			if value.IsStaleNaN(v) { // Alert was not active.
+				return
+			}
+
+			downAt := time.Unix(t/1000, 0).UTC()
+			restoredActiveAt := time.Unix(int64(v), 0).UTC()
+			timeSpentPending := downAt.Sub(restoredActiveAt)
+			timeRemainingPending := alertHoldDuration - timeSpentPending
+
+			if timeRemainingPending <= 0 {
+				// It means that alert was firing when prometheus went down.
+				// In the next Eval, the state of this alert will be set back to
+				// firing again if it's still firing in that Eval.
+				// Nothing to be done in this case.
+			} else if timeRemainingPending < g.opts.ForGracePeriod {
+				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
+				//                            /* new firing time */      /* moving back by hold duration */
+				//
+				// Proof of correctness:
+				// firingTime = restoredActiveAt.Add(alertHoldDuration)
+				//            = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
+				//            = ts + m.opts.ForGracePeriod
+				//
+				// Time remaining to fire = firingTime.Sub(ts)
+				//                        = (ts + m.opts.ForGracePeriod) - ts
+				//                        = m.opts.ForGracePeriod
+				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
+			} else {
+				// By shifting ActiveAt to the future (ActiveAt + some_duration),
+				// the total pending time from the original ActiveAt
+				// would be `alertHoldDuration + some_duration`.
+				// Here, some_duration = downDuration.
+				downDuration := ts.Sub(downAt)
+				restoredActiveAt = restoredActiveAt.Add(downDuration)
+			}
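+			// Worked example: with alertHoldDuration = 10m, 4m spent pending before
+			// the outage and 2m of downtime (and a grace period no larger than the
+			// remaining 6m), ActiveAt moves forward by the 2m of downtime, so the
+			// alert stays pending for the remaining 6m after the restart.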
+
+			a.ActiveAt = restoredActiveAt
+			level.Debug(g.logger).Log("msg", "'for' state restored",
+				labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
+				"labels", a.Labels.String())
+
+		})
+
+		alertRule.SetRestored(true)
+	}
+
+}
+
+// Equals reports whether two groups are the same.
+func (g *Group) Equals(ng *Group) bool {
+	if g.name != ng.name {
+		return false
+	}
+
+	if g.file != ng.file {
+		return false
+	}
+
+	if g.interval != ng.interval {
+		return false
+	}
+
+	if len(g.rules) != len(ng.rules) {
+		return false
+	}
+
+	for i, gr := range g.rules {
+		if gr.String() != ng.rules[i].String() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// The Manager manages recording and alerting rules.
+type Manager struct {
+	opts     *ManagerOptions
+	groups   map[string]*Group
+	mtx      sync.RWMutex
+	block    chan struct{}
+	done     chan struct{}
+	restored bool
+
+	logger log.Logger
+}
+
+// NotifyFunc sends notifications about a set of alerts generated by the given expression.
+type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
+
+// ManagerOptions bundles options for the Manager.
+type ManagerOptions struct {
+	ExternalURL     *url.URL
+	QueryFunc       QueryFunc
+	NotifyFunc      NotifyFunc
+	Context         context.Context
+	Appendable      storage.Appendable
+	TSDB            storage.Storage
+	Logger          log.Logger
+	Registerer      prometheus.Registerer
+	OutageTolerance time.Duration
+	ForGracePeriod  time.Duration
+	ResendDelay     time.Duration
+
+	Metrics *Metrics
+}
+
+// NewManager returns an implementation of Manager, ready to be started
+// by calling the Run method.
+func NewManager(o *ManagerOptions) *Manager {
+	if o.Metrics == nil {
+		o.Metrics = NewGroupMetrics(o.Registerer)
+	}
+
+	m := &Manager{
+		groups: map[string]*Group{},
+		opts:   o,
+		block:  make(chan struct{}),
+		done:   make(chan struct{}),
+		logger: o.Logger,
+	}
+
+	return m
+}
+
+// Run starts processing of the rule manager.
+func (m *Manager) Run() {
+	close(m.block)
+}
+
+// Stop the rule manager's rule evaluation cycles.
+func (m *Manager) Stop() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	level.Info(m.logger).Log("msg", "Stopping rule manager...")
+
+	for _, eg := range m.groups {
+		eg.stop()
+	}
+
+	// Shut down the groups waiting multiple evaluation intervals to write
+	// staleness markers.
+	close(m.done)
+
+	level.Info(m.logger).Log("msg", "Rule manager stopped")
+}
+
+// Update the rule manager's state as the config requires. If
+// loading the new rules fails, the old rule set is restored.
+func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	groups, errs := m.LoadGroups(interval, externalLabels, files...)
+	if errs != nil {
+		for _, e := range errs {
+			level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
+		}
+		return errors.New("error loading rules, previous rule set restored")
+	}
+	m.restored = true
+
+	var wg sync.WaitGroup
+	for _, newg := range groups {
+		// If there is an old group with the same identifier, check whether the
+		// new group equals the old one; if so, keep the old group and skip the
+		// new one. Otherwise, stop the old group, wait for it to finish its
+		// current iteration, and then copy its state into the new group.
+		gn := groupKey(newg.file, newg.name)
+		oldg, ok := m.groups[gn]
+		delete(m.groups, gn)
+
+		if ok && oldg.Equals(newg) {
+			groups[gn] = oldg
+			continue
+		}
+
+		wg.Add(1)
+		go func(newg *Group) {
+			if ok {
+				oldg.stop()
+				newg.CopyState(oldg)
+			}
+			go func() {
+				// Wait with starting evaluation until the rule manager
+				// is told to run. This is necessary to avoid running
+				// queries against a bootstrapping storage.
+				<-m.block
+				newg.run(m.opts.Context)
+			}()
+			wg.Done()
+		}(newg)
+	}
+
+	// Stop remaining old groups.
+	wg.Add(len(m.groups))
+	for n, oldg := range m.groups {
+		go func(n string, g *Group) {
+			g.markStale = true
+			g.stop()
+			if m := g.metrics; m != nil {
+				m.evalTotal.DeleteLabelValues(n)
+				m.evalFailures.DeleteLabelValues(n)
+				m.groupInterval.DeleteLabelValues(n)
+				m.groupLastEvalTime.DeleteLabelValues(n)
+				m.groupLastDuration.DeleteLabelValues(n)
+				m.groupRules.DeleteLabelValues(n)
+			}
+			wg.Done()
+		}(n, oldg)
+	}
+
+	wg.Wait()
+	m.groups = groups
+
+	return nil
+}
+
+// LoadGroups reads groups from a list of files.
+func (m *Manager) LoadGroups(
+	interval time.Duration, externalLabels labels.Labels, filenames ...string,
+) (map[string]*Group, []error) {
+	groups := make(map[string]*Group)
+
+	shouldRestore := !m.restored
+
+	for _, fn := range filenames {
+		rgs, errs := rulefmt.ParseFile(fn)
+		if errs != nil {
+			return nil, errs
+		}
+
+		for _, rg := range rgs.Groups {
+			itv := interval
+			if rg.Interval != 0 {
+				itv = time.Duration(rg.Interval)
+			}
+
+			rules := make([]Rule, 0, len(rg.Rules))
+			for _, r := range rg.Rules {
+				expr, err := parser.ParseExpr(r.Expr.Value)
+				if err != nil {
+					return nil, []error{errors.Wrap(err, fn)}
+				}
+
+				if r.Alert.Value != "" {
+					rules = append(rules, NewAlertingRule(
+						r.Alert.Value,
+						expr,
+						time.Duration(r.For),
+						labels.FromMap(r.Labels),
+						labels.FromMap(r.Annotations),
+						externalLabels,
+						m.restored,
+						log.With(m.logger, "alert", r.Alert),
+					))
+					continue
+				}
+				rules = append(rules, NewRecordingRule(
+					r.Record.Value,
+					expr,
+					labels.FromMap(r.Labels),
+				))
+			}
+
+			groups[groupKey(fn, rg.Name)] = NewGroup(GroupOptions{
+				Name:          rg.Name,
+				File:          fn,
+				Interval:      itv,
+				Rules:         rules,
+				ShouldRestore: shouldRestore,
+				Opts:          m.opts,
+				done:          m.done,
+			})
+		}
+	}
+
+	return groups, nil
+}
+
+// Group names need not be unique across filenames.
+func groupKey(file, name string) string {
+	return file + ";" + name
+}
+
+// RuleGroups returns the list of manager's rule groups.
+func (m *Manager) RuleGroups() []*Group {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	rgs := make([]*Group, 0, len(m.groups))
+	for _, g := range m.groups {
+		rgs = append(rgs, g)
+	}
+
+	sort.Slice(rgs, func(i, j int) bool {
+		if rgs[i].file != rgs[j].file {
+			return rgs[i].file < rgs[j].file
+		}
+		return rgs[i].name < rgs[j].name
+	})
+
+	return rgs
+}
+
+// Rules returns the list of the manager's rules.
+func (m *Manager) Rules() []Rule {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	var rules []Rule
+	for _, g := range m.groups {
+		rules = append(rules, g.rules...)
+	}
+
+	return rules
+}
+
+// AlertingRules returns the list of the manager's alerting rules.
+func (m *Manager) AlertingRules() []*AlertingRule {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	alerts := []*AlertingRule{}
+	for _, rule := range m.Rules() {
+		if alertingRule, ok := rule.(*AlertingRule); ok {
+			alerts = append(alerts, alertingRule)
+		}
+	}
+
+	return alerts
+}
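
For orientation, here is a minimal sketch of how the vendored rules Manager above is wired up. The storage value `st`, the no-op query and notify functions, and the file name `rules.yaml` are illustrative assumptions, not part of this change:

package main

import (
	"context"
	"net/url"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/storage"
)

// startRules loads rule files and starts the slotted evaluation loops.
// st is assumed to satisfy both the storage.Appendable and storage.Storage roles.
func startRules(st storage.Storage) (*rules.Manager, error) {
	m := rules.NewManager(&rules.ManagerOptions{
		Context:    context.Background(),
		Logger:     log.NewNopLogger(),
		Appendable: st,
		TSDB:       st,
		QueryFunc: func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
			return nil, nil // placeholder; a real deployment evaluates q with the PromQL engine
		},
		NotifyFunc:  func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
		ExternalURL: &url.URL{},
	})
	// On load errors the previously loaded rule set (if any) stays active.
	if err := m.Update(time.Minute, []string{"rules.yaml"}, nil); err != nil {
		return nil, err
	}
	m.Run() // unblocks the per-group evaluation loops
	return m, nil
}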
diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go
new file mode 100644
index 0000000000000000000000000000000000000000..cec1a0fad488e4a088d6daa8197417e9d10f2bd5
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/rules/recording.go
@@ -0,0 +1,203 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+	"context"
+	"fmt"
+	"html/template"
+	"net/url"
+	"sync"
+	"time"
+
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/util/strutil"
+)
+
+// A RecordingRule records its vector expression into new timeseries.
+type RecordingRule struct {
+	name   string
+	vector parser.Expr
+	labels labels.Labels
+	// Protects the below.
+	mtx sync.Mutex
+	// The health of the recording rule.
+	health RuleHealth
+	// Timestamp of last evaluation of the recording rule.
+	evaluationTimestamp time.Time
+	// The last error seen by the recording rule.
+	lastError error
+	// Duration of how long it took to evaluate the recording rule.
+	evaluationDuration time.Duration
+}
+
+// NewRecordingRule returns a new recording rule.
+func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule {
+	return &RecordingRule{
+		name:   name,
+		vector: vector,
+		health: HealthUnknown,
+		labels: lset,
+	}
+}
+
+// Name returns the rule name.
+func (rule *RecordingRule) Name() string {
+	return rule.name
+}
+
+// Query returns the rule query expression.
+func (rule *RecordingRule) Query() parser.Expr {
+	return rule.vector
+}
+
+// Labels returns the rule labels.
+func (rule *RecordingRule) Labels() labels.Labels {
+	return rule.labels
+}
+
+// Eval evaluates the rule and then overrides the metric names and labels accordingly.
+func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL) (promql.Vector, error) {
+	vector, err := query(ctx, rule.vector.String(), ts)
+	if err != nil {
+		rule.SetHealth(HealthBad)
+		rule.SetLastError(err)
+		return nil, err
+	}
+	// Override the metric name and labels.
+	for i := range vector {
+		sample := &vector[i]
+
+		lb := labels.NewBuilder(sample.Metric)
+
+		lb.Set(labels.MetricName, rule.name)
+
+		for _, l := range rule.labels {
+			lb.Set(l.Name, l.Value)
+		}
+
+		sample.Metric = lb.Labels()
+	}
+
+	// Check that the rule does not produce identical metrics after applying
+	// labels.
+	if vector.ContainsSameLabelset() {
+		err = fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
+		rule.SetHealth(HealthBad)
+		rule.SetLastError(err)
+		return nil, err
+	}
+
+	rule.SetHealth(HealthGood)
+	rule.SetLastError(err)
+	return vector, nil
+}
+
+func (rule *RecordingRule) String() string {
+	r := rulefmt.Rule{
+		Record: rule.name,
+		Expr:   rule.vector.String(),
+		Labels: rule.labels.Map(),
+	}
+
+	byt, err := yaml.Marshal(r)
+	if err != nil {
+		return fmt.Sprintf("error marshaling recording rule: %q", err.Error())
+	}
+
+	return string(byt)
+}
+
+// SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation.
+func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	rule.evaluationDuration = dur
+}
+
+// SetLastError sets the current error seen by the recording rule.
+func (rule *RecordingRule) SetLastError(err error) {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	rule.lastError = err
+}
+
+// LastError returns the last error seen by the recording rule.
+func (rule *RecordingRule) LastError() error {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	return rule.lastError
+}
+
+// SetHealth sets the current health of the recording rule.
+func (rule *RecordingRule) SetHealth(health RuleHealth) {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	rule.health = health
+}
+
+// Health returns the current health of the recording rule.
+func (rule *RecordingRule) Health() RuleHealth {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	return rule.health
+}
+
+// GetEvaluationDuration returns the time in seconds it took to evaluate the recording rule.
+func (rule *RecordingRule) GetEvaluationDuration() time.Duration {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	return rule.evaluationDuration
+}
+
+// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
+func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	rule.evaluationTimestamp = ts
+}
+
+// GetEvaluationTimestamp returns the time the evaluation took place.
+func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
+	rule.mtx.Lock()
+	defer rule.mtx.Unlock()
+	return rule.evaluationTimestamp
+}
+
+// HTMLSnippet returns an HTML snippet representing this rule.
+func (rule *RecordingRule) HTMLSnippet(pathPrefix string) template.HTML {
+	ruleExpr := rule.vector.String()
+	labels := make(map[string]string, len(rule.labels))
+	for _, l := range rule.labels {
+		labels[l.Name] = template.HTMLEscapeString(l.Value)
+	}
+
+	r := rulefmt.Rule{
+		Record: fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(rule.name), rule.name),
+		Expr:   fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(ruleExpr), template.HTMLEscapeString(ruleExpr)),
+		Labels: labels,
+	}
+
+	byt, err := yaml.Marshal(r)
+	if err != nil {
+		return template.HTML(fmt.Sprintf("error marshaling recording rule: %q", template.HTMLEscapeString(err.Error())))
+	}
+
+	return template.HTML(byt)
+}
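
As a concrete illustration of the RecordingRule API above, a hedged sketch that parses an expression and evaluates the rule against a stubbed query function; the constant sample it returns is purely illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/rules"
)

func main() {
	expr, err := parser.ParseExpr(`sum(rate(http_requests_total[5m]))`)
	if err != nil {
		panic(err)
	}
	rule := rules.NewRecordingRule("job:http_requests:rate5m", expr, labels.FromStrings("team", "demo"))

	// Stubbed query function returning a single constant sample.
	q := func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
		return promql.Vector{promql.Sample{
			Point:  promql.Point{T: t.Unix() * 1000, V: 1},
			Metric: labels.FromStrings("job", "api"),
		}}, nil
	}

	vec, err := rule.Eval(context.Background(), time.Now(), q, nil)
	if err != nil {
		panic(err)
	}
	// Eval overrode __name__ to "job:http_requests:rate5m" and attached team="demo".
	fmt.Println(vec[0].Metric)
}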
diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a31c4e54a4a373596173a1b6617019948ee0324
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/template/template.go
@@ -0,0 +1,364 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package template
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"math"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+
+	html_template "html/template"
+	text_template "text/template"
+
+	"github.com/pkg/errors"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/util/strutil"
+)
+
+var (
+	templateTextExpansionFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_template_text_expansion_failures_total",
+		Help: "The total number of template text expansion failures.",
+	})
+	templateTextExpansionTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_template_text_expansions_total",
+		Help: "The total number of template text expansions.",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(templateTextExpansionFailures)
+	prometheus.MustRegister(templateTextExpansionTotal)
+}
+
+// A version of vector that's easier to use from templates.
+type sample struct {
+	Labels map[string]string
+	Value  float64
+}
+type queryResult []*sample
+
+type queryResultByLabelSorter struct {
+	results queryResult
+	by      string
+}
+
+func (q queryResultByLabelSorter) Len() int {
+	return len(q.results)
+}
+
+func (q queryResultByLabelSorter) Less(i, j int) bool {
+	return q.results[i].Labels[q.by] < q.results[j].Labels[q.by]
+}
+
+func (q queryResultByLabelSorter) Swap(i, j int) {
+	q.results[i], q.results[j] = q.results[j], q.results[i]
+}
+
+// QueryFunc executes a PromQL query at the given time.
+type QueryFunc func(context.Context, string, time.Time) (promql.Vector, error)
+
+func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (queryResult, error) {
+	vector, err := queryFn(ctx, q, ts)
+	if err != nil {
+		return nil, err
+	}
+
+	// promql.Vector is hard to work with in templates, so convert to
+	// base data types.
+	// TODO(fabxc): probably not true anymore after type rework.
+	var result = make(queryResult, len(vector))
+	for n, v := range vector {
+		s := sample{
+			Value:  v.V,
+			Labels: v.Metric.Map(),
+		}
+		result[n] = &s
+	}
+	return result, nil
+}
+
+// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
+type Expander struct {
+	text    string
+	name    string
+	data    interface{}
+	funcMap text_template.FuncMap
+}
+
+// NewTemplateExpander returns a template expander ready to use.
+func NewTemplateExpander(
+	ctx context.Context,
+	text string,
+	name string,
+	data interface{},
+	timestamp model.Time,
+	queryFunc QueryFunc,
+	externalURL *url.URL,
+) *Expander {
+	return &Expander{
+		text: text,
+		name: name,
+		data: data,
+		funcMap: text_template.FuncMap{
+			"query": func(q string) (queryResult, error) {
+				return query(ctx, q, timestamp.Time(), queryFunc)
+			},
+			"first": func(v queryResult) (*sample, error) {
+				if len(v) > 0 {
+					return v[0], nil
+				}
+				return nil, errors.New("first() called on vector with no elements")
+			},
+			"label": func(label string, s *sample) string {
+				return s.Labels[label]
+			},
+			"value": func(s *sample) float64 {
+				return s.Value
+			},
+			"strvalue": func(s *sample) string {
+				return s.Labels["__value__"]
+			},
+			"args": func(args ...interface{}) map[string]interface{} {
+				result := make(map[string]interface{})
+				for i, a := range args {
+					result[fmt.Sprintf("arg%d", i)] = a
+				}
+				return result
+			},
+			"reReplaceAll": func(pattern, repl, text string) string {
+				re := regexp.MustCompile(pattern)
+				return re.ReplaceAllString(text, repl)
+			},
+			"safeHtml": func(text string) html_template.HTML {
+				return html_template.HTML(text)
+			},
+			"match":     regexp.MatchString,
+			"title":     strings.Title,
+			"toUpper":   strings.ToUpper,
+			"toLower":   strings.ToLower,
+			"graphLink": strutil.GraphLinkForExpression,
+			"tableLink": strutil.TableLinkForExpression,
+			"sortByLabel": func(label string, v queryResult) queryResult {
+				sorter := queryResultByLabelSorter{v[:], label}
+				sort.Stable(sorter)
+				return v
+			},
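+			// Illustrative values: humanize(1234567) returns "1.235M" and
+			// humanize(0.00002) returns "20u".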
+			"humanize": func(v float64) string {
+				if v == 0 || math.IsNaN(v) || math.IsInf(v, 0) {
+					return fmt.Sprintf("%.4g", v)
+				}
+				if math.Abs(v) >= 1 {
+					prefix := ""
+					for _, p := range []string{"k", "M", "G", "T", "P", "E", "Z", "Y"} {
+						if math.Abs(v) < 1000 {
+							break
+						}
+						prefix = p
+						v /= 1000
+					}
+					return fmt.Sprintf("%.4g%s", v, prefix)
+				}
+				prefix := ""
+				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
+					if math.Abs(v) >= 1 {
+						break
+					}
+					prefix = p
+					v *= 1000
+				}
+				return fmt.Sprintf("%.4g%s", v, prefix)
+			},
+			"humanize1024": func(v float64) string {
+				if math.Abs(v) <= 1 || math.IsNaN(v) || math.IsInf(v, 0) {
+					return fmt.Sprintf("%.4g", v)
+				}
+				prefix := ""
+				for _, p := range []string{"ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} {
+					if math.Abs(v) < 1024 {
+						break
+					}
+					prefix = p
+					v /= 1024
+				}
+				return fmt.Sprintf("%.4g%s", v, prefix)
+			},
+			"humanizeDuration": func(v float64) string {
+				if math.IsNaN(v) || math.IsInf(v, 0) {
+					return fmt.Sprintf("%.4g", v)
+				}
+				if v == 0 {
+					return fmt.Sprintf("%.4gs", v)
+				}
+				if math.Abs(v) >= 1 {
+					sign := ""
+					if v < 0 {
+						sign = "-"
+						v = -v
+					}
+					seconds := int64(v) % 60
+					minutes := (int64(v) / 60) % 60
+					hours := (int64(v) / 60 / 60) % 24
+					days := int64(v) / 60 / 60 / 24
+					// For days to minutes, we display seconds as an integer.
+					if days != 0 {
+						return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds)
+					}
+					if hours != 0 {
+						return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds)
+					}
+					if minutes != 0 {
+						return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds)
+					}
+					// For seconds, we display 4 significant digits.
+					return fmt.Sprintf("%s%.4gs", sign, v)
+				}
+				prefix := ""
+				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
+					if math.Abs(v) >= 1 {
+						break
+					}
+					prefix = p
+					v *= 1000
+				}
+				return fmt.Sprintf("%.4g%ss", v, prefix)
+			},
+			"humanizePercentage": func(v float64) string {
+				return fmt.Sprintf("%.4g%%", v*100)
+			},
+			"humanizeTimestamp": func(v float64) string {
+				if math.IsNaN(v) || math.IsInf(v, 0) {
+					return fmt.Sprintf("%.4g", v)
+				}
+				t := model.TimeFromUnixNano(int64(v * 1e9)).Time().UTC()
+				return fmt.Sprint(t)
+			},
+			"pathPrefix": func() string {
+				return externalURL.Path
+			},
+			"externalURL": func() string {
+				return externalURL.String()
+			},
+		},
+	}
+}
+
+// AlertTemplateData returns the interface to be used in expanding the template.
+func AlertTemplateData(labels map[string]string, externalLabels map[string]string, value float64) interface{} {
+	return struct {
+		Labels         map[string]string
+		ExternalLabels map[string]string
+		Value          float64
+	}{
+		Labels:         labels,
+		ExternalLabels: externalLabels,
+		Value:          value,
+	}
+}
+
+// Funcs adds the functions in fm to the Expander's function map.
+// Existing functions will be overwritten in case of conflict.
+func (te Expander) Funcs(fm text_template.FuncMap) {
+	for k, v := range fm {
+		te.funcMap[k] = v
+	}
+}
+
+// Expand expands a template in text (non-HTML) mode.
+func (te Expander) Expand() (result string, resultErr error) {
+	// It's better to have no alert description than to kill the whole process
+	// if there's a bug in the template.
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			resultErr, ok = r.(error)
+			if !ok {
+				resultErr = errors.Errorf("panic expanding template %v: %v", te.name, r)
+			}
+		}
+		if resultErr != nil {
+			templateTextExpansionFailures.Inc()
+		}
+	}()
+
+	templateTextExpansionTotal.Inc()
+
+	tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
+	if err != nil {
+		return "", errors.Wrapf(err, "error parsing template %v", te.name)
+	}
+	var buffer bytes.Buffer
+	err = tmpl.Execute(&buffer, te.data)
+	if err != nil {
+		return "", errors.Wrapf(err, "error executing template %v", te.name)
+	}
+	return buffer.String(), nil
+}
+
+// ExpandHTML expands a template with HTML escaping, with templates read from the given files.
+func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			resultErr, ok = r.(error)
+			if !ok {
+				resultErr = errors.Errorf("panic expanding template %s: %v", te.name, r)
+			}
+		}
+	}()
+
+	tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
+	tmpl.Option("missingkey=zero")
+	tmpl.Funcs(html_template.FuncMap{
+		"tmpl": func(name string, data interface{}) (html_template.HTML, error) {
+			var buffer bytes.Buffer
+			err := tmpl.ExecuteTemplate(&buffer, name, data)
+			return html_template.HTML(buffer.String()), err
+		},
+	})
+	tmpl, err := tmpl.Parse(te.text)
+	if err != nil {
+		return "", errors.Wrapf(err, "error parsing template %v", te.name)
+	}
+	if len(templateFiles) > 0 {
+		_, err = tmpl.ParseFiles(templateFiles...)
+		if err != nil {
+			return "", errors.Wrapf(err, "error parsing template files for %v", te.name)
+		}
+	}
+	var buffer bytes.Buffer
+	err = tmpl.Execute(&buffer, te.data)
+	if err != nil {
+		return "", errors.Wrapf(err, "error executing template %v", te.name)
+	}
+	return buffer.String(), nil
+}
+
+// ParseTest parses the templates and returns the error if any.
+func (te Expander) ParseTest() error {
+	_, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
+	if err != nil {
+		return err
+	}
+	return nil
+}
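
To make the Expander contract above concrete, a small hedged sketch that expands an alert annotation in text mode; the template text and label values are illustrative, and no query function is needed because the template never calls query:

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/template"
)

func main() {
	data := template.AlertTemplateData(
		map[string]string{"instance": "node-1"}, // alert labels
		map[string]string{"cluster": "east"},    // external labels
		0.87,                                    // current value
	)
	te := template.NewTemplateExpander(
		context.Background(),
		`Instance {{ .Labels.instance }} is at {{ humanizePercentage .Value }} capacity`,
		"alert-summary",
		data,
		model.Now(),
		nil, // query function; unused because the template never calls query
		&url.URL{},
	)
	out, err := te.Expand()
	fmt.Println(out, err) // Instance node-1 is at 87% capacity <nil>
}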
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/.gitignore b/vendor/github.com/prometheus/prometheus/tsdb/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..5530d1f6fa99d77c4c49164798349fb4b94ffb76
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/.gitignore
@@ -0,0 +1 @@
+benchout/
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..844ab5b37c691204f51a8c568410b11bc70f1cfb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
@@ -0,0 +1,108 @@
+## master / unreleased
+
+## 0.10.0
+
+ - [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
+    - `DBReadOnly.Blocks()` exposes a slice of `BlockReader`s.
+    - `BlockReader` interface - removed MinTime/MaxTime methods and now exposes the full block meta via `Meta()`.
+ - [FEATURE] `chunkenc.Chunk.Iterator` method now takes a `chunkenc.Iterator` interface as an argument for reuse.
+
+## 0.9.1
+
+ - [CHANGE] LiveReader metrics are now injected rather than global.
+
+## 0.9.0
+
+ - [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609)
+ - [BUGFIX] Re-calculate block size when calling `block.Delete`.
+ - [BUGFIX] Re-encode all head chunks at compaction that are open (being appended to) or outside the Maxt block range. This avoids writing out corrupt data. It happens when snapshotting with the head included.
+ - [BUGFIX] Improved handling of multiple refs for the same series in WAL reading.
+ - [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure.
+ - [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before.
+ - [CHANGE] Create new clean segment when starting the WAL.
+ - [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`.
+ - [ENHANCEMENT] Improved atomicity of .tmp block replacement during compaction for usual case.
+ - [ENHANCEMENT] Improved postings intersection matching.
+ - [ENHANCEMENT] Reduced disk usage for WAL for small setups.
+ - [ENHANCEMENT] Optimize queries using regexp for set lookups.
+
+
+## 0.8.0
+
+ - [BUGFIX] Calling `Close` more than once on a querier returns an error instead of a panic.
+ - [BUGFIX] Don't panic and recover nicely when running out of disk space.
+ - [BUGFIX] Correctly handle empty labels.
+ - [BUGFIX] Don't crash on an unknown tombstone ref.
+ - [ENHANCEMENT] Re-add FromData function to create a chunk from bytes. It is used by Cortex and Thanos.
+ - [ENHANCEMENT] Simplify mergedPostings.Seek.
+ - [FEATURE]  Added `currentSegment` metric for the current WAL segment it is being written to.
+
+## 0.7.1
+
+ - [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek
+
+## 0.7.0
+
+ - [CHANGE] tsdb now requires golang 1.12 or higher.
+ - [REMOVED] `chunks.NewReader` is removed as it wasn't used anywhere.
+ - [REMOVED] `FromData` is considered unused so was removed.
+ - [FEATURE] Added option WALSegmentSize -1 to disable the WAL.
+ - [BUGFIX] Bugfix in selectOverlappingDirs. Only return the first overlapping blocks.
+ - [BUGFIX] Fsync the meta file to persist it on disk to avoid data loss in case of a host crash.
+ - [BUGFIX] Fix fd and vm_area leak on error path in chunks.NewDirReader.
+ - [BUGFIX] Fix fd and vm_area leak on error path in index.NewFileReader.
+ - [BUGFIX] Force persisting the tombstone file to avoid data loss in case of a host crash.
+ - [BUGFIX] Keep series that are still in WAL in checkpoints.
+ - [ENHANCEMENT] Fast path for EmptyPostings cases in Merge, Intersect and Without.
+ - [ENHANCEMENT] Be smarter in how we look at matchers.
+ - [ENHANCEMENT] PostListings and NotMatcher now public.
+
+## 0.6.1
+
+  - [BUGFIX] Update `last` after appending a non-overlapping chunk in `chunks.MergeOverlappingChunks`. [#539](https://github.com/prometheus/tsdb/pull/539)
+
+## 0.6.0
+
+  - [CHANGE] `AllowOverlappingBlock` is now `AllowOverlappingBlocks`.
+
+## 0.5.0
+
+ - [FEATURE] Time-overlapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370)
+   - Disabled by default and can be enabled via `AllowOverlappingBlock` option.
+   - Added `MergeChunks` function in `chunkenc/xor.go` to merge 2 time-overlapping chunks.
+   - Added `MergeOverlappingChunks` function in `chunks/chunks.go` to merge multiple time-overlapping Chunk Metas.
+   - Added `MinTime` and `MaxTime` method for `BlockReader`.
+ - [FEATURE] New `dump` command to tsdb tool to dump all samples.
+ - [FEATURE] New `encoding` package for common binary encoding/decoding helpers.
+    - Added to remove some code duplication.
+ - [ENHANCEMENT] When closing the db any running compaction will be cancelled so it doesn't block.
+   - `NewLeveledCompactor` takes a context.
+ - [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`.
+ - [BUGFIX] Improved Postings Merge performance. Fixes a regression from the previous release.
+ - [BUGFIX] LiveReader can get into an infinite loop on corrupt WALs.
+
+## 0.4.0
+
+ - [CHANGE] New `WALSegmentSize` option to override the `DefaultOptions.WALSegmentSize`. Added to allow using smaller wal files. For example using tmpfs on a RPI to minimise the SD card wear out from the constant WAL writes. As part of this change the `DefaultOptions.WALSegmentSize` constant was also exposed.
+ - [CHANGE] Empty blocks are not written during compaction [#374](https://github.com/prometheus/tsdb/pull/374)
+ - [FEATURE] Size-based retention through `Options.MaxBytes`. As part of this change:
+   - Added new metrics - `prometheus_tsdb_storage_blocks_bytes_total`, `prometheus_tsdb_size_retentions_total`, `prometheus_tsdb_time_retentions_total`
+   - New public interface `SizeReader: Size() int64`
+   - `OpenBlock` signature changed to take a logger.
+ - [REMOVED] `PrefixMatcher` is considered unused so was removed.
+ - [CLEANUP] `Options.WALFlushInterval` is removed as it wasn't used anywhere.
+ - [FEATURE] Add new `LiveReader` to WAL package. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read.
+
+## 0.3.1
+
+ - [BUGFIX] Fixed most windows test and some actual bugs for unclosed file readers.
+
+## 0.3.0
+
+ - [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path.
+ - [CHANGE] `NewSegmentsRangeReader()` can now read over multiple wal ranges by using the new `SegmentRange{}` struct.
+ - [CHANGE] `CorruptionErr{}` now also exposes the Segment `Dir` which is added when displaying any errors.
+ - [CHANGE] `Head.Init()` is changed to `Head.Init(minValidTime int64)`
+ - [CHANGE] `SymbolTable()` renamed to `SymbolTableSize()` to make the name consistent with the  `Block{ symbolTableSize uint64 }` field.
+ - [CHANGE] `wal.Reader{}` now exposes `Segment()` for the current segment being read  and `Offset()` for the current offset.
+ - [FEATURE] tsdbutil analyze subcommand to find churn, high cardinality, etc.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/README.md b/vendor/github.com/prometheus/prometheus/tsdb/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..61f867088203cf4add7ee9d180700f2adc177dde
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/README.md
@@ -0,0 +1,13 @@
+# TSDB 
+
+[![GoDoc](https://godoc.org/github.com/prometheus/prometheus/tsdb?status.svg)](https://godoc.org/github.com/prometheus/prometheus/tsdb)
+
+This repository contains the Prometheus storage layer that is used in its 2.x releases.
+
+A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/).
+
+Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+
+Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/).
+
+See also the [format documentation](docs/format/README.md).
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d26f9038dda332a1cd93472fcb9607c860e1854
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go
@@ -0,0 +1,627 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+)
+
+// IndexWriter serializes the index for a block of series data.
+// The methods must be called in the order they are specified in.
+type IndexWriter interface {
+	// AddSymbol registers a single string symbol that is encountered in series
+	// and other indices. Symbols must be added in sorted order.
+	AddSymbol(sym string) error
+
+	// AddSeries populates the index writer with a series and its offsets
+	// of chunks that the index can reference.
+	// Implementations may require series to be inserted in increasing order by
+	// their labels.
+	// The reference numbers are used to resolve entries in postings lists that
+	// are added later.
+	AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error
+
+	// Close writes any finalization and closes the resources associated with
+	// the underlying writer.
+	Close() error
+}
+
+// IndexReader provides reading access of serialized index data.
+type IndexReader interface {
+	// Symbols returns an iterator over sorted string symbols that may occur in
+	// series' labels and indices. It is not safe to use the returned strings
+	// beyond the lifetime of the index reader.
+	Symbols() index.StringIter
+
+	// LabelValues returns sorted possible label values.
+	LabelValues(name string) ([]string, error)
+
+	// Postings returns the postings list iterator for the label pairs.
+	// The Postings here contain the offsets to the series inside the index.
+	// Found IDs are not strictly required to point to a valid Series, e.g.
+	// during background garbage collections. Input values must be sorted.
+	Postings(name string, values ...string) (index.Postings, error)
+
+	// SortedPostings returns a postings list that is reordered to be sorted
+	// by the label set of the underlying series.
+	SortedPostings(index.Postings) index.Postings
+
+	// Series populates the given labels and chunk metas for the series identified
+	// by the reference.
+	// Returns storage.ErrNotFound if the ref does not resolve to a known series.
+	Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error
+
+	// LabelNames returns all the unique label names present in the index in sorted order.
+	LabelNames() ([]string, error)
+
+	// Close releases the underlying resources of the reader.
+	Close() error
+}
+
+// ChunkWriter serializes a time block of chunked series data.
+type ChunkWriter interface {
+	// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
+	// must be populated.
+	// After returning successfully, the Ref fields in the ChunkMetas
+	// are set and can be used to retrieve the chunks from the written data.
+	WriteChunks(chunks ...chunks.Meta) error
+
+	// Close writes any required finalization and closes the resources
+	// associated with the underlying writer.
+	Close() error
+}
+
+// ChunkReader provides reading access of serialized time series data.
+type ChunkReader interface {
+	// Chunk returns the series data chunk with the given reference.
+	Chunk(ref uint64) (chunkenc.Chunk, error)
+
+	// Close releases all underlying resources of the reader.
+	Close() error
+}
+
+// BlockReader provides reading access to a data block.
+type BlockReader interface {
+	// Index returns an IndexReader over the block's data.
+	Index() (IndexReader, error)
+
+	// Chunks returns a ChunkReader over the block's data.
+	Chunks() (ChunkReader, error)
+
+	// Tombstones returns a tombstones.Reader over the block's deleted data.
+	Tombstones() (tombstones.Reader, error)
+
+	// Meta provides meta information about the block reader.
+	Meta() BlockMeta
+}
+
+// BlockMeta provides meta information about a block.
+type BlockMeta struct {
+	// Unique identifier for the block and its contents. Changes on compaction.
+	ULID ulid.ULID `json:"ulid"`
+
+	// MinTime and MaxTime specify the time range all samples
+	// in the block are in.
+	MinTime int64 `json:"minTime"`
+	MaxTime int64 `json:"maxTime"`
+
+	// Stats about the contents of the block.
+	Stats BlockStats `json:"stats,omitempty"`
+
+	// Information on compactions the block was created from.
+	Compaction BlockMetaCompaction `json:"compaction"`
+
+	// Version of the index format.
+	Version int `json:"version"`
+}
+
+// BlockStats contains stats about contents of a block.
+type BlockStats struct {
+	NumSamples    uint64 `json:"numSamples,omitempty"`
+	NumSeries     uint64 `json:"numSeries,omitempty"`
+	NumChunks     uint64 `json:"numChunks,omitempty"`
+	NumTombstones uint64 `json:"numTombstones,omitempty"`
+}
+
+// BlockDesc describes a block by ULID and time range.
+type BlockDesc struct {
+	ULID    ulid.ULID `json:"ulid"`
+	MinTime int64     `json:"minTime"`
+	MaxTime int64     `json:"maxTime"`
+}
+
+// BlockMetaCompaction holds information about compactions a block went through.
+type BlockMetaCompaction struct {
+	// Maximum number of compaction cycles any source block has
+	// gone through.
+	Level int `json:"level"`
+	// ULIDs of all source head blocks that went into the block.
+	Sources []ulid.ULID `json:"sources,omitempty"`
+	// Indicates that compaction resulted in a block without any samples,
+	// so it should be deleted on the next reload.
+	Deletable bool `json:"deletable,omitempty"`
+	// Short descriptions of the direct blocks that were used to create
+	// this block.
+	Parents []BlockDesc `json:"parents,omitempty"`
+	Failed  bool        `json:"failed,omitempty"`
+}
+
+const indexFilename = "index"
+const metaFilename = "meta.json"
+const metaVersion1 = 1
+
+func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
+
+func readMetaFile(dir string) (*BlockMeta, int64, error) {
+	b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
+	if err != nil {
+		return nil, 0, err
+	}
+	var m BlockMeta
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, 0, err
+	}
+	if m.Version != metaVersion1 {
+		return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version)
+	}
+
+	return &m, int64(len(b)), nil
+}
+
+func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) {
+	meta.Version = metaVersion1
+
+	// Make any changes to the file appear atomic.
+	path := filepath.Join(dir, metaFilename)
+	tmp := path + ".tmp"
+	defer func() {
+		if err := os.RemoveAll(tmp); err != nil {
+			level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
+		}
+	}()
+
+	f, err := os.Create(tmp)
+	if err != nil {
+		return 0, err
+	}
+
+	jsonMeta, err := json.MarshalIndent(meta, "", "\t")
+	if err != nil {
+		return 0, err
+	}
+
+	var merr tsdb_errors.MultiError
+	n, err := f.Write(jsonMeta)
+	if err != nil {
+		merr.Add(err)
+		merr.Add(f.Close())
+		return 0, merr.Err()
+	}
+
+	// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
+	if err := f.Sync(); err != nil {
+		merr.Add(err)
+		merr.Add(f.Close())
+		return 0, merr.Err()
+	}
+	if err := f.Close(); err != nil {
+		return 0, err
+	}
+	return int64(n), fileutil.Replace(tmp, path)
+}
+
+// Block represents a directory of time series data covering a continuous time range.
+type Block struct {
+	mtx            sync.RWMutex
+	closing        bool
+	pendingReaders sync.WaitGroup
+
+	dir  string
+	meta BlockMeta
+
+	// Symbol Table Size in bytes.
+	// We maintain this variable to avoid recalculation every time.
+	symbolTableSize uint64
+
+	chunkr     ChunkReader
+	indexr     IndexReader
+	tombstones tombstones.Reader
+
+	logger log.Logger
+
+	numBytesChunks    int64
+	numBytesIndex     int64
+	numBytesTombstone int64
+	numBytesMeta      int64
+}
+
+// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
+// to instantiate chunk structs.
+func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	var closers []io.Closer
+	defer func() {
+		if err != nil {
+			var merr tsdb_errors.MultiError
+			merr.Add(err)
+			merr.Add(closeAll(closers))
+			err = merr.Err()
+		}
+	}()
+	meta, sizeMeta, err := readMetaFile(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	cr, err := chunks.NewDirReader(chunkDir(dir), pool)
+	if err != nil {
+		return nil, err
+	}
+	closers = append(closers, cr)
+
+	ir, err := index.NewFileReader(filepath.Join(dir, indexFilename))
+	if err != nil {
+		return nil, err
+	}
+	closers = append(closers, ir)
+
+	tr, sizeTomb, err := tombstones.ReadTombstones(dir)
+	if err != nil {
+		return nil, err
+	}
+	closers = append(closers, tr)
+
+	pb = &Block{
+		dir:               dir,
+		meta:              *meta,
+		chunkr:            cr,
+		indexr:            ir,
+		tombstones:        tr,
+		symbolTableSize:   ir.SymbolTableSize(),
+		logger:            logger,
+		numBytesChunks:    cr.Size(),
+		numBytesIndex:     ir.Size(),
+		numBytesTombstone: sizeTomb,
+		numBytesMeta:      sizeMeta,
+	}
+	return pb, nil
+}
+
+// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
+func (pb *Block) Close() error {
+	pb.mtx.Lock()
+	pb.closing = true
+	pb.mtx.Unlock()
+
+	pb.pendingReaders.Wait()
+
+	var merr tsdb_errors.MultiError
+
+	merr.Add(pb.chunkr.Close())
+	merr.Add(pb.indexr.Close())
+	merr.Add(pb.tombstones.Close())
+
+	return merr.Err()
+}
+
+func (pb *Block) String() string {
+	return pb.meta.ULID.String()
+}
+
+// Dir returns the directory of the block.
+func (pb *Block) Dir() string { return pb.dir }
+
+// Meta returns meta information about the block.
+func (pb *Block) Meta() BlockMeta { return pb.meta }
+
+// MinTime returns the min time of the meta.
+func (pb *Block) MinTime() int64 { return pb.meta.MinTime }
+
+// MaxTime returns the max time of the meta.
+func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime }
+
+// Size returns the number of bytes that the block takes up.
+func (pb *Block) Size() int64 {
+	return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta
+}
+
+// ErrClosing is returned when a block is in the process of being closed.
+var ErrClosing = errors.New("block is closing")
+
+func (pb *Block) startRead() error {
+	pb.mtx.RLock()
+	defer pb.mtx.RUnlock()
+
+	if pb.closing {
+		return ErrClosing
+	}
+	pb.pendingReaders.Add(1)
+	return nil
+}
+
+// Index returns a new IndexReader against the block data.
+func (pb *Block) Index() (IndexReader, error) {
+	if err := pb.startRead(); err != nil {
+		return nil, err
+	}
+	return blockIndexReader{ir: pb.indexr, b: pb}, nil
+}
+
+// Chunks returns a new ChunkReader against the block data.
+func (pb *Block) Chunks() (ChunkReader, error) {
+	if err := pb.startRead(); err != nil {
+		return nil, err
+	}
+	return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
+}
+
+// Tombstones returns a new TombstoneReader against the block data.
+func (pb *Block) Tombstones() (tombstones.Reader, error) {
+	if err := pb.startRead(); err != nil {
+		return nil, err
+	}
+	return blockTombstoneReader{Reader: pb.tombstones, b: pb}, nil
+}
+
+// GetSymbolTableSize returns the Symbol Table Size in the index of this block.
+func (pb *Block) GetSymbolTableSize() uint64 {
+	return pb.symbolTableSize
+}
+
+func (pb *Block) setCompactionFailed() error {
+	pb.meta.Compaction.Failed = true
+	n, err := writeMetaFile(pb.logger, pb.dir, &pb.meta)
+	if err != nil {
+		return err
+	}
+	pb.numBytesMeta = n
+	return nil
+}
+
+type blockIndexReader struct {
+	ir IndexReader
+	b  *Block
+}
+
+func (r blockIndexReader) Symbols() index.StringIter {
+	return r.ir.Symbols()
+}
+
+func (r blockIndexReader) LabelValues(name string) ([]string, error) {
+	st, err := r.ir.LabelValues(name)
+	return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+}
+
+func (r blockIndexReader) Postings(name string, values ...string) (index.Postings, error) {
+	p, err := r.ir.Postings(name, values...)
+	if err != nil {
+		return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+	}
+	return p, nil
+}
+
+func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
+	return r.ir.SortedPostings(p)
+}
+
+func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
+	if err := r.ir.Series(ref, lset, chks); err != nil {
+		return errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+	}
+	return nil
+}
+
+func (r blockIndexReader) LabelNames() ([]string, error) {
+	return r.b.LabelNames()
+}
+
+func (r blockIndexReader) Close() error {
+	r.b.pendingReaders.Done()
+	return nil
+}
+
+type blockTombstoneReader struct {
+	tombstones.Reader
+	b *Block
+}
+
+func (r blockTombstoneReader) Close() error {
+	r.b.pendingReaders.Done()
+	return nil
+}
+
+type blockChunkReader struct {
+	ChunkReader
+	b *Block
+}
+
+func (r blockChunkReader) Close() error {
+	r.b.pendingReaders.Done()
+	return nil
+}
+
+// Delete matching series between mint and maxt in the block.
+func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
+	pb.mtx.Lock()
+	defer pb.mtx.Unlock()
+
+	if pb.closing {
+		return ErrClosing
+	}
+
+	p, err := PostingsForMatchers(pb.indexr, ms...)
+	if err != nil {
+		return errors.Wrap(err, "select series")
+	}
+
+	ir := pb.indexr
+
+	// Choose only valid postings which have chunks in the time-range.
+	stones := tombstones.NewMemTombstones()
+
+	var lset labels.Labels
+	var chks []chunks.Meta
+
+Outer:
+	for p.Next() {
+		err := ir.Series(p.At(), &lset, &chks)
+		if err != nil {
+			return err
+		}
+
+		for _, chk := range chks {
+			if chk.OverlapsClosedInterval(mint, maxt) {
+				// Delete only until the current values and not beyond.
+				tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
+				stones.AddInterval(p.At(), tombstones.Interval{Mint: tmin, Maxt: tmax})
+				continue Outer
+			}
+		}
+	}
+
+	if p.Err() != nil {
+		return p.Err()
+	}
+
+	err = pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error {
+		for _, iv := range ivs {
+			stones.AddInterval(id, iv)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	pb.tombstones = stones
+	pb.meta.Stats.NumTombstones = pb.tombstones.Total()
+
+	n, err := tombstones.WriteFile(pb.logger, pb.dir, pb.tombstones)
+	if err != nil {
+		return err
+	}
+	pb.numBytesTombstone = n
+	n, err = writeMetaFile(pb.logger, pb.dir, &pb.meta)
+	if err != nil {
+		return err
+	}
+	pb.numBytesMeta = n
+	return nil
+}
+
+// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
+// If there was a rewrite, then it returns the ULID of the new block written, else nil.
+func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) {
+	numStones := 0
+
+	if err := pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error {
+		numStones += len(ivs)
+		return nil
+	}); err != nil {
+		// This should never happen, as the iteration function only returns nil.
+		panic(err)
+	}
+	if numStones == 0 {
+		return nil, nil
+	}
+
+	meta := pb.Meta()
+	uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
+	if err != nil {
+		return nil, err
+	}
+	return &uid, nil
+}
+
+// Snapshot creates a snapshot of the block into dir.
+func (pb *Block) Snapshot(dir string) error {
+	blockDir := filepath.Join(dir, pb.meta.ULID.String())
+	if err := os.MkdirAll(blockDir, 0777); err != nil {
+		return errors.Wrap(err, "create snapshot block dir")
+	}
+
+	chunksDir := chunkDir(blockDir)
+	if err := os.MkdirAll(chunksDir, 0777); err != nil {
+		return errors.Wrap(err, "create snapshot chunk dir")
+	}
+
+	// Hardlink meta, index and tombstones
+	for _, fname := range []string{
+		metaFilename,
+		indexFilename,
+		tombstones.TombstonesFilename,
+	} {
+		if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
+			return errors.Wrapf(err, "create snapshot %s", fname)
+		}
+	}
+
+	// Hardlink the chunks
+	curChunkDir := chunkDir(pb.dir)
+	files, err := ioutil.ReadDir(curChunkDir)
+	if err != nil {
+		return errors.Wrap(err, "ReadDir the current chunk dir")
+	}
+
+	for _, f := range files {
+		err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
+		if err != nil {
+			return errors.Wrap(err, "hardlink a chunk")
+		}
+	}
+
+	return nil
+}
+
+// OverlapsClosedInterval returns true if the block overlaps [mint, maxt].
+func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool {
+	// The block itself is a half-open interval
+	// [pb.meta.MinTime, pb.meta.MaxTime).
+	return pb.meta.MinTime <= maxt && mint < pb.meta.MaxTime
+}
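+
+// For example, a block covering [0, 100) does not overlap the closed query
+// interval [100, 200]: 0 <= 200 holds but 100 < 100 does not. It does
+// overlap [99, 200], since 99 < 100.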
+
+// LabelNames returns all the unique label names present in the Block in sorted order.
+func (pb *Block) LabelNames() ([]string, error) {
+	return pb.indexr.LabelNames()
+}
+
+func clampInterval(a, b, mint, maxt int64) (int64, int64) {
+	if a < mint {
+		a = mint
+	}
+	if b > maxt {
+		b = maxt
+	}
+	return a, b
+}
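+
+// For example, clampInterval(0, 500, 100, 300) returns (100, 300): the
+// requested deletion range [0, 500] is clipped to the chunks' bounds.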
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
new file mode 100644
index 0000000000000000000000000000000000000000..9bb04920bfbff771fbe4395212cd2b802ece1b86
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
@@ -0,0 +1,1047 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+)
+
+// ExponentialBlockRanges returns the time ranges based on the stepSize.
+func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
+	ranges := make([]int64, 0, steps)
+	curRange := minSize
+	for i := 0; i < steps; i++ {
+		ranges = append(ranges, curRange)
+		curRange = curRange * int64(stepSize)
+	}
+
+	return ranges
+}
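+
+// As a worked example (values are illustrative): a 2h minimum size in
+// milliseconds with steps=3 and stepSize=5 yields ranges of 2h, 10h and 50h:
+//
+//	rngs := ExponentialBlockRanges(int64(2*time.Hour/time.Millisecond), 3, 5)
+//	// rngs == []int64{7200000, 36000000, 180000000}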
+
+// Compactor provides compaction against an underlying storage
+// of time series data.
+type Compactor interface {
+	// Plan returns a set of directories that can be compacted concurrently.
+	// The directories can be overlapping.
+	// Results returned when compactions are in progress are undefined.
+	Plan(dir string) ([]string, error)
+
+	// Write persists a Block into a directory.
+	// No block is written when the resulting block has 0 samples; an empty ulid.ULID{} is returned instead.
+	Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error)
+
+	// Compact runs compaction against the provided directories. Must
+	// only be called concurrently with results of Plan().
+	// Can optionally pass a list of already open blocks,
+	// to avoid having to reopen them.
+	// When resulting Block has 0 samples
+	//  * No block is written.
+	//  * The source dirs are marked Deletable.
+	//  * Returns empty ulid.ULID{}.
+	Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error)
+}
+
+// LeveledCompactor implements the Compactor interface.
+type LeveledCompactor struct {
+	metrics   *compactorMetrics
+	logger    log.Logger
+	ranges    []int64
+	chunkPool chunkenc.Pool
+	ctx       context.Context
+}
+
+type compactorMetrics struct {
+	ran               prometheus.Counter
+	populatingBlocks  prometheus.Gauge
+	overlappingBlocks prometheus.Counter
+	duration          prometheus.Histogram
+	chunkSize         prometheus.Histogram
+	chunkSamples      prometheus.Histogram
+	chunkRange        prometheus.Histogram
+}
+
+func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
+	m := &compactorMetrics{}
+
+	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_compactions_total",
+		Help: "Total number of compactions that were executed for the partition.",
+	})
+	m.populatingBlocks = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_compaction_populating_block",
+		Help: "Set to 1 when a block is currently being written to the disk.",
+	})
+	m.overlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_vertical_compactions_total",
+		Help: "Total number of compactions done on overlapping blocks.",
+	})
+	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:    "prometheus_tsdb_compaction_duration_seconds",
+		Help:    "Duration of compaction runs",
+		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
+	})
+	m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:    "prometheus_tsdb_compaction_chunk_size_bytes",
+		Help:    "Final size of chunks on their first compaction",
+		Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
+	})
+	m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:    "prometheus_tsdb_compaction_chunk_samples",
+		Help:    "Final number of samples on their first compaction",
+		Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
+	})
+	m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:    "prometheus_tsdb_compaction_chunk_range_seconds",
+		Help:    "Final time range of chunks on their first compaction",
+		Buckets: prometheus.ExponentialBuckets(100, 4, 10),
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.ran,
+			m.populatingBlocks,
+			m.overlappingBlocks,
+			m.duration,
+			m.chunkRange,
+			m.chunkSamples,
+			m.chunkSize,
+		)
+	}
+	return m
+}
+
+// NewLeveledCompactor returns a LeveledCompactor.
+func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
+	if len(ranges) == 0 {
+		return nil, errors.Errorf("at least one range must be provided")
+	}
+	if pool == nil {
+		pool = chunkenc.NewPool()
+	}
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	return &LeveledCompactor{
+		ranges:    ranges,
+		chunkPool: pool,
+		logger:    l,
+		metrics:   newCompactorMetrics(r),
+		ctx:       ctx,
+	}, nil
+}
+
+type dirMeta struct {
+	dir  string
+	meta *BlockMeta
+}
+
+// Plan returns a list of compactable blocks in the provided directory.
+func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
+	dirs, err := blockDirs(dir)
+	if err != nil {
+		return nil, err
+	}
+	if len(dirs) < 1 {
+		return nil, nil
+	}
+
+	var dms []dirMeta
+	for _, dir := range dirs {
+		meta, _, err := readMetaFile(dir)
+		if err != nil {
+			return nil, err
+		}
+		dms = append(dms, dirMeta{dir, meta})
+	}
+	return c.plan(dms)
+}
+
+func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
+	sort.Slice(dms, func(i, j int) bool {
+		return dms[i].meta.MinTime < dms[j].meta.MinTime
+	})
+
+	res := c.selectOverlappingDirs(dms)
+	if len(res) > 0 {
+		return res, nil
+	}
+	// No overlapping blocks, do compaction the usual way.
+	// We do not include the most recently created block, i.e. the one with max(minTime), which was just created from the WAL.
+	// This gives users a window of a full block size to back up new data piece-wise without having to care about data overlap.
+	dms = dms[:len(dms)-1]
+
+	for _, dm := range c.selectDirs(dms) {
+		res = append(res, dm.dir)
+	}
+	if len(res) > 0 {
+		return res, nil
+	}
+
+	// Compact any blocks with a big enough time range that have >5% tombstones.
+	for i := len(dms) - 1; i >= 0; i-- {
+		meta := dms[i].meta
+		if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] {
+			break
+		}
+		if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 {
+			return []string{dms[i].dir}, nil
+		}
+	}
+
+	return nil, nil
+}
+
+// selectDirs returns the dir metas that should be compacted into a single new block.
+// If only a single block range is configured, the result is always nil.
+func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
+	if len(c.ranges) < 2 || len(ds) < 1 {
+		return nil
+	}
+
+	highTime := ds[len(ds)-1].meta.MinTime
+
+	for _, iv := range c.ranges[1:] {
+		parts := splitByRange(ds, iv)
+		if len(parts) == 0 {
+			continue
+		}
+
+	Outer:
+		for _, p := range parts {
+			// Do not select the range if it has a block whose compaction failed.
+			for _, dm := range p {
+				if dm.meta.Compaction.Failed {
+					continue Outer
+				}
+			}
+
+			mint := p[0].meta.MinTime
+			maxt := p[len(p)-1].meta.MaxTime
+			// Pick the range of blocks if it spans the full range (potentially with gaps)
+			// or is before the most recent block.
+			// This ensures we don't compact blocks prematurely when another one of the same
+			// size still fits in the range.
+			if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 {
+				return p
+			}
+		}
+	}
+
+	return nil
+}
+
+// selectOverlappingDirs returns all dirs with overlapping time ranges.
+// It expects sorted input by mint and returns the overlapping dirs in the same order as received.
+func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string {
+	if len(ds) < 2 {
+		return nil
+	}
+	var overlappingDirs []string
+	globalMaxt := ds[0].meta.MaxTime
+	for i, d := range ds[1:] {
+		if d.meta.MinTime < globalMaxt {
+			if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well.
+				overlappingDirs = append(overlappingDirs, ds[i].dir)
+			}
+			overlappingDirs = append(overlappingDirs, d.dir)
+		} else if len(overlappingDirs) > 0 {
+			break
+		}
+		if d.meta.MaxTime > globalMaxt {
+			globalMaxt = d.meta.MaxTime
+		}
+	}
+	return overlappingDirs
+}
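+
+// For example, for sorted dir metas covering [0, 20), [10, 30) and [40, 50),
+// the first two are returned: 10 < 20 makes them overlap, and iteration
+// stops at [40, 50) because 40 >= 30, the running global maxt.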
+
+// splitByRange splits the directories by the time range. The range sequence starts at 0.
+//
+// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30
+// it returns [0-10, 10-20], [50-60], [90-100].
+func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
+	var splitDirs [][]dirMeta
+
+	for i := 0; i < len(ds); {
+		var (
+			group []dirMeta
+			t0    int64
+			m     = ds[i].meta
+		)
+		// Compute start of aligned time range of size tr closest to the current block's start.
+		if m.MinTime >= 0 {
+			t0 = tr * (m.MinTime / tr)
+		} else {
+			t0 = tr * ((m.MinTime - tr + 1) / tr)
+		}
+		// Skip blocks that don't fall into the range. This can happen through mis-alignment or
+		// because the block spans a multiple of the intended range.
+		if m.MaxTime > t0+tr {
+			i++
+			continue
+		}
+
+		// Add all dirs to the current group that are within [t0, t0+tr].
+		for ; i < len(ds); i++ {
+			// Either the block falls into the next range or doesn't fit at all (checked above).
+			if ds[i].meta.MaxTime > t0+tr {
+				break
+			}
+			group = append(group, ds[i])
+		}
+
+		if len(group) > 0 {
+			splitDirs = append(splitDirs, group)
+		}
+	}
+
+	return splitDirs
+}
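+
+// Note that the alignment formula above also holds for negative timestamps:
+// with tr=30, a block starting at MinTime=-25 gets
+// t0 = 30 * ((-25 - 30 + 1) / 30) = -30 (Go division truncates toward zero),
+// so the block is grouped into the aligned range [-30, 0].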
+
+func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
+	res := &BlockMeta{
+		ULID:    uid,
+		MinTime: blocks[0].MinTime,
+	}
+
+	sources := map[ulid.ULID]struct{}{}
+	// For overlapping blocks, the Maxt can be
+	// in any block so we track it globally.
+	maxt := int64(math.MinInt64)
+
+	for _, b := range blocks {
+		if b.MaxTime > maxt {
+			maxt = b.MaxTime
+		}
+		if b.Compaction.Level > res.Compaction.Level {
+			res.Compaction.Level = b.Compaction.Level
+		}
+		for _, s := range b.Compaction.Sources {
+			sources[s] = struct{}{}
+		}
+		res.Compaction.Parents = append(res.Compaction.Parents, BlockDesc{
+			ULID:    b.ULID,
+			MinTime: b.MinTime,
+			MaxTime: b.MaxTime,
+		})
+	}
+	res.Compaction.Level++
+
+	for s := range sources {
+		res.Compaction.Sources = append(res.Compaction.Sources, s)
+	}
+	sort.Slice(res.Compaction.Sources, func(i, j int) bool {
+		return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0
+	})
+
+	res.MaxTime = maxt
+	return res
+}
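+
+// For example, compacting two level-1 blocks covering [0, 10) and [10, 20)
+// yields a level-2 meta spanning [0, 20): Sources becomes the deduplicated,
+// sorted union of both inputs' sources and Parents records both input blocks.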
+
+// Compact creates a new block in the compactor's directory from the blocks in the
+// provided directories.
+func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) {
+	var (
+		blocks []BlockReader
+		bs     []*Block
+		metas  []*BlockMeta
+		uids   []string
+	)
+	start := time.Now()
+
+	for _, d := range dirs {
+		meta, _, err := readMetaFile(d)
+		if err != nil {
+			return uid, err
+		}
+
+		var b *Block
+
+		// Use already open blocks if we can, to avoid
+		// having the index data in memory twice.
+		for _, o := range open {
+			if meta.ULID == o.Meta().ULID {
+				b = o
+				break
+			}
+		}
+
+		if b == nil {
+			var err error
+			b, err = OpenBlock(c.logger, d, c.chunkPool)
+			if err != nil {
+				return uid, err
+			}
+			defer b.Close()
+		}
+
+		metas = append(metas, meta)
+		blocks = append(blocks, b)
+		bs = append(bs, b)
+		uids = append(uids, meta.ULID.String())
+	}
+
+	uid = ulid.MustNew(ulid.Now(), rand.Reader)
+
+	meta := compactBlockMetas(uid, metas...)
+	err = c.write(dest, meta, blocks...)
+	if err == nil {
+		if meta.Stats.NumSamples == 0 {
+			for _, b := range bs {
+				b.meta.Compaction.Deletable = true
+				n, err := writeMetaFile(c.logger, b.dir, &b.meta)
+				if err != nil {
+					level.Error(c.logger).Log(
+						"msg", "Failed to write 'Deletable' to meta file after compaction",
+						"ulid", b.meta.ULID,
+					)
+				}
+				b.numBytesMeta = n
+			}
+			uid = ulid.ULID{}
+			level.Info(c.logger).Log(
+				"msg", "compact blocks resulted in empty block",
+				"count", len(blocks),
+				"sources", fmt.Sprintf("%v", uids),
+				"duration", time.Since(start),
+			)
+		} else {
+			level.Info(c.logger).Log(
+				"msg", "compact blocks",
+				"count", len(blocks),
+				"mint", meta.MinTime,
+				"maxt", meta.MaxTime,
+				"ulid", meta.ULID,
+				"sources", fmt.Sprintf("%v", uids),
+				"duration", time.Since(start),
+			)
+		}
+		return uid, nil
+	}
+
+	var merr tsdb_errors.MultiError
+	merr.Add(err)
+	if err != context.Canceled {
+		for _, b := range bs {
+			if err := b.setCompactionFailed(); err != nil {
+				merr.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir()))
+			}
+		}
+	}
+
+	return uid, merr
+}
+
+func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
+	start := time.Now()
+
+	uid := ulid.MustNew(ulid.Now(), rand.Reader)
+
+	meta := &BlockMeta{
+		ULID:    uid,
+		MinTime: mint,
+		MaxTime: maxt,
+	}
+	meta.Compaction.Level = 1
+	meta.Compaction.Sources = []ulid.ULID{uid}
+
+	if parent != nil {
+		meta.Compaction.Parents = []BlockDesc{
+			{ULID: parent.ULID, MinTime: parent.MinTime, MaxTime: parent.MaxTime},
+		}
+	}
+
+	err := c.write(dest, meta, b)
+	if err != nil {
+		return uid, err
+	}
+
+	if meta.Stats.NumSamples == 0 {
+		return ulid.ULID{}, nil
+	}
+
+	level.Info(c.logger).Log(
+		"msg", "write block",
+		"mint", meta.MinTime,
+		"maxt", meta.MaxTime,
+		"ulid", meta.ULID,
+		"duration", time.Since(start),
+	)
+	return uid, nil
+}
+
+// instrumentedChunkWriter is used for level 1 compactions to record statistics
+// about compacted chunks.
+type instrumentedChunkWriter struct {
+	ChunkWriter
+
+	size    prometheus.Histogram
+	samples prometheus.Histogram
+	trange  prometheus.Histogram
+}
+
+func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
+	for _, c := range chunks {
+		w.size.Observe(float64(len(c.Chunk.Bytes())))
+		w.samples.Observe(float64(c.Chunk.NumSamples()))
+		w.trange.Observe(float64(c.MaxTime - c.MinTime))
+	}
+	return w.ChunkWriter.WriteChunks(chunks...)
+}
+
+// write creates a new block in dest that is the union of the provided blocks.
+// It cleans up all files of the old blocks after completing successfully.
+func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
+	dir := filepath.Join(dest, meta.ULID.String())
+	tmp := dir + ".tmp"
+	var closers []io.Closer
+	defer func(t time.Time) {
+		var merr tsdb_errors.MultiError
+		merr.Add(err)
+		merr.Add(closeAll(closers))
+		err = merr.Err()
+
+		// RemoveAll returns no error when tmp doesn't exist so it is safe to always run it.
+		if err := os.RemoveAll(tmp); err != nil {
+			level.Error(c.logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error())
+		}
+		c.metrics.ran.Inc()
+		c.metrics.duration.Observe(time.Since(t).Seconds())
+	}(time.Now())
+
+	if err = os.RemoveAll(tmp); err != nil {
+		return err
+	}
+
+	if err = os.MkdirAll(tmp, 0777); err != nil {
+		return err
+	}
+
+	// Populate chunk and index files into temporary directory with
+	// data of all blocks.
+	var chunkw ChunkWriter
+
+	chunkw, err = chunks.NewWriter(chunkDir(tmp))
+	if err != nil {
+		return errors.Wrap(err, "open chunk writer")
+	}
+	closers = append(closers, chunkw)
+	// Record written chunk sizes on level 1 compactions.
+	if meta.Compaction.Level == 1 {
+		chunkw = &instrumentedChunkWriter{
+			ChunkWriter: chunkw,
+			size:        c.metrics.chunkSize,
+			samples:     c.metrics.chunkSamples,
+			trange:      c.metrics.chunkRange,
+		}
+	}
+
+	indexw, err := index.NewWriter(c.ctx, filepath.Join(tmp, indexFilename))
+	if err != nil {
+		return errors.Wrap(err, "open index writer")
+	}
+	closers = append(closers, indexw)
+
+	if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil {
+		return errors.Wrap(err, "write compaction")
+	}
+
+	select {
+	case <-c.ctx.Done():
+		return c.ctx.Err()
+	default:
+	}
+
+	// We are explicitly closing them here to check for errors even
+	// though they are covered by the defer. This is because on Windows
+	// these files cannot be deleted unless they are closed; the defer only
+	// ensures they are closed if the function exits due to an error above.
+	var merr tsdb_errors.MultiError
+	for _, w := range closers {
+		merr.Add(w.Close())
+	}
+	closers = closers[:0] // Avoid closing the writers twice in the defer.
+	if merr.Err() != nil {
+		return merr.Err()
+	}
+
+	// Populated block is empty, so exit early.
+	if meta.Stats.NumSamples == 0 {
+		return nil
+	}
+
+	if _, err = writeMetaFile(c.logger, tmp, meta); err != nil {
+		return errors.Wrap(err, "write merged meta")
+	}
+
+	// Create an empty tombstones file.
+	if _, err := tombstones.WriteFile(c.logger, tmp, tombstones.NewMemTombstones()); err != nil {
+		return errors.Wrap(err, "write new tombstones file")
+	}
+
+	df, err := fileutil.OpenDir(tmp)
+	if err != nil {
+		return errors.Wrap(err, "open temporary block dir")
+	}
+	defer func() {
+		if df != nil {
+			df.Close()
+		}
+	}()
+
+	if err := df.Sync(); err != nil {
+		return errors.Wrap(err, "sync temporary dir file")
+	}
+
+	// Close temp dir before rename block dir (for windows platform).
+	if err = df.Close(); err != nil {
+		return errors.Wrap(err, "close temporary dir")
+	}
+	df = nil
+
+	// Block successfully written, make visible and remove old ones.
+	if err := fileutil.Replace(tmp, dir); err != nil {
+		return errors.Wrap(err, "rename block dir")
+	}
+
+	return nil
+}
+
+// populateBlock fills the index and chunk writers with new data gathered as the union
+// of the provided blocks. It updates the given meta with statistics for the new block.
+// It expects the input blocks to be sorted by mint.
+func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
+	if len(blocks) == 0 {
+		return errors.New("cannot populate block from no readers")
+	}
+
+	var (
+		set         storage.DeprecatedChunkSeriesSet
+		symbols     index.StringIter
+		closers     = []io.Closer{}
+		overlapping bool
+	)
+	defer func() {
+		var merr tsdb_errors.MultiError
+		merr.Add(err)
+		merr.Add(closeAll(closers))
+		err = merr.Err()
+		c.metrics.populatingBlocks.Set(0)
+	}()
+	c.metrics.populatingBlocks.Set(1)
+
+	globalMaxt := blocks[0].Meta().MaxTime
+	for i, b := range blocks {
+		select {
+		case <-c.ctx.Done():
+			return c.ctx.Err()
+		default:
+		}
+
+		if !overlapping {
+			if i > 0 && b.Meta().MinTime < globalMaxt {
+				c.metrics.overlappingBlocks.Inc()
+				overlapping = true
+				level.Warn(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID)
+			}
+			if b.Meta().MaxTime > globalMaxt {
+				globalMaxt = b.Meta().MaxTime
+			}
+		}
+
+		indexr, err := b.Index()
+		if err != nil {
+			return errors.Wrapf(err, "open index reader for block %s", b)
+		}
+		closers = append(closers, indexr)
+
+		chunkr, err := b.Chunks()
+		if err != nil {
+			return errors.Wrapf(err, "open chunk reader for block %s", b)
+		}
+		closers = append(closers, chunkr)
+
+		tombsr, err := b.Tombstones()
+		if err != nil {
+			return errors.Wrapf(err, "open tombstone reader for block %s", b)
+		}
+		closers = append(closers, tombsr)
+
+		k, v := index.AllPostingsKey()
+		all, err := indexr.Postings(k, v)
+		if err != nil {
+			return err
+		}
+		all = indexr.SortedPostings(all)
+
+		s := newCompactionSeriesSet(indexr, chunkr, tombsr, all)
+		syms := indexr.Symbols()
+
+		if i == 0 {
+			set = s
+			symbols = syms
+			continue
+		}
+		set, err = newCompactionMerger(set, s)
+		if err != nil {
+			return err
+		}
+		symbols = newMergedStringIter(symbols, syms)
+	}
+
+	for symbols.Next() {
+		if err := indexw.AddSymbol(symbols.At()); err != nil {
+			return errors.Wrap(err, "add symbol")
+		}
+	}
+	if symbols.Err() != nil {
+		return errors.Wrap(symbols.Err(), "next symbol")
+	}
+
+	delIter := &deletedIterator{}
+	ref := uint64(0)
+	for set.Next() {
+		select {
+		case <-c.ctx.Done():
+			return c.ctx.Err()
+		default:
+		}
+
+		lset, chks, dranges := set.At() // The chunks here are not fully deleted.
+		if overlapping {
+			// If blocks are overlapping, it is possible to have unsorted chunks.
+			sort.Slice(chks, func(i, j int) bool {
+				return chks[i].MinTime < chks[j].MinTime
+			})
+		}
+
+		// Skip the series with all deleted chunks.
+		if len(chks) == 0 {
+			continue
+		}
+
+		for i, chk := range chks {
+			// Re-encode head chunks that are still open (being appended to) or
+			// outside the compacted MaxTime range.
+			// The chunk.Bytes() method is not safe for open chunks hence the re-encoding.
+			// This happens when snapshotting the head block.
+			//
+			// Block time range is half-open: [meta.MinTime, meta.MaxTime) and
+			// chunks are closed hence the chk.MaxTime >= meta.MaxTime check.
+			//
+			// TODO think how to avoid the typecasting to verify when it is head block.
+			if _, isHeadChunk := chk.Chunk.(*safeChunk); isHeadChunk && chk.MaxTime >= meta.MaxTime {
+				dranges = append(dranges, tombstones.Interval{Mint: meta.MaxTime, Maxt: math.MaxInt64})
+
+			} else
+			// Sanity check for disk blocks.
+			// chk.MaxTime == meta.MaxTime shouldn't happen as well, but will break many users so not checking for that.
+			if chk.MinTime < meta.MinTime || chk.MaxTime > meta.MaxTime {
+				return errors.Errorf("found chunk with minTime: %d maxTime: %d outside of compacted minTime: %d maxTime: %d",
+					chk.MinTime, chk.MaxTime, meta.MinTime, meta.MaxTime)
+			}
+
+			if len(dranges) > 0 {
+				// Re-encode the chunk to not have deleted values.
+				if !chk.OverlapsClosedInterval(dranges[0].Mint, dranges[len(dranges)-1].Maxt) {
+					continue
+				}
+				newChunk := chunkenc.NewXORChunk()
+				app, err := newChunk.Appender()
+				if err != nil {
+					return err
+				}
+
+				delIter.it = chk.Chunk.Iterator(delIter.it)
+				delIter.intervals = dranges
+
+				var (
+					t int64
+					v float64
+				)
+				for delIter.Next() {
+					t, v = delIter.At()
+					app.Append(t, v)
+				}
+				if err := delIter.Err(); err != nil {
+					return errors.Wrap(err, "iterate chunk while re-encoding")
+				}
+
+				chks[i].Chunk = newChunk
+				chks[i].MaxTime = t
+			}
+		}
+
+		mergedChks := chks
+		if overlapping {
+			mergedChks, err = chunks.MergeOverlappingChunks(chks)
+			if err != nil {
+				return errors.Wrap(err, "merge overlapping chunks")
+			}
+		}
+		if err := chunkw.WriteChunks(mergedChks...); err != nil {
+			return errors.Wrap(err, "write chunks")
+		}
+
+		if err := indexw.AddSeries(ref, lset, mergedChks...); err != nil {
+			return errors.Wrap(err, "add series")
+		}
+
+		meta.Stats.NumChunks += uint64(len(mergedChks))
+		meta.Stats.NumSeries++
+		for _, chk := range mergedChks {
+			meta.Stats.NumSamples += uint64(chk.Chunk.NumSamples())
+		}
+
+		for _, chk := range mergedChks {
+			if err := c.chunkPool.Put(chk.Chunk); err != nil {
+				return errors.Wrap(err, "put chunk")
+			}
+		}
+
+		ref++
+	}
+	if set.Err() != nil {
+		return errors.Wrap(set.Err(), "iterate compaction set")
+	}
+
+	return nil
+}
+
+type compactionSeriesSet struct {
+	p          index.Postings
+	index      IndexReader
+	chunks     ChunkReader
+	tombstones tombstones.Reader
+
+	l         labels.Labels
+	c         []chunks.Meta
+	intervals tombstones.Intervals
+	err       error
+}
+
+func newCompactionSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings) *compactionSeriesSet {
+	return &compactionSeriesSet{
+		index:      i,
+		chunks:     c,
+		tombstones: t,
+		p:          p,
+	}
+}
+
+func (c *compactionSeriesSet) Next() bool {
+	if !c.p.Next() {
+		return false
+	}
+	var err error
+
+	c.intervals, err = c.tombstones.Get(c.p.At())
+	if err != nil {
+		c.err = errors.Wrap(err, "get tombstones")
+		return false
+	}
+
+	if err = c.index.Series(c.p.At(), &c.l, &c.c); err != nil {
+		c.err = errors.Wrapf(err, "get series %d", c.p.At())
+		return false
+	}
+
+	// Remove completely deleted chunks.
+	if len(c.intervals) > 0 {
+		chks := make([]chunks.Meta, 0, len(c.c))
+		for _, chk := range c.c {
+			if !(tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(c.intervals)) {
+				chks = append(chks, chk)
+			}
+		}
+
+		c.c = chks
+	}
+
+	for i := range c.c {
+		chk := &c.c[i]
+
+		chk.Chunk, err = c.chunks.Chunk(chk.Ref)
+		if err != nil {
+			c.err = errors.Wrapf(err, "chunk %d not found", chk.Ref)
+			return false
+		}
+	}
+
+	return true
+}
+
+func (c *compactionSeriesSet) Err() error {
+	if c.err != nil {
+		return c.err
+	}
+	return c.p.Err()
+}
+
+func (c *compactionSeriesSet) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
+	return c.l, c.c, c.intervals
+}
+
+type compactionMerger struct {
+	a, b storage.DeprecatedChunkSeriesSet
+
+	aok, bok  bool
+	l         labels.Labels
+	c         []chunks.Meta
+	intervals tombstones.Intervals
+}
+
+// TODO(bwplotka): Move to storage mergers.
+func newCompactionMerger(a, b storage.DeprecatedChunkSeriesSet) (*compactionMerger, error) {
+	c := &compactionMerger{
+		a: a,
+		b: b,
+	}
+	// Initialize first elements of both sets as Next() needs
+	// one element look-ahead.
+	c.aok = c.a.Next()
+	c.bok = c.b.Next()
+
+	return c, c.Err()
+}
+
+func (c *compactionMerger) compare() int {
+	if !c.aok {
+		return 1
+	}
+	if !c.bok {
+		return -1
+	}
+	a, _, _ := c.a.At()
+	b, _, _ := c.b.At()
+	return labels.Compare(a, b)
+}
+
+func (c *compactionMerger) Next() bool {
+	if !c.aok && !c.bok || c.Err() != nil {
+		return false
+	}
+	// While advancing child iterators the memory used for labels and chunks
+	// may be reused. When picking a series we have to store the result.
+	var lset labels.Labels
+	var chks []chunks.Meta
+
+	d := c.compare()
+	if d > 0 {
+		lset, chks, c.intervals = c.b.At()
+		c.l = append(c.l[:0], lset...)
+		c.c = append(c.c[:0], chks...)
+
+		c.bok = c.b.Next()
+	} else if d < 0 {
+		lset, chks, c.intervals = c.a.At()
+		c.l = append(c.l[:0], lset...)
+		c.c = append(c.c[:0], chks...)
+
+		c.aok = c.a.Next()
+	} else {
+		// Both sets contain the current series. Chain them into a single one.
+		l, ca, ra := c.a.At()
+		_, cb, rb := c.b.At()
+
+		for _, r := range rb {
+			ra = ra.Add(r)
+		}
+
+		c.l = append(c.l[:0], l...)
+		c.c = append(append(c.c[:0], ca...), cb...)
+		c.intervals = ra
+
+		c.aok = c.a.Next()
+		c.bok = c.b.Next()
+	}
+
+	return true
+}
+
+func (c *compactionMerger) Err() error {
+	if c.a.Err() != nil {
+		return c.a.Err()
+	}
+	return c.b.Err()
+}
+
+func (c *compactionMerger) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
+	return c.l, c.c, c.intervals
+}
+
+func newMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter {
+	return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()}
+}
+
+type mergedStringIter struct {
+	a        index.StringIter
+	b        index.StringIter
+	aok, bok bool
+	cur      string
+}
+
+func (m *mergedStringIter) Next() bool {
+	if (!m.aok && !m.bok) || (m.Err() != nil) {
+		return false
+	}
+
+	if !m.aok {
+		m.cur = m.b.At()
+		m.bok = m.b.Next()
+	} else if !m.bok {
+		m.cur = m.a.At()
+		m.aok = m.a.Next()
+	} else if m.b.At() > m.a.At() {
+		m.cur = m.a.At()
+		m.aok = m.a.Next()
+	} else if m.a.At() > m.b.At() {
+		m.cur = m.b.At()
+		m.bok = m.b.Next()
+	} else { // Equal.
+		m.cur = m.b.At()
+		m.aok = m.a.Next()
+		m.bok = m.b.Next()
+	}
+
+	return true
+}
+func (m mergedStringIter) At() string { return m.cur }
+func (m mergedStringIter) Err() error {
+	if m.a.Err() != nil {
+		return m.a.Err()
+	}
+	return m.b.Err()
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
new file mode 100644
index 0000000000000000000000000000000000000000..e97499c04177f129b6dcdc90ca2ebd16e93883b2
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -0,0 +1,1465 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tsdb implements a time series storage for float64 sample data.
+package tsdb
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+
+	// Load the package into main to make sure the minimum Go version is met.
+	_ "github.com/prometheus/prometheus/tsdb/goversion"
+	"github.com/prometheus/prometheus/tsdb/wal"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	// Default duration of a block in milliseconds.
+	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
+)
+
+var (
+	// ErrNotReady is returned if the underlying storage is not ready yet.
+	ErrNotReady = errors.New("TSDB not ready")
+)
+
+// DefaultOptions returns the default options for the DB. They are sane for
+// setups using millisecond precision timestamps.
+func DefaultOptions() *Options {
+	return &Options{
+		WALSegmentSize:         wal.DefaultSegmentSize,
+		RetentionDuration:      int64(15 * 24 * time.Hour / time.Millisecond),
+		MinBlockDuration:       DefaultBlockDuration,
+		MaxBlockDuration:       DefaultBlockDuration,
+		NoLockfile:             false,
+		AllowOverlappingBlocks: false,
+		WALCompression:         false,
+		StripeSize:             DefaultStripeSize,
+	}
+}
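+
+// A minimal usage sketch (the directory and registerer are illustrative):
+//
+//	db, err := Open("/var/lib/tsdb", log.NewNopLogger(), prometheus.NewRegistry(), DefaultOptions())
+//	if err != nil {
+//		// handle error
+//	}
+//	defer db.Close()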
+
+// Options of the DB storage.
+type Options struct {
+	// Segments (wal files) max size.
+	// WALSegmentSize = 0, segment size is default size.
+	// WALSegmentSize > 0, segment size is WALSegmentSize.
+	// WALSegmentSize < 0, wal is disabled.
+	WALSegmentSize int
+
+	// Duration of persisted data to keep.
+	// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
+	// Typically it is in milliseconds.
+	RetentionDuration int64
+
+	// Maximum number of bytes in blocks to be retained.
+	// 0 or less means disabled.
+	// NOTE: For proper storage calculations need to consider
+	// the size of the WAL folder which is not added when calculating
+	// the current size of the database.
+	MaxBytes int64
+
+	// NoLockfile disables creation and consideration of a lock file.
+	NoLockfile bool
+
+	// Overlapping blocks are allowed if AllowOverlappingBlocks is true.
+	// This in turn enables vertical compaction and vertical query merge.
+	AllowOverlappingBlocks bool
+
+	// WALCompression will turn on Snappy compression for records on the WAL.
+	WALCompression bool
+
+	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
+	StripeSize int
+
+	// The timestamp range of head blocks after which they get persisted.
+	// It's the minimum duration of any persisted block.
+	// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
+	// Typically it is in milliseconds.
+	MinBlockDuration int64
+
+	// The maximum timestamp range of compacted blocks.
+	// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
+	// Typically it is in milliseconds.
+	MaxBlockDuration int64
+}
+
+// DB handles reads and writes of time series falling into
+// a hashed partition of a series database.
+type DB struct {
+	dir   string
+	lockf fileutil.Releaser
+
+	logger    log.Logger
+	metrics   *dbMetrics
+	opts      *Options
+	chunkPool chunkenc.Pool
+	compactor Compactor
+
+	// Mutex that must be held when modifying the general block layout.
+	mtx    sync.RWMutex
+	blocks []*Block
+
+	head *Head
+
+	compactc chan struct{}
+	donec    chan struct{}
+	stopc    chan struct{}
+
+	// cmtx ensures that compactions and deletions don't run simultaneously.
+	cmtx sync.Mutex
+
+	// autoCompactMtx ensures that no compaction gets triggered while
+	// changing the autoCompact var.
+	autoCompactMtx sync.Mutex
+	autoCompact    bool
+
+	// Cancel a running compaction when a shutdown is initiated.
+	compactCancel context.CancelFunc
+}
+
+type dbMetrics struct {
+	loadedBlocks         prometheus.GaugeFunc
+	symbolTableSize      prometheus.GaugeFunc
+	reloads              prometheus.Counter
+	reloadsFailed        prometheus.Counter
+	compactionsFailed    prometheus.Counter
+	compactionsTriggered prometheus.Counter
+	compactionsSkipped   prometheus.Counter
+	sizeRetentionCount   prometheus.Counter
+	timeRetentionCount   prometheus.Counter
+	startTime            prometheus.GaugeFunc
+	tombCleanTimer       prometheus.Histogram
+	blocksBytes          prometheus.Gauge
+	maxBytes             prometheus.Gauge
+}
+
+func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
+	m := &dbMetrics{}
+
+	m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_blocks_loaded",
+		Help: "Number of currently loaded data blocks",
+	}, func() float64 {
+		db.mtx.RLock()
+		defer db.mtx.RUnlock()
+		return float64(len(db.blocks))
+	})
+	m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_symbol_table_size_bytes",
+		Help: "Size of symbol table on disk (in bytes)",
+	}, func() float64 {
+		db.mtx.RLock()
+		blocks := db.blocks[:]
+		db.mtx.RUnlock()
+		symTblSize := uint64(0)
+		for _, b := range blocks {
+			symTblSize += b.GetSymbolTableSize()
+		}
+		return float64(symTblSize)
+	})
+	m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_reloads_total",
+		Help: "Number of times the database reloaded block data from disk.",
+	})
+	m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_reloads_failures_total",
+		Help: "Number of times the database failed to reload block data from disk.",
+	})
+	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_compactions_triggered_total",
+		Help: "Total number of triggered compactions for the partition.",
+	})
+	m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_compactions_failed_total",
+		Help: "Total number of compactions that failed for the partition.",
+	})
+	m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_time_retentions_total",
+		Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.",
+	})
+	m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_compactions_skipped_total",
+		Help: "Total number of skipped compactions due to disabled auto compaction.",
+	})
+	m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_lowest_timestamp",
+		Help: "Lowest timestamp value stored in the database. The unit is decided by the library consumer.",
+	}, func() float64 {
+		db.mtx.RLock()
+		defer db.mtx.RUnlock()
+		if len(db.blocks) == 0 {
+			return float64(db.head.MinTime())
+		}
+		return float64(db.blocks[0].meta.MinTime)
+	})
+	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "prometheus_tsdb_tombstone_cleanup_seconds",
+		Help: "The time taken to recompact blocks to remove tombstones.",
+	})
+	m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_storage_blocks_bytes",
+		Help: "The number of bytes that are currently used for local storage by all blocks.",
+	})
+	m.maxBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_retention_limit_bytes",
+		Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
+	})
+	m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_size_retentions_total",
+		Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.loadedBlocks,
+			m.symbolTableSize,
+			m.reloads,
+			m.reloadsFailed,
+			m.compactionsFailed,
+			m.compactionsTriggered,
+			m.compactionsSkipped,
+			m.sizeRetentionCount,
+			m.timeRetentionCount,
+			m.startTime,
+			m.tombCleanTimer,
+			m.blocksBytes,
+			m.maxBytes,
+		)
+	}
+	return m
+}
+
+// ErrClosed is returned when the db is closed.
+var ErrClosed = errors.New("db already closed")
+
+// DBReadOnly provides APIs for read only operations on a database.
+// Current implementation doesn't support concurrency so
+// all API calls should happen in the same goroutine.
+type DBReadOnly struct {
+	logger  log.Logger
+	dir     string
+	closers []io.Closer
+	closed  chan struct{}
+}
+
+// OpenDBReadOnly opens DB in the given directory for read only operations.
+func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
+	if _, err := os.Stat(dir); err != nil {
+		return nil, errors.Wrap(err, "opening the db dir")
+	}
+
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+
+	return &DBReadOnly{
+		logger: l,
+		dir:    dir,
+		closed: make(chan struct{}),
+	}, nil
+}
+
+// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL.
+// Samples that are in existing blocks will not be written to the new block.
+// Note that if the read only database is running concurrently with a
+// writable database then writing the WAL to the database directory can race.
+func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
+	blockReaders, err := db.Blocks()
+	if err != nil {
+		return errors.Wrap(err, "read blocks")
+	}
+	maxBlockTime := int64(math.MinInt64)
+	if len(blockReaders) > 0 {
+		maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
+	}
+	w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
+	if err != nil {
+		return err
+	}
+	head, err := NewHead(nil, db.logger, w, 1, DefaultStripeSize)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		var merr tsdb_errors.MultiError
+		merr.Add(returnErr)
+		merr.Add(errors.Wrap(head.Close(), "closing Head"))
+		returnErr = merr.Err()
+	}()
+	// Set the min valid time for the ingested wal samples
+	// to be no lower than the maxt of the last block.
+	if err := head.Init(maxBlockTime); err != nil {
+		return errors.Wrap(err, "read WAL")
+	}
+	mint := head.MinTime()
+	maxt := head.MaxTime()
+	rh := &RangeHead{
+		head: head,
+		mint: mint,
+		maxt: maxt,
+	}
+	compactor, err := NewLeveledCompactor(
+		context.Background(),
+		nil,
+		db.logger,
+		ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
+		chunkenc.NewPool(),
+	)
+	if err != nil {
+		return errors.Wrap(err, "create leveled compactor")
+	}
+	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
+	// Because of this, a block's interval always extends 1ms past the last sample it includes.
+	_, err = compactor.Write(dir, rh, mint, maxt+1, nil)
+	return errors.Wrap(err, "writing WAL")
+}
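+
+// A usage sketch for FlushWAL (the paths are illustrative): flush the WAL of
+// a stopped instance into a regular block before archiving the directory.
+//
+//	ro, err := OpenDBReadOnly("/var/lib/tsdb", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ro.Close()
+//	err = ro.FlushWAL("/var/lib/tsdb")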
+
+// Querier loads the wal and returns a new querier over the data partition for the given time range.
+// Current implementation doesn't support multiple Queriers.
+func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	select {
+	case <-db.closed:
+		return nil, ErrClosed
+	default:
+	}
+	blockReaders, err := db.Blocks()
+	if err != nil {
+		return nil, err
+	}
+	blocks := make([]*Block, len(blockReaders))
+	for i, b := range blockReaders {
+		b, ok := b.(*Block)
+		if !ok {
+			return nil, errors.New("unable to convert a read only block to a normal block")
+		}
+		blocks[i] = b
+	}
+
+	head, err := NewHead(nil, db.logger, nil, 1, DefaultStripeSize)
+	if err != nil {
+		return nil, err
+	}
+	maxBlockTime := int64(math.MinInt64)
+	if len(blocks) > 0 {
+		maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
+	}
+
+	// Also add the WAL if the current blocks don't cover the request's time range.
+	if maxBlockTime <= maxt {
+		w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
+		if err != nil {
+			return nil, err
+		}
+		head, err = NewHead(nil, db.logger, w, 1, DefaultStripeSize)
+		if err != nil {
+			return nil, err
+		}
+		// Set the min valid time for the ingested wal samples
+		// to be no lower than the maxt of the last block.
+		if err := head.Init(maxBlockTime); err != nil {
+			return nil, errors.Wrap(err, "read WAL")
+		}
+		// Set the wal to nil to disable all wal operations.
+		// This is mainly to avoid blocking when closing the head.
+		head.wal = nil
+
+		db.closers = append(db.closers, head)
+	}
+
+	// TODO: Refactor so that it is possible to obtain a Querier without initializing a writable DB instance.
+	// Option 1: refactor DB to have the Querier implementation using the DBReadOnly.Querier implementation not the opposite.
+	// Option 2: refactor Querier to use another independent func which
+	// can then be used by read-only and writable db instances without any code duplication.
+	dbWritable := &DB{
+		dir:    db.dir,
+		logger: db.logger,
+		blocks: blocks,
+		head:   head,
+	}
+
+	return dbWritable.Querier(ctx, mint, maxt)
+}
+
+// Blocks returns a slice of block readers for persisted blocks.
+func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
+	select {
+	case <-db.closed:
+		return nil, ErrClosed
+	default:
+	}
+	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
+	for _, block := range loadable {
+		for _, b := range block.Meta().Compaction.Parents {
+			delete(corrupted, b.ULID)
+		}
+	}
+	if len(corrupted) > 0 {
+		for _, b := range loadable {
+			if err := b.Close(); err != nil {
+				level.Warn(db.logger).Log("msg", "Closing a block", "err", err)
+			}
+		}
+		return nil, errors.Errorf("unexpected corrupted block:%v", corrupted)
+	}
+
+	if len(loadable) == 0 {
+		return nil, nil
+	}
+
+	sort.Slice(loadable, func(i, j int) bool {
+		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
+	})
+
+	blockMetas := make([]BlockMeta, 0, len(loadable))
+	for _, b := range loadable {
+		blockMetas = append(blockMetas, b.Meta())
+	}
+	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
+		level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String())
+	}
+
+	// Close all previously open readers and add the new ones to the cache.
+	for _, closer := range db.closers {
+		closer.Close()
+	}
+
+	blockClosers := make([]io.Closer, len(loadable))
+	blockReaders := make([]BlockReader, len(loadable))
+	for i, b := range loadable {
+		blockClosers[i] = b
+		blockReaders[i] = b
+	}
+	db.closers = blockClosers
+
+	return blockReaders, nil
+}
+
+// Close all block readers.
+func (db *DBReadOnly) Close() error {
+	select {
+	case <-db.closed:
+		return ErrClosed
+	default:
+	}
+	close(db.closed)
+
+	var merr tsdb_errors.MultiError
+
+	for _, b := range db.closers {
+		merr.Add(b.Close())
+	}
+	return merr.Err()
+}
+
+// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
+func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db *DB, err error) {
+	var rngs []int64
+	opts, rngs = validateOpts(opts, nil)
+	return open(dir, l, r, opts, rngs)
+}
+
+func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
+	if opts == nil {
+		opts = DefaultOptions()
+	}
+	if opts.StripeSize <= 0 {
+		opts.StripeSize = DefaultStripeSize
+	}
+
+	if opts.MinBlockDuration <= 0 {
+		opts.MinBlockDuration = DefaultBlockDuration
+	}
+	if opts.MinBlockDuration > opts.MaxBlockDuration {
+		opts.MaxBlockDuration = opts.MinBlockDuration
+	}
+
+	if len(rngs) == 0 {
+		// Start with the smallest block duration and create exponential buckets until they exceed the
+		// configured maximum block duration.
+		rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
+	}
+	return opts, rngs
+}
+
+func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (db *DB, err error) {
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return nil, err
+	}
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+
+	for i, v := range rngs {
+		if v > opts.MaxBlockDuration {
+			rngs = rngs[:i]
+			break
+		}
+	}
+
+	// Fixup bad format written by Prometheus 2.1.
+	if err := repairBadIndexVersion(l, dir); err != nil {
+		return nil, err
+	}
+	// Migrate old WAL if one exists.
+	if err := MigrateWAL(l, filepath.Join(dir, "wal")); err != nil {
+		return nil, errors.Wrap(err, "migrate WAL")
+	}
+
+	db = &DB{
+		dir:         dir,
+		logger:      l,
+		opts:        opts,
+		compactc:    make(chan struct{}, 1),
+		donec:       make(chan struct{}),
+		stopc:       make(chan struct{}),
+		autoCompact: true,
+		chunkPool:   chunkenc.NewPool(),
+	}
+	db.metrics = newDBMetrics(db, r)
+
+	maxBytes := opts.MaxBytes
+	if maxBytes < 0 {
+		maxBytes = 0
+	}
+	db.metrics.maxBytes.Set(float64(maxBytes))
+
+	if !opts.NoLockfile {
+		absdir, err := filepath.Abs(dir)
+		if err != nil {
+			return nil, err
+		}
+		lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock"))
+		if err != nil {
+			return nil, errors.Wrap(err, "lock DB directory")
+		}
+		db.lockf = lockf
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	db.compactor, err = NewLeveledCompactor(ctx, r, l, rngs, db.chunkPool)
+	if err != nil {
+		cancel()
+		return nil, errors.Wrap(err, "create leveled compactor")
+	}
+	db.compactCancel = cancel
+
+	var wlog *wal.WAL
+	segmentSize := wal.DefaultSegmentSize
+	// WAL is enabled.
+	if opts.WALSegmentSize >= 0 {
+		// WAL is set to a custom size.
+		if opts.WALSegmentSize > 0 {
+			segmentSize = opts.WALSegmentSize
+		}
+		wlog, err = wal.NewSize(l, r, filepath.Join(dir, "wal"), segmentSize, opts.WALCompression)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	db.head, err = NewHead(r, l, wlog, rngs[0], opts.StripeSize)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := db.reload(); err != nil {
+		return nil, err
+	}
+	// Set the min valid time for the ingested samples
+	// to be no lower than the maxt of the last block.
+	blocks := db.Blocks()
+	minValidTime := int64(math.MinInt64)
+	if len(blocks) > 0 {
+		minValidTime = blocks[len(blocks)-1].Meta().MaxTime
+	}
+
+	if initErr := db.head.Init(minValidTime); initErr != nil {
+		db.head.metrics.walCorruptionsTotal.Inc()
+		level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
+		if err := wlog.Repair(initErr); err != nil {
+			return nil, errors.Wrap(err, "repair corrupted WAL")
+		}
+	}
+
+	go db.run()
+
+	return db, nil
+}
+
+// StartTime implements the Storage interface.
+func (db *DB) StartTime() (int64, error) {
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	if len(db.blocks) > 0 {
+		return db.blocks[0].Meta().MinTime, nil
+	}
+	return db.head.MinTime(), nil
+}
+
+// Dir returns the directory of the database.
+func (db *DB) Dir() string {
+	return db.dir
+}
+
+func (db *DB) run() {
+	defer close(db.donec)
+
+	backoff := time.Duration(0)
+
+	for {
+		select {
+		case <-db.stopc:
+			return
+		case <-time.After(backoff):
+		}
+
+		select {
+		case <-time.After(1 * time.Minute):
+			select {
+			case db.compactc <- struct{}{}:
+			default:
+			}
+		case <-db.compactc:
+			db.metrics.compactionsTriggered.Inc()
+
+			db.autoCompactMtx.Lock()
+			if db.autoCompact {
+				if err := db.Compact(); err != nil {
+					level.Error(db.logger).Log("msg", "compaction failed", "err", err)
+					backoff = exponential(backoff, 1*time.Second, 1*time.Minute)
+				} else {
+					backoff = 0
+				}
+			} else {
+				db.metrics.compactionsSkipped.Inc()
+			}
+			db.autoCompactMtx.Unlock()
+		case <-db.stopc:
+			return
+		}
+	}
+}
+
+// Appender opens a new appender against the database.
+func (db *DB) Appender() storage.Appender {
+	return dbAppender{db: db, Appender: db.head.Appender()}
+}
+
+// dbAppender wraps the DB's head appender and triggers compactions on commit
+// if necessary.
+type dbAppender struct {
+	storage.Appender
+	db *DB
+}
+
+func (a dbAppender) Commit() error {
+	err := a.Appender.Commit()
+
+	// In practice we could run this check only every few minutes. But for benchmarks
+	// and high-frequency use cases this is the safer way.
+	if a.db.head.compactable() {
+		select {
+		case a.db.compactc <- struct{}{}:
+		default:
+		}
+	}
+	return err
+}
+
+// Compact data if possible. After successful compaction blocks are reloaded
+// which will also trigger blocks to be deleted that fall out of the retention
+// window.
+// If no blocks are compacted, the retention window state doesn't change. Thus,
+// this is sufficient to reliably delete old data.
+// Old blocks are only deleted on reload based on the new block's parent information.
+// See DB.reload documentation for further information.
+func (db *DB) Compact() (err error) {
+	db.cmtx.Lock()
+	defer db.cmtx.Unlock()
+	defer func() {
+		if err != nil {
+			db.metrics.compactionsFailed.Inc()
+		}
+	}()
+	// Check whether we have pending head blocks that are ready to be persisted.
+	// They have the highest priority.
+	for {
+		select {
+		case <-db.stopc:
+			return nil
+		default:
+		}
+		if !db.head.compactable() {
+			break
+		}
+		mint := db.head.MinTime()
+		maxt := rangeForTimestamp(mint, db.head.chunkRange)
+
+		// Wrap head into a range that bounds all reads to it.
+		// We remove 1 millisecond from maxt because block
+		// intervals are half-open: [b.MinTime, b.MaxTime). But
+		// chunk intervals are closed: [c.MinTime, c.MaxTime];
+		// so in order to make sure that overlaps are evaluated
+		// consistently, we explicitly remove the last value
+		// from the block interval here.
+		head := NewRangeHead(db.head, mint, maxt-1)
+		if err := db.compactHead(head, mint, maxt); err != nil {
+			return err
+		}
+	}
+
+	return db.compactBlocks()
+}
+
+// CompactHead compacts the given RangeHead.
+func (db *DB) CompactHead(head *RangeHead, mint, maxt int64) (err error) {
+	db.cmtx.Lock()
+	defer db.cmtx.Unlock()
+
+	return db.compactHead(head, mint, maxt)
+}
+
+// compactHead compacts the given RangeHead.
+// The compaction mutex should be held before calling this method.
+func (db *DB) compactHead(head *RangeHead, mint, maxt int64) (err error) {
+	uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil)
+	if err != nil {
+		return errors.Wrap(err, "persist head block")
+	}
+
+	runtime.GC()
+
+	if err := db.reload(); err != nil {
+		if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
+			return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid)
+		}
+		return errors.Wrap(err, "reload blocks")
+	}
+	if (uid == ulid.ULID{}) {
+		// Compaction resulted in an empty block.
+		// Head truncating during db.reload() depends on the persisted blocks and
+		// in this case no new block will be persisted so manually truncate the head.
+		if err = db.head.Truncate(maxt); err != nil {
+			return errors.Wrap(err, "head truncate failed (in compact)")
+		}
+	}
+	runtime.GC()
+
+	return nil
+}
+
+// compactBlocks compacts all the eligible on-disk blocks.
+// The compaction mutex should be held before calling this method.
+func (db *DB) compactBlocks() (err error) {
+	// Check for compactions of multiple blocks.
+	for {
+		plan, err := db.compactor.Plan(db.dir)
+		if err != nil {
+			return errors.Wrap(err, "plan compaction")
+		}
+		if len(plan) == 0 {
+			break
+		}
+
+		select {
+		case <-db.stopc:
+			return nil
+		default:
+		}
+
+		uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
+		if err != nil {
+			return errors.Wrapf(err, "compact %s", plan)
+		}
+		runtime.GC()
+
+		if err := db.reload(); err != nil {
+			if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
+				return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid)
+			}
+			return errors.Wrap(err, "reload blocks")
+		}
+		runtime.GC()
+	}
+
+	return nil
+}
+
+// getBlock iterates a given block range to find a block by a given id.
+// If found it returns the block itself and a boolean to indicate that it was found.
+func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
+	for _, b := range allBlocks {
+		if b.Meta().ULID == id {
+			return b, true
+		}
+	}
+	return nil, false
+}
+
+// reload blocks and trigger head truncation if new blocks appeared.
+// Blocks that are obsolete due to replacement or retention will be deleted.
+func (db *DB) reload() (err error) {
+	defer func() {
+		if err != nil {
+			db.metrics.reloadsFailed.Inc()
+		}
+		db.metrics.reloads.Inc()
+	}()
+
+	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool)
+	if err != nil {
+		return err
+	}
+
+	deletable := db.deletableBlocks(loadable)
+
+	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
+	// This makes it resilient against the process crashing towards the end of a compaction.
+	// Creation of a new block and deletion of its parents cannot happen atomically.
+	// By creating blocks with their parents, we can pick up the deletion where it left off during a crash.
+	for _, block := range loadable {
+		for _, b := range block.Meta().Compaction.Parents {
+			delete(corrupted, b.ULID)
+			deletable[b.ULID] = nil
+		}
+	}
+	if len(corrupted) > 0 {
+		// Close all new blocks to release the lock for windows.
+		for _, block := range loadable {
+			if _, open := getBlock(db.blocks, block.Meta().ULID); !open {
+				block.Close()
+			}
+		}
+		return fmt.Errorf("unexpected corrupted block:%v", corrupted)
+	}
+
+	// None of the deletable blocks should be loaded.
+	var (
+		bb         []*Block
+		blocksSize int64
+	)
+	for _, block := range loadable {
+		if _, ok := deletable[block.Meta().ULID]; ok {
+			deletable[block.Meta().ULID] = block
+			continue
+		}
+		bb = append(bb, block)
+		blocksSize += block.Size()
+	}
+	loadable = bb
+	db.metrics.blocksBytes.Set(float64(blocksSize))
+
+	sort.Slice(loadable, func(i, j int) bool {
+		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
+	})
+	if !db.opts.AllowOverlappingBlocks {
+		if err := validateBlockSequence(loadable); err != nil {
+			return errors.Wrap(err, "invalid block sequence")
+		}
+	}
+
+	// Swap in the new blocks first so that subsequently created readers see them.
+	db.mtx.Lock()
+	oldBlocks := db.blocks
+	db.blocks = loadable
+	db.mtx.Unlock()
+
+	blockMetas := make([]BlockMeta, 0, len(loadable))
+	for _, b := range loadable {
+		blockMetas = append(blockMetas, b.Meta())
+	}
+	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
+		level.Warn(db.logger).Log("msg", "Overlapping blocks found during reload", "detail", overlaps.String())
+	}
+
+	for _, b := range oldBlocks {
+		if _, ok := deletable[b.Meta().ULID]; ok {
+			deletable[b.Meta().ULID] = b
+		}
+	}
+
+	if err := db.deleteBlocks(deletable); err != nil {
+		return err
+	}
+
+	// Garbage collect data in the head if the most recent persisted block
+	// covers data of its current time range.
+	if len(loadable) == 0 {
+		return nil
+	}
+
+	maxt := loadable[len(loadable)-1].Meta().MaxTime
+
+	return errors.Wrap(db.head.Truncate(maxt), "head truncate failed")
+}
+
+func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
+	bDirs, err := blockDirs(dir)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "find blocks")
+	}
+
+	corrupted = make(map[ulid.ULID]error)
+	for _, bDir := range bDirs {
+		meta, _, err := readMetaFile(bDir)
+		if err != nil {
+			level.Error(l).Log("msg", "failed to read meta.json for a block", "dir", bDir, "err", err)
+			continue
+		}
+
+		// See if we already have the block in memory or open it otherwise.
+		block, open := getBlock(loaded, meta.ULID)
+		if !open {
+			block, err = OpenBlock(l, bDir, chunkPool)
+			if err != nil {
+				corrupted[meta.ULID] = err
+				continue
+			}
+		}
+		blocks = append(blocks, block)
+	}
+	return blocks, corrupted, nil
+}
+
+// deletableBlocks returns all blocks past the retention policy.
+func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
+	deletable := make(map[ulid.ULID]*Block)
+
+	// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
+	// This ensures that retention will remove the oldest blocks.
+	sort.Slice(blocks, func(i, j int) bool {
+		return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
+	})
+
+	for _, block := range blocks {
+		if block.Meta().Compaction.Deletable {
+			deletable[block.Meta().ULID] = block
+		}
+	}
+
+	for ulid, block := range db.beyondTimeRetention(blocks) {
+		deletable[ulid] = block
+	}
+
+	for ulid, block := range db.beyondSizeRetention(blocks) {
+		deletable[ulid] = block
+	}
+
+	return deletable
+}
+
+func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
+	// Time retention is disabled or no blocks to work with.
+	if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 {
+		return
+	}
+
+	deletable = make(map[ulid.ULID]*Block)
+	for i, block := range blocks {
+		// The difference between the first block and this block is larger than
+		// the retention period so any blocks after that are added as deletable.
+		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration {
+			for _, b := range blocks[i:] {
+				deletable[b.meta.ULID] = b
+			}
+			db.metrics.timeRetentionCount.Inc()
+			break
+		}
+	}
+	return deletable
+}
+
+func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
+	// Size retention is disabled or no blocks to work with.
+	if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 {
+		return
+	}
+
+	deletable = make(map[ulid.ULID]*Block)
+
+	walSize, _ := db.Head().wal.Size()
+	// Initializing size counter with WAL size,
+	// as that is part of the retention strategy.
+	blocksSize := walSize
+	for i, block := range blocks {
+		blocksSize += block.Size()
+		if blocksSize > int64(db.opts.MaxBytes) {
+			// Add this and all following blocks for deletion.
+			for _, b := range blocks[i:] {
+				deletable[b.meta.ULID] = b
+			}
+			db.metrics.sizeRetentionCount.Inc()
+			break
+		}
+	}
+	return deletable
+}
+
+// deleteBlocks closes and deletes blocks from the disk.
+// When the map contains a non-nil block object, the block is loaded in memory,
+// so it needs to be closed first, as it might have to wait for pending readers to complete.
+func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
+	for ulid, block := range blocks {
+		if block != nil {
+			if err := block.Close(); err != nil {
+				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err)
+			}
+		}
+		if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil {
+			return errors.Wrapf(err, "delete obsolete block %s", ulid)
+		}
+	}
+	return nil
+}
+
+// validateBlockSequence returns an error if the given block meta files indicate that some blocks overlap within the sequence.
+func validateBlockSequence(bs []*Block) error {
+	if len(bs) <= 1 {
+		return nil
+	}
+
+	var metas []BlockMeta
+	for _, b := range bs {
+		metas = append(metas, b.meta)
+	}
+
+	overlaps := OverlappingBlocks(metas)
+	if len(overlaps) > 0 {
+		return errors.Errorf("block time ranges overlap: %s", overlaps)
+	}
+
+	return nil
+}
+
+// TimeRange specifies minTime and maxTime range.
+type TimeRange struct {
+	Min, Max int64
+}
+
+// Overlaps contains overlapping blocks aggregated by overlapping range.
+type Overlaps map[TimeRange][]BlockMeta
+
+// String returns a human-readable string representation of the overlapping blocks.
+func (o Overlaps) String() string {
+	var res []string
+	for r, overlaps := range o {
+		var groups []string
+		for _, m := range overlaps {
+			groups = append(groups, fmt.Sprintf(
+				"",
+				m.ULID.String(),
+				m.MinTime,
+				m.MaxTime,
+				(time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(),
+			))
+		}
+		res = append(res, fmt.Sprintf(
+			"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
+			r.Min, r.Max,
+			(time.Duration((r.Max-r.Min)/1000)*time.Second).String(),
+			len(overlaps),
+			strings.Join(groups, ", ")),
+		)
+	}
+	return strings.Join(res, "\n")
+}
+
+// OverlappingBlocks returns all overlapping blocks from given meta files.
+func OverlappingBlocks(bm []BlockMeta) Overlaps {
+	if len(bm) <= 1 {
+		return nil
+	}
+	var (
+		overlaps [][]BlockMeta
+
+		// pending contains blocks that have not yet ended relative to the "current" timestamp.
+		pending = []BlockMeta{bm[0]}
+		// continuousPending helps to aggregate same overlaps to single group.
+		continuousPending = true
+	)
+
+	// The blocks here are sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
+	// We check whether any of the pending blocks have finished (blocks we have seen before whose maxTime was still ahead of
+	// the current timestamp). If not, they overlap with our current block. At the same time, the current block is assumed pending.
+	for _, b := range bm[1:] {
+		var newPending []BlockMeta
+
+		for _, p := range pending {
+			// "b.MinTime" is our current time.
+			if b.MinTime >= p.MaxTime {
+				continuousPending = false
+				continue
+			}
+
+			// "p" overlaps with "b" and "p" is still pending.
+			newPending = append(newPending, p)
+		}
+
+		// Our block "b" is now pending.
+		pending = append(newPending, b)
+		if len(newPending) == 0 {
+			// No overlaps.
+			continue
+		}
+
+		if continuousPending && len(overlaps) > 0 {
+			overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
+			continue
+		}
+		overlaps = append(overlaps, append(newPending, b))
+		// Start a new pending group.
+		continuousPending = true
+	}
+
+	// Fetch the critical overlapped time range for each overlap group.
+	overlapGroups := Overlaps{}
+	for _, overlap := range overlaps {
+
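+		// The critical range is the intersection of the blocks in the group:
+		// the largest MinTime and the smallest MaxTime.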
+		minRange := TimeRange{Min: 0, Max: math.MaxInt64}
+		for _, b := range overlap {
+			if minRange.Max > b.MaxTime {
+				minRange.Max = b.MaxTime
+			}
+
+			if minRange.Min < b.MinTime {
+				minRange.Min = b.MinTime
+			}
+		}
+		overlapGroups[minRange] = overlap
+	}
+
+	return overlapGroups
+}
+
+func (db *DB) String() string {
+	return "HEAD"
+}
+
+// Blocks returns the database's persisted blocks.
+func (db *DB) Blocks() []*Block {
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	return db.blocks
+}
+
+// Head returns the database's head.
+func (db *DB) Head() *Head {
+	return db.head
+}
+
+// Close the partition.
+func (db *DB) Close() error {
+	close(db.stopc)
+	db.compactCancel()
+	<-db.donec
+
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	var g errgroup.Group
+
+	// blocks also contains all head blocks.
+	for _, pb := range db.blocks {
+		g.Go(pb.Close)
+	}
+
+	var merr tsdb_errors.MultiError
+
+	merr.Add(g.Wait())
+
+	if db.lockf != nil {
+		merr.Add(db.lockf.Release())
+	}
+	merr.Add(db.head.Close())
+	return merr.Err()
+}
+
+// DisableCompactions disables auto compactions.
+func (db *DB) DisableCompactions() {
+	db.autoCompactMtx.Lock()
+	defer db.autoCompactMtx.Unlock()
+
+	db.autoCompact = false
+	level.Info(db.logger).Log("msg", "Compactions disabled")
+}
+
+// EnableCompactions enables auto compactions.
+func (db *DB) EnableCompactions() {
+	db.autoCompactMtx.Lock()
+	defer db.autoCompactMtx.Unlock()
+
+	db.autoCompact = true
+	level.Info(db.logger).Log("msg", "Compactions enabled")
+}
+
+// Snapshot writes the current data to the directory. If withHead is set to true, it
+// will create a new block containing all data that's currently in the memory buffer/WAL.
+func (db *DB) Snapshot(dir string, withHead bool) error {
+	if dir == db.dir {
+		return errors.Errorf("cannot snapshot into base directory")
+	}
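+	// Block directories are identified by ULID-parsable names (see isBlockDir),
+	// so a snapshot directory named like a ULID could later be mistaken for a block.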
+	if _, err := ulid.ParseStrict(dir); err == nil {
+		return errors.Errorf("dir must not be a valid ULID")
+	}
+
+	db.cmtx.Lock()
+	defer db.cmtx.Unlock()
+
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	for _, b := range db.blocks {
+		level.Info(db.logger).Log("msg", "Snapshotting block", "block", b)
+
+		if err := b.Snapshot(dir); err != nil {
+			return errors.Wrapf(err, "error snapshotting block: %s", b.Dir())
+		}
+	}
+	if !withHead {
+		return nil
+	}
+
+	mint := db.head.MinTime()
+	maxt := db.head.MaxTime()
+	head := &RangeHead{
+		head: db.head,
+		mint: mint,
+		maxt: maxt,
+	}
+	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
+	// Because of this, block intervals are always +1 beyond the timestamp of the last sample they include.
+	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
+		return errors.Wrap(err, "snapshot head block")
+	}
+	return nil
+}
+
+// Querier returns a new querier over the data partition for the given time range.
+// A goroutine must not handle more than one open Querier.
+func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
+	var blocks []BlockReader
+	var blockMetas []BlockMeta
+
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	for _, b := range db.blocks {
+		if b.OverlapsClosedInterval(mint, maxt) {
+			blocks = append(blocks, b)
+			blockMetas = append(blockMetas, b.Meta())
+		}
+	}
+	if maxt >= db.head.MinTime() {
+		blocks = append(blocks, &RangeHead{
+			head: db.head,
+			mint: mint,
+			maxt: maxt,
+		})
+	}
+
+	blockQueriers := make([]storage.Querier, 0, len(blocks))
+	for _, b := range blocks {
+		q, err := NewBlockQuerier(b, mint, maxt)
+		if err == nil {
+			blockQueriers = append(blockQueriers, q)
+			continue
+		}
+		// If we fail, all previously opened queriers must be closed.
+		for _, q := range blockQueriers {
+			q.Close()
+		}
+		return nil, errors.Wrapf(err, "open querier for block %s", b)
+	}
+
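+	// Overlapping blocks are handled by the vertical querier, which can merge
+	// series across blocks whose time ranges overlap.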
+	if len(OverlappingBlocks(blockMetas)) > 0 {
+		return &verticalQuerier{
+			querier: querier{
+				blocks: blockQueriers,
+			},
+		}, nil
+	}
+
+	return &querier{
+		blocks: blockQueriers,
+	}, nil
+}
+
+func rangeForTimestamp(t int64, width int64) (maxt int64) {
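+	// Align t down to a multiple of width, then advance by one width to get
+	// the exclusive upper bound of the window containing t.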
+	return (t/width)*width + width
+}
+
+// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
+func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
+	db.cmtx.Lock()
+	defer db.cmtx.Unlock()
+
+	var g errgroup.Group
+
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	for _, b := range db.blocks {
+		if b.OverlapsClosedInterval(mint, maxt) {
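+			// Pass b as a closure argument so each goroutine deletes its own
+			// block instead of the shared loop variable.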
+			g.Go(func(b *Block) func() error {
+				return func() error { return b.Delete(mint, maxt, ms...) }
+			}(b))
+		}
+	}
+	g.Go(func() error {
+		return db.head.Delete(mint, maxt, ms...)
+	})
+	return g.Wait()
+}
+
+// CleanTombstones re-writes any blocks with tombstones.
+func (db *DB) CleanTombstones() (err error) {
+	db.cmtx.Lock()
+	defer db.cmtx.Unlock()
+
+	start := time.Now()
+	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
+
+	newUIDs := []ulid.ULID{}
+	defer func() {
+		// If any error occurs, we need to delete all the newly created block directories.
+		if err != nil {
+			for _, uid := range newUIDs {
+				dir := filepath.Join(db.Dir(), uid.String())
+				if err := os.RemoveAll(dir); err != nil {
+					level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
+				}
+			}
+		}
+	}()
+
+	db.mtx.RLock()
+	blocks := db.blocks[:]
+	db.mtx.RUnlock()
+
+	for _, b := range blocks {
+		if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
+			err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
+			return err
+		} else if uid != nil { // New block was created.
+			newUIDs = append(newUIDs, *uid)
+		}
+	}
+	return errors.Wrap(db.reload(), "reload blocks")
+}
+
+func isBlockDir(fi os.FileInfo) bool {
+	if !fi.IsDir() {
+		return false
+	}
+	_, err := ulid.ParseStrict(fi.Name())
+	return err == nil
+}
+
+func blockDirs(dir string) ([]string, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	var dirs []string
+
+	for _, fi := range files {
+		if isBlockDir(fi) {
+			dirs = append(dirs, filepath.Join(dir, fi.Name()))
+		}
+	}
+	return dirs, nil
+}
+
+func sequenceFiles(dir string) ([]string, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	var res []string
+
+	for _, fi := range files {
+		if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
+			continue
+		}
+		res = append(res, filepath.Join(dir, fi.Name()))
+	}
+	return res, nil
+}
+
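+// nextSequenceFile returns the name and index of the next numbered sequence
+// file in dir, one past the highest existing sequence number.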
+func nextSequenceFile(dir string) (string, int, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return "", 0, err
+	}
+
+	i := uint64(0)
+	for _, f := range files {
+		j, err := strconv.ParseUint(f.Name(), 10, 64)
+		if err != nil {
+			continue
+		}
+		i = j
+	}
+	return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
+}
+
+func closeAll(cs []io.Closer) error {
+	var merr tsdb_errors.MultiError
+
+	for _, c := range cs {
+		merr.Add(c.Close())
+	}
+	return merr.Err()
+}
+
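+// exponential doubles d and clamps the result to the [min, max] range.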
+func exponential(d, min, max time.Duration) time.Duration {
+	d *= 2
+	if d < min {
+		d = min
+	}
+	if d > max {
+		d = max
+	}
+	return d
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/goversion/goversion.go b/vendor/github.com/prometheus/prometheus/tsdb/goversion/goversion.go
new file mode 100644
index 0000000000000000000000000000000000000000..93ff6ef8b9f94fd0246c1d4985737ecd7759d7b7
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/goversion/goversion.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+// Package goversion enforces the go version supported by the tsdb module.
+package goversion
+
+const _SoftwareRequiresGOVERSION1_12 = uint8(0)
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/goversion/init.go b/vendor/github.com/prometheus/prometheus/tsdb/goversion/init.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd15e1f7afdadf649f59ce22a521ca707971cb05
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/goversion/init.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package goversion
+
+// This will fail to compile if the Go runtime version isn't >= 1.12.
+var _ = _SoftwareRequiresGOVERSION1_12
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b374fd5d4bd1af2c9ec13dcf602629b56e62350
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -0,0 +1,2054 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+	"github.com/prometheus/prometheus/tsdb/wal"
+)
+
+var (
+	// ErrInvalidSample is returned if an appended sample is not valid and can't
+	// be ingested.
+	ErrInvalidSample = errors.New("invalid sample")
+)
+
+// Head handles reads and writes of time series data within a time window.
+type Head struct {
+	// Keep all 64bit atomically accessed variables at the top of this struct.
+	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more info.
+	chunkRange       int64
+	numSeries        uint64
+	minTime, maxTime int64 // Current min and max of the samples included in the head.
+	minValidTime     int64 // Minimum timestamp allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block.
+	lastSeriesID     uint64
+
+	metrics    *headMetrics
+	wal        *wal.WAL
+	logger     log.Logger
+	appendPool sync.Pool
+	seriesPool sync.Pool
+	bytesPool  sync.Pool
+
+	// All series addressable by their ID or hash.
+	series *stripeSeries
+
+	symMtx  sync.RWMutex
+	symbols map[string]struct{}
+	values  map[string]stringset // Label names to possible values.
+
+	deletedMtx sync.Mutex
+	deleted    map[uint64]int // Deleted series, and what WAL segment they must be kept until.
+
+	postings *index.MemPostings // Postings lists for terms.
+
+	tombstones *tombstones.MemTombstones
+
+	iso *isolation
+
+	cardinalityMutex      sync.Mutex
+	cardinalityCache      *index.PostingsStats // Posting stats cache which will expire after 30sec.
+	lastPostingsStatsCall time.Duration        // Last posting stats call (PostingsCardinalityStats()) time for caching.
+}
+
+type headMetrics struct {
+	activeAppenders         prometheus.Gauge
+	series                  prometheus.GaugeFunc
+	seriesCreated           prometheus.Counter
+	seriesRemoved           prometheus.Counter
+	seriesNotFound          prometheus.Counter
+	chunks                  prometheus.Gauge
+	chunksCreated           prometheus.Counter
+	chunksRemoved           prometheus.Counter
+	gcDuration              prometheus.Summary
+	samplesAppended         prometheus.Counter
+	walTruncateDuration     prometheus.Summary
+	walCorruptionsTotal     prometheus.Counter
+	headTruncateFail        prometheus.Counter
+	headTruncateTotal       prometheus.Counter
+	checkpointDeleteFail    prometheus.Counter
+	checkpointDeleteTotal   prometheus.Counter
+	checkpointCreationFail  prometheus.Counter
+	checkpointCreationTotal prometheus.Counter
+}
+
+func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
+	m := &headMetrics{
+		activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "prometheus_tsdb_head_active_appenders",
+			Help: "Number of currently active appender transactions",
+		}),
+		series: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+			Name: "prometheus_tsdb_head_series",
+			Help: "Total number of series in the head block.",
+		}, func() float64 {
+			return float64(h.NumSeries())
+		}),
+		seriesCreated: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_series_created_total",
+			Help: "Total number of series created in the head",
+		}),
+		seriesRemoved: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_series_removed_total",
+			Help: "Total number of series removed in the head",
+		}),
+		seriesNotFound: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_series_not_found_total",
+			Help: "Total number of requests for series that were not found.",
+		}),
+		chunks: prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "prometheus_tsdb_head_chunks",
+			Help: "Total number of chunks in the head block.",
+		}),
+		chunksCreated: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_chunks_created_total",
+			Help: "Total number of chunks created in the head",
+		}),
+		chunksRemoved: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_chunks_removed_total",
+			Help: "Total number of chunks removed in the head",
+		}),
+		gcDuration: prometheus.NewSummary(prometheus.SummaryOpts{
+			Name: "prometheus_tsdb_head_gc_duration_seconds",
+			Help: "Runtime of garbage collection in the head block.",
+		}),
+		walTruncateDuration: prometheus.NewSummary(prometheus.SummaryOpts{
+			Name: "prometheus_tsdb_wal_truncate_duration_seconds",
+			Help: "Duration of WAL truncation.",
+		}),
+		walCorruptionsTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_wal_corruptions_total",
+			Help: "Total number of WAL corruptions.",
+		}),
+		samplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_samples_appended_total",
+			Help: "Total number of appended samples.",
+		}),
+		headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_truncations_failed_total",
+			Help: "Total number of head truncations that failed.",
+		}),
+		headTruncateTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_head_truncations_total",
+			Help: "Total number of head truncations attempted.",
+		}),
+		checkpointDeleteFail: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_checkpoint_deletions_failed_total",
+			Help: "Total number of checkpoint deletions that failed.",
+		}),
+		checkpointDeleteTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_checkpoint_deletions_total",
+			Help: "Total number of checkpoint deletions attempted.",
+		}),
+		checkpointCreationFail: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_checkpoint_creations_failed_total",
+			Help: "Total number of checkpoint creations that failed.",
+		}),
+		checkpointCreationTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_checkpoint_creations_total",
+			Help: "Total number of checkpoint creations attempted.",
+		}),
+	}
+
+	if r != nil {
+		r.MustRegister(
+			m.activeAppenders,
+			m.series,
+			m.chunks,
+			m.chunksCreated,
+			m.chunksRemoved,
+			m.seriesCreated,
+			m.seriesRemoved,
+			m.seriesNotFound,
+			m.gcDuration,
+			m.walTruncateDuration,
+			m.walCorruptionsTotal,
+			m.samplesAppended,
+			m.headTruncateFail,
+			m.headTruncateTotal,
+			m.checkpointDeleteFail,
+			m.checkpointDeleteTotal,
+			m.checkpointCreationFail,
+			m.checkpointCreationTotal,
+			// Metrics bound to functions and not needed in tests
+			// can be created and registered on the spot.
+			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+				Name: "prometheus_tsdb_head_max_time",
+				Help: "Maximum timestamp of the head block. The unit is decided by the library consumer.",
+			}, func() float64 {
+				return float64(h.MaxTime())
+			}),
+			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+				Name: "prometheus_tsdb_head_min_time",
+				Help: "Minimum time bound of the head block. The unit is decided by the library consumer.",
+			}, func() float64 {
+				return float64(h.MinTime())
+			}),
+			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+				Name: "prometheus_tsdb_isolation_low_watermark",
+				Help: "The lowest TSDB append ID that is still referenced.",
+			}, func() float64 {
+				return float64(h.iso.lowWatermark())
+			}),
+			prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+				Name: "prometheus_tsdb_isolation_high_watermark",
+				Help: "The highest TSDB append ID that has been given out.",
+			}, func() float64 {
+				h.iso.appendMtx.Lock()
+				defer h.iso.appendMtx.Unlock()
+				return float64(h.iso.lastAppendID)
+			}),
+		)
+	}
+	return m
+}
+
+const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
+
+// PostingsCardinalityStats returns the top 10 highest-cardinality stats by label and value names.
+func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
+	h.cardinalityMutex.Lock()
+	defer h.cardinalityMutex.Unlock()
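+	// Unix seconds are stored as a Duration so the elapsed time can be
+	// compared directly against cardinalityCacheExpirationTime.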
+	currentTime := time.Duration(time.Now().Unix()) * time.Second
+	seconds := currentTime - h.lastPostingsStatsCall
+	if seconds > cardinalityCacheExpirationTime {
+		h.cardinalityCache = nil
+	}
+	if h.cardinalityCache != nil {
+		return h.cardinalityCache
+	}
+	h.cardinalityCache = h.postings.Stats(statsByLabelName)
+	h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
+
+	return h.cardinalityCache
+}
+
+// NewHead opens the head block in dir.
+// stripeSize sets the number of entries in the hash map; it must be a power of 2.
+// A larger stripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
+// A smaller stripeSize reduces the memory allocated, but can decrease performance with a large number of series.
+func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int64, stripeSize int) (*Head, error) {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	if chunkRange < 1 {
+		return nil, errors.Errorf("invalid chunk range %d", chunkRange)
+	}
+	h := &Head{
+		wal:        wal,
+		logger:     l,
+		chunkRange: chunkRange,
+		minTime:    math.MaxInt64,
+		maxTime:    math.MinInt64,
+		series:     newStripeSeries(stripeSize),
+		values:     map[string]stringset{},
+		symbols:    map[string]struct{}{},
+		postings:   index.NewUnorderedMemPostings(),
+		tombstones: tombstones.NewMemTombstones(),
+		iso:        newIsolation(),
+		deleted:    map[uint64]int{},
+	}
+	h.metrics = newHeadMetrics(h, r)
+
+	return h, nil
+}
+
+// processWALSamples adds a partition of samples it receives to the head and passes
+// them on to other workers.
+// Samples before the mint timestamp are discarded.
+func (h *Head) processWALSamples(
+	minValidTime int64,
+	input <-chan []record.RefSample, output chan<- []record.RefSample,
+) (unknownRefs uint64) {
+	defer close(output)
+
+	// Mitigate lock contention in getByID.
+	refSeries := map[uint64]*memSeries{}
+
+	mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
+
+	for samples := range input {
+		for _, s := range samples {
+			if s.T < minValidTime {
+				continue
+			}
+			ms := refSeries[s.Ref]
+			if ms == nil {
+				ms = h.series.getByID(s.Ref)
+				if ms == nil {
+					unknownRefs++
+					continue
+				}
+				refSeries[s.Ref] = ms
+			}
+			if _, chunkCreated := ms.append(s.T, s.V, 0); chunkCreated {
+				h.metrics.chunksCreated.Inc()
+				h.metrics.chunks.Inc()
+			}
+			if s.T > maxt {
+				maxt = s.T
+			}
+			if s.T < mint {
+				mint = s.T
+			}
+		}
+		output <- samples
+	}
+	h.updateMinMaxTime(mint, maxt)
+
+	return unknownRefs
+}
+
+func (h *Head) updateMinMaxTime(mint, maxt int64) {
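+	// Lock-free update: retry the compare-and-swap until the stored bound
+	// already covers our value or our swap succeeds.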
+	for {
+		lt := h.MinTime()
+		if mint >= lt {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&h.minTime, lt, mint) {
+			break
+		}
+	}
+	for {
+		ht := h.MaxTime()
+		if maxt <= ht {
+			break
+		}
+		if atomic.CompareAndSwapInt64(&h.maxTime, ht, maxt) {
+			break
+		}
+	}
+}
+
+func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) {
+	// Track number of samples that referenced a series we don't know about
+	// for error reporting.
+	var unknownRefs uint64
+
+	// Start workers that each process samples for a partition of the series ID space.
+	// They are connected through a ring of channels which ensures that all sample batches
+	// read from the WAL are processed in order.
+	var (
+		wg      sync.WaitGroup
+		n       = runtime.GOMAXPROCS(0)
+		inputs  = make([]chan []record.RefSample, n)
+		outputs = make([]chan []record.RefSample, n)
+	)
+	wg.Add(n)
+
+	defer func() {
+		// For CorruptionErr, make sure to terminate all workers before exiting.
+		if _, ok := err.(*wal.CorruptionErr); ok {
+			for i := 0; i < n; i++ {
+				close(inputs[i])
+				for range outputs[i] {
+				}
+			}
+			wg.Wait()
+		}
+	}()
+
+	for i := 0; i < n; i++ {
+		outputs[i] = make(chan []record.RefSample, 300)
+		inputs[i] = make(chan []record.RefSample, 300)
+
+		go func(input <-chan []record.RefSample, output chan<- []record.RefSample) {
+			unknown := h.processWALSamples(h.minValidTime, input, output)
+			atomic.AddUint64(&unknownRefs, unknown)
+			wg.Done()
+		}(inputs[i], outputs[i])
+	}
+
+	var (
+		dec    record.Decoder
+		shards = make([][]record.RefSample, n)
+	)
+
+	var (
+		decoded    = make(chan interface{}, 10)
+		errCh      = make(chan error, 1)
+		seriesPool = sync.Pool{
+			New: func() interface{} {
+				return []record.RefSeries{}
+			},
+		}
+		samplesPool = sync.Pool{
+			New: func() interface{} {
+				return []record.RefSample{}
+			},
+		}
+		tstonesPool = sync.Pool{
+			New: func() interface{} {
+				return []tombstones.Stone{}
+			},
+		}
+	)
+	go func() {
+		defer close(decoded)
+		for r.Next() {
+			rec := r.Record()
+			switch dec.Type(rec) {
+			case record.Series:
+				series := seriesPool.Get().([]record.RefSeries)[:0]
+				series, err = dec.Series(rec, series)
+				if err != nil {
+					errCh <- &wal.CorruptionErr{
+						Err:     errors.Wrap(err, "decode series"),
+						Segment: r.Segment(),
+						Offset:  r.Offset(),
+					}
+					return
+				}
+				decoded <- series
+			case record.Samples:
+				samples := samplesPool.Get().([]record.RefSample)[:0]
+				samples, err = dec.Samples(rec, samples)
+				if err != nil {
+					errCh <- &wal.CorruptionErr{
+						Err:     errors.Wrap(err, "decode samples"),
+						Segment: r.Segment(),
+						Offset:  r.Offset(),
+					}
+					return
+				}
+				decoded <- samples
+			case record.Tombstones:
+				tstones := tstonesPool.Get().([]tombstones.Stone)[:0]
+				tstones, err = dec.Tombstones(rec, tstones)
+				if err != nil {
+					errCh <- &wal.CorruptionErr{
+						Err:     errors.Wrap(err, "decode tombstones"),
+						Segment: r.Segment(),
+						Offset:  r.Offset(),
+					}
+					return
+				}
+				decoded <- tstones
+			default:
+				errCh <- &wal.CorruptionErr{
+					Err:     errors.Errorf("invalid record type %v", dec.Type(rec)),
+					Segment: r.Segment(),
+					Offset:  r.Offset(),
+				}
+				return
+			}
+		}
+	}()
+
+	for d := range decoded {
+		switch v := d.(type) {
+		case []record.RefSeries:
+			for _, s := range v {
+				series, created := h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels)
+
+				if !created {
+					// There's already a different ref for this series.
+					multiRef[s.Ref] = series.ref
+				}
+
+				if h.lastSeriesID < s.Ref {
+					h.lastSeriesID = s.Ref
+				}
+			}
+			//lint:ignore SA6002 relax staticcheck verification.
+			seriesPool.Put(v)
+		case []record.RefSample:
+			samples := v
+			// We split up the samples into chunks of 5000 samples or less.
+			// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
+			// cause thousands of very large in flight buffers occupying large amounts
+			// of unused memory.
+			for len(samples) > 0 {
+				m := 5000
+				if len(samples) < m {
+					m = len(samples)
+				}
+				for i := 0; i < n; i++ {
+					var buf []record.RefSample
+					select {
+					case buf = <-outputs[i]:
+					default:
+					}
+					shards[i] = buf[:0]
+				}
+				for _, sam := range samples[:m] {
+					if r, ok := multiRef[sam.Ref]; ok {
+						sam.Ref = r
+					}
+					mod := sam.Ref % uint64(n)
+					shards[mod] = append(shards[mod], sam)
+				}
+				for i := 0; i < n; i++ {
+					inputs[i] <- shards[i]
+				}
+				samples = samples[m:]
+			}
+			//lint:ignore SA6002 relax staticcheck verification.
+			samplesPool.Put(v)
+		case []tombstones.Stone:
+			for _, s := range v {
+				for _, itv := range s.Intervals {
+					if itv.Maxt < h.minValidTime {
+						continue
+					}
+					if m := h.series.getByID(s.Ref); m == nil {
+						unknownRefs++
+						continue
+					}
+					h.tombstones.AddInterval(s.Ref, itv)
+				}
+			}
+			//lint:ignore SA6002 relax staticcheck verification.
+			tstonesPool.Put(v)
+		default:
+			panic(fmt.Errorf("unexpected decoded type: %T", d))
+		}
+	}
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+	}
+
+	// Signal termination to each worker and wait for it to close its output channel.
+	for i := 0; i < n; i++ {
+		close(inputs[i])
+		for range outputs[i] {
+		}
+	}
+	wg.Wait()
+
+	if r.Err() != nil {
+		return errors.Wrap(r.Err(), "read records")
+	}
+
+	if unknownRefs > 0 {
+		level.Warn(h.logger).Log("msg", "Unknown series references", "count", unknownRefs)
+	}
+	return nil
+}
+
+// Init loads data from the write ahead log and prepares the head for writes.
+// It should be called before using an appender so that it
+// limits the ingested samples to the head min valid time.
+func (h *Head) Init(minValidTime int64) error {
+	h.minValidTime = minValidTime
+	defer h.postings.EnsureOrder()
+	defer h.gc() // After loading the wal remove the obsolete data from the head.
+
+	if h.wal == nil {
+		return nil
+	}
+
+	level.Info(h.logger).Log("msg", "Replaying WAL, this may take awhile")
+	start := time.Now()
+	// Backfill the checkpoint first if it exists.
+	dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir())
+	if err != nil && err != record.ErrNotFound {
+		return errors.Wrap(err, "find last checkpoint")
+	}
+	multiRef := map[uint64]uint64{}
+	if err == nil {
+		sr, err := wal.NewSegmentsReader(dir)
+		if err != nil {
+			return errors.Wrap(err, "open checkpoint")
+		}
+		defer func() {
+			if err := sr.Close(); err != nil {
+				level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
+			}
+		}()
+
+		// A corrupted checkpoint is a hard error for now and requires user
+		// intervention. There's likely little data that can be recovered anyway.
+		if err := h.loadWAL(wal.NewReader(sr), multiRef); err != nil {
+			return errors.Wrap(err, "backfill checkpoint")
+		}
+		startFrom++
+		level.Info(h.logger).Log("msg", "WAL checkpoint loaded")
+	}
+
+	// Find the last segment.
+	_, last, err := h.wal.Segments()
+	if err != nil {
+		return errors.Wrap(err, "finding WAL segments")
+	}
+
+	// Backfill segments from the most recent checkpoint onwards.
+	for i := startFrom; i <= last; i++ {
+		s, err := wal.OpenReadSegment(wal.SegmentName(h.wal.Dir(), i))
+		if err != nil {
+			return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i))
+		}
+
+		sr := wal.NewSegmentBufReader(s)
+		err = h.loadWAL(wal.NewReader(sr), multiRef)
+		if err := sr.Close(); err != nil {
+			level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
+		}
+		if err != nil {
+			return err
+		}
+		level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last)
+	}
+
+	level.Info(h.logger).Log("msg", "WAL replay completed", "duration", time.Since(start).String())
+
+	return nil
+}
+
+// Truncate removes old data before mint from the head.
+func (h *Head) Truncate(mint int64) (err error) {
+	defer func() {
+		if err != nil {
+			h.metrics.headTruncateFail.Inc()
+		}
+	}()
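+	// A min time of math.MaxInt64 means no sample has been appended yet.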
+	initialize := h.MinTime() == math.MaxInt64
+
+	if h.MinTime() >= mint && !initialize {
+		return nil
+	}
+	atomic.StoreInt64(&h.minTime, mint)
+	atomic.StoreInt64(&h.minValidTime, mint)
+
+	// Ensure that max time is at least as high as min time.
+	for h.MaxTime() < mint {
+		atomic.CompareAndSwapInt64(&h.maxTime, h.MaxTime(), mint)
+	}
+
+	// This was an initial call to Truncate after loading blocks on startup.
+	// We haven't read back the WAL yet, so do not attempt to truncate it.
+	if initialize {
+		return nil
+	}
+
+	h.metrics.headTruncateTotal.Inc()
+	start := time.Now()
+
+	h.gc()
+	level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start))
+	h.metrics.gcDuration.Observe(time.Since(start).Seconds())
+
+	if h.wal == nil {
+		return nil
+	}
+	start = time.Now()
+
+	first, last, err := h.wal.Segments()
+	if err != nil {
+		return errors.Wrap(err, "get segment range")
+	}
+	// Start a new segment, so that low-ingestion-volume TSDBs don't have more
+	// WAL than needed.
+	err = h.wal.NextSegment()
+	if err != nil {
+		return errors.Wrap(err, "next segment")
+	}
+	last-- // Never consider last segment for checkpoint.
+	if last < 0 {
+		return nil // no segments yet.
+	}
+	// The lower two thirds of segments should contain mostly obsolete samples.
+	// If we have fewer than two segments, it's not worth checkpointing yet.
+	// With the default 2h blocks, this will keep up to around 3h worth
+	// of WAL segments.
+	last = first + (last-first)*2/3
+	if last <= first {
+		return nil
+	}
+
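+	// keep reports whether a series record must survive the checkpoint: the
+	// series is either still in memory or was deleted recently enough that
+	// later WAL segments may still carry samples for its ref ID.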
+	keep := func(id uint64) bool {
+		if h.series.getByID(id) != nil {
+			return true
+		}
+		h.deletedMtx.Lock()
+		_, ok := h.deleted[id]
+		h.deletedMtx.Unlock()
+		return ok
+	}
+	h.metrics.checkpointCreationTotal.Inc()
+	if _, err = wal.Checkpoint(h.wal, first, last, keep, mint); err != nil {
+		h.metrics.checkpointCreationFail.Inc()
+		return errors.Wrap(err, "create checkpoint")
+	}
+	if err := h.wal.Truncate(last + 1); err != nil {
+		// If truncating fails, we'll just try again at the next checkpoint.
+		// Leftover segments will just be ignored in the future if there's a checkpoint
+		// that supersedes them.
+		level.Error(h.logger).Log("msg", "truncating segments failed", "err", err)
+	}
+
+	// The checkpoint is written and the segments before it are truncated, so we no
+	// longer need to track deleted series that are before it.
+	h.deletedMtx.Lock()
+	for ref, segment := range h.deleted {
+		if segment < first {
+			delete(h.deleted, ref)
+		}
+	}
+	h.deletedMtx.Unlock()
+
+	h.metrics.checkpointDeleteTotal.Inc()
+	if err := wal.DeleteCheckpoints(h.wal.Dir(), last); err != nil {
+		// Leftover old checkpoints do not cause problems down the line beyond
+		// occupying disk space.
+		// They will just be ignored since a higher checkpoint exists.
+		level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err)
+		h.metrics.checkpointDeleteFail.Inc()
+	}
+	h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds())
+
+	level.Info(h.logger).Log("msg", "WAL checkpoint complete",
+		"first", first, "last", last, "duration", time.Since(start))
+
+	return nil
+}
+
+// initTime initializes a head with the first timestamp. This only needs to be called
+// for a completely fresh head with an empty WAL.
+// Returns true if the initialization took effect.
+func (h *Head) initTime(t int64) (initialized bool) {
+	if !atomic.CompareAndSwapInt64(&h.minTime, math.MaxInt64, t) {
+		return false
+	}
+	// Ensure that max time is initialized to at least the min time we just set.
+	// Concurrent appenders may already have set it to a higher value.
+	atomic.CompareAndSwapInt64(&h.maxTime, math.MinInt64, t)
+
+	return true
+}
+
+type Stats struct {
+	NumSeries         uint64
+	MinTime, MaxTime  int64
+	IndexPostingStats *index.PostingsStats
+}
+
+// Stats returns important current HEAD statistics. Note that it is expensive to
+// calculate these.
+func (h *Head) Stats(statsByLabelName string) *Stats {
+	return &Stats{
+		NumSeries:         h.NumSeries(),
+		MaxTime:           h.MaxTime(),
+		MinTime:           h.MinTime(),
+		IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName),
+	}
+}
+
+type RangeHead struct {
+	head       *Head
+	mint, maxt int64
+}
+
+// NewRangeHead returns a *RangeHead.
+func NewRangeHead(head *Head, mint, maxt int64) *RangeHead {
+	return &RangeHead{
+		head: head,
+		mint: mint,
+		maxt: maxt,
+	}
+}
+
+func (h *RangeHead) Index() (IndexReader, error) {
+	return h.head.indexRange(h.mint, h.maxt), nil
+}
+
+func (h *RangeHead) Chunks() (ChunkReader, error) {
+	return h.head.chunksRange(h.mint, h.maxt, h.head.iso.State()), nil
+}
+
+func (h *RangeHead) Tombstones() (tombstones.Reader, error) {
+	return h.head.tombstones, nil
+}
+
+func (h *RangeHead) MinTime() int64 {
+	return h.mint
+}
+
+func (h *RangeHead) MaxTime() int64 {
+	return h.maxt
+}
+
+func (h *RangeHead) NumSeries() uint64 {
+	return h.head.NumSeries()
+}
+
+func (h *RangeHead) Meta() BlockMeta {
+	return BlockMeta{
+		MinTime: h.MinTime(),
+		MaxTime: h.MaxTime(),
+		ULID:    h.head.Meta().ULID,
+		Stats: BlockStats{
+			NumSeries: h.NumSeries(),
+		},
+	}
+}
+
+// initAppender is a helper to initialize the time bounds of the head
+// upon the first sample it receives.
+type initAppender struct {
+	app  storage.Appender
+	head *Head
+}
+
+func (a *initAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+	if a.app != nil {
+		return a.app.Add(lset, t, v)
+	}
+	a.head.initTime(t)
+	a.app = a.head.appender()
+
+	return a.app.Add(lset, t, v)
+}
+
+func (a *initAppender) AddFast(ref uint64, t int64, v float64) error {
+	if a.app == nil {
+		return storage.ErrNotFound
+	}
+	return a.app.AddFast(ref, t, v)
+}
+
+func (a *initAppender) Commit() error {
+	if a.app == nil {
+		return nil
+	}
+	return a.app.Commit()
+}
+
+func (a *initAppender) Rollback() error {
+	if a.app == nil {
+		return nil
+	}
+	return a.app.Rollback()
+}
+
+// Appender returns a new Appender on the database.
+func (h *Head) Appender() storage.Appender {
+	h.metrics.activeAppenders.Inc()
+
+	// The head cache might not have a starting point yet. The init appender
+	// picks up the first appended timestamp as the base.
+	if h.MinTime() == math.MaxInt64 {
+		return &initAppender{
+			head: h,
+		}
+	}
+	return h.appender()
+}
+
+func (h *Head) appender() *headAppender {
+	appendID := h.iso.newAppendID()
+	cleanupAppendIDsBelow := h.iso.lowWatermark()
+
+	return &headAppender{
+		head: h,
+		// Set the minimum valid time to whichever is greater: the head min valid time or the compaction window.
+		// This ensures that no samples will be added within the compaction window to avoid races.
+		minValidTime:          max(atomic.LoadInt64(&h.minValidTime), h.MaxTime()-h.chunkRange/2),
+		mint:                  math.MaxInt64,
+		maxt:                  math.MinInt64,
+		samples:               h.getAppendBuffer(),
+		sampleSeries:          h.getSeriesBuffer(),
+		appendID:              appendID,
+		cleanupAppendIDsBelow: cleanupAppendIDsBelow,
+	}
+}
+
+func max(a, b int64) int64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func (h *Head) getAppendBuffer() []record.RefSample {
+	b := h.appendPool.Get()
+	if b == nil {
+		return make([]record.RefSample, 0, 512)
+	}
+	return b.([]record.RefSample)
+}
+
+func (h *Head) putAppendBuffer(b []record.RefSample) {
+	//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+	h.appendPool.Put(b[:0])
+}
+
+func (h *Head) getSeriesBuffer() []*memSeries {
+	b := h.seriesPool.Get()
+	if b == nil {
+		return make([]*memSeries, 0, 512)
+	}
+	return b.([]*memSeries)
+}
+
+func (h *Head) putSeriesBuffer(b []*memSeries) {
+	//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+	h.seriesPool.Put(b[:0])
+}
+
+func (h *Head) getBytesBuffer() []byte {
+	b := h.bytesPool.Get()
+	if b == nil {
+		return make([]byte, 0, 1024)
+	}
+	return b.([]byte)
+}
+
+func (h *Head) putBytesBuffer(b []byte) {
+	//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+	h.bytesPool.Put(b[:0])
+}
+
+type headAppender struct {
+	head         *Head
+	minValidTime int64 // No samples below this timestamp are allowed.
+	mint, maxt   int64
+
+	series       []record.RefSeries
+	samples      []record.RefSample
+	sampleSeries []*memSeries
+
+	appendID, cleanupAppendIDsBelow uint64
+}
+
+func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+	if t < a.minValidTime {
+		return 0, storage.ErrOutOfBounds
+	}
+
+	// Ensure no empty labels have gotten through.
+	lset = lset.WithoutEmpty()
+
+	if len(lset) == 0 {
+		return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
+	}
+
+	if l, dup := lset.HasDuplicateLabelNames(); dup {
+		return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
+	}
+
+	s, created := a.head.getOrCreate(lset.Hash(), lset)
+	if created {
+		a.series = append(a.series, record.RefSeries{
+			Ref:    s.ref,
+			Labels: lset,
+		})
+	}
+	return s.ref, a.AddFast(s.ref, t, v)
+}
+
+func (a *headAppender) AddFast(ref uint64, t int64, v float64) error {
+	if t < a.minValidTime {
+		return storage.ErrOutOfBounds
+	}
+
+	s := a.head.series.getByID(ref)
+	if s == nil {
+		return errors.Wrap(storage.ErrNotFound, "unknown series")
+	}
+	s.Lock()
+	if err := s.appendable(t, v); err != nil {
+		s.Unlock()
+		return err
+	}
+	s.pendingCommit = true
+	s.Unlock()
+
+	if t < a.mint {
+		a.mint = t
+	}
+	if t > a.maxt {
+		a.maxt = t
+	}
+
+	a.samples = append(a.samples, record.RefSample{
+		Ref: ref,
+		T:   t,
+		V:   v,
+	})
+	a.sampleSeries = append(a.sampleSeries, s)
+	return nil
+}
+
+func (a *headAppender) log() error {
+	if a.head.wal == nil {
+		return nil
+	}
+
+	buf := a.head.getBytesBuffer()
+	defer func() { a.head.putBytesBuffer(buf) }()
+
+	var rec []byte
+	var enc record.Encoder
+
+	if len(a.series) > 0 {
+		rec = enc.Series(a.series, buf)
+		buf = rec[:0]
+
+		if err := a.head.wal.Log(rec); err != nil {
+			return errors.Wrap(err, "log series")
+		}
+	}
+	if len(a.samples) > 0 {
+		rec = enc.Samples(a.samples, buf)
+		buf = rec[:0]
+
+		if err := a.head.wal.Log(rec); err != nil {
+			return errors.Wrap(err, "log samples")
+		}
+	}
+	return nil
+}
+
+func (a *headAppender) Commit() error {
+	if err := a.log(); err != nil {
+		//nolint: errcheck
+		a.Rollback() // Most likely the same error will happen again.
+		return errors.Wrap(err, "write to WAL")
+	}
+
+	defer a.head.metrics.activeAppenders.Dec()
+	defer a.head.putAppendBuffer(a.samples)
+	defer a.head.putSeriesBuffer(a.sampleSeries)
+	defer a.head.iso.closeAppend(a.appendID)
+
+	total := len(a.samples)
+	var series *memSeries
+	for i, s := range a.samples {
+		series = a.sampleSeries[i]
+		series.Lock()
+		ok, chunkCreated := series.append(s.T, s.V, a.appendID)
+		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+		series.pendingCommit = false
+		series.Unlock()
+
+		if !ok {
+			total--
+		}
+		if chunkCreated {
+			a.head.metrics.chunks.Inc()
+			a.head.metrics.chunksCreated.Inc()
+		}
+	}
+
+	a.head.metrics.samplesAppended.Add(float64(total))
+	a.head.updateMinMaxTime(a.mint, a.maxt)
+
+	return nil
+}
+
+func (a *headAppender) Rollback() error {
+	defer a.head.metrics.activeAppenders.Dec()
+	defer a.head.iso.closeAppend(a.appendID)
+	defer a.head.putSeriesBuffer(a.sampleSeries)
+
+	var series *memSeries
+	for i := range a.samples {
+		series = a.sampleSeries[i]
+		series.Lock()
+		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+		series.pendingCommit = false
+		series.Unlock()
+	}
+	a.head.putAppendBuffer(a.samples)
+	a.samples = nil
+
+	// Series are created in the head memory regardless of rollback. Thus we have
+	// to log them to the WAL in any case.
+	return a.log()
+}
+
+// Delete all samples in the range of [mint, maxt] for series that satisfy the given
+// label matchers.
+func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
+	// Do not delete anything beyond the currently valid range.
+	mint, maxt = clampInterval(mint, maxt, h.MinTime(), h.MaxTime())
+
+	ir := h.indexRange(mint, maxt)
+
+	p, err := PostingsForMatchers(ir, ms...)
+	if err != nil {
+		return errors.Wrap(err, "select series")
+	}
+
+	var stones []tombstones.Stone
+	for p.Next() {
+		series := h.series.getByID(p.At())
+
+		series.RLock()
+		t0, t1 := series.minTime(), series.maxTime()
+		series.RUnlock()
+		if t0 == math.MinInt64 || t1 == math.MinInt64 {
+			continue
+		}
+		// Delete only up to the current values and not beyond.
+		t0, t1 = clampInterval(mint, maxt, t0, t1)
+		stones = append(stones, tombstones.Stone{Ref: p.At(), Intervals: tombstones.Intervals{{Mint: t0, Maxt: t1}}})
+	}
+	if p.Err() != nil {
+		return p.Err()
+	}
+	if h.wal != nil {
+		var enc record.Encoder
+		if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
+			return err
+		}
+	}
+	for _, s := range stones {
+		h.tombstones.AddInterval(s.Ref, s.Intervals[0])
+	}
+
+	return nil
+}
+
+// gc removes data before the minimum timestamp from the head.
+func (h *Head) gc() {
+	// Only data strictly lower than this timestamp must be deleted.
+	mint := h.MinTime()
+
+	// Drop old chunks and remember series IDs and hashes if they can be
+	// deleted entirely.
+	deleted, chunksRemoved := h.series.gc(mint)
+	seriesRemoved := len(deleted)
+
+	h.metrics.seriesRemoved.Add(float64(seriesRemoved))
+	h.metrics.chunksRemoved.Add(float64(chunksRemoved))
+	h.metrics.chunks.Sub(float64(chunksRemoved))
+	// Using AddUint64 to subtract series removed.
+	// See: https://golang.org/pkg/sync/atomic/#AddUint64.
+	atomic.AddUint64(&h.numSeries, ^uint64(seriesRemoved-1))
+
+	// Remove deleted series IDs from the postings lists.
+	h.postings.Delete(deleted)
+
+	if h.wal != nil {
+		_, last, _ := h.wal.Segments()
+		h.deletedMtx.Lock()
+		// Keep series records until we're past segment 'last'
+		// because the WAL will still have sample records with
+		// this ref ID. If we didn't keep these series records, then
+		// on startup the WAL replay, or any other code that reads
+		// the WAL, wouldn't be able to use those samples, since it
+		// would have no labels for that ref ID.
+		for ref := range deleted {
+			h.deleted[ref] = last
+		}
+		h.deletedMtx.Unlock()
+	}
+
+	// Rebuild symbols and label value indices from what is left in the postings terms.
+	symbols := make(map[string]struct{}, len(h.symbols))
+	values := make(map[string]stringset, len(h.values))
+
+	if err := h.postings.Iter(func(t labels.Label, _ index.Postings) error {
+		symbols[t.Name] = struct{}{}
+		symbols[t.Value] = struct{}{}
+
+		ss, ok := values[t.Name]
+		if !ok {
+			ss = stringset{}
+			values[t.Name] = ss
+		}
+		ss.set(t.Value)
+		return nil
+	}); err != nil {
+		// This should never happen, as the iteration function only returns nil.
+		panic(err)
+	}
+
+	h.symMtx.Lock()
+
+	h.symbols = symbols
+	h.values = values
+
+	h.symMtx.Unlock()
+}
+
+// Tombstones returns a new reader over the head's tombstones.
+func (h *Head) Tombstones() (tombstones.Reader, error) {
+	return h.tombstones, nil
+}
+
+// Index returns an IndexReader against the block.
+func (h *Head) Index() (IndexReader, error) {
+	return h.indexRange(math.MinInt64, math.MaxInt64), nil
+}
+
+func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
+	if hmin := h.MinTime(); hmin > mint {
+		mint = hmin
+	}
+	return &headIndexReader{head: h, mint: mint, maxt: maxt}
+}
+
+// Chunks returns a ChunkReader against the block.
+func (h *Head) Chunks() (ChunkReader, error) {
+	return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State()), nil
+}
+
+func (h *Head) chunksRange(mint, maxt int64, is *isolationState) *headChunkReader {
+	if hmin := h.MinTime(); hmin > mint {
+		mint = hmin
+	}
+	return &headChunkReader{
+		head:     h,
+		mint:     mint,
+		maxt:     maxt,
+		isoState: is,
+	}
+}
+
+// NumSeries returns the number of active series in the head.
+func (h *Head) NumSeries() uint64 {
+	return atomic.LoadUint64(&h.numSeries)
+}
+
+// Meta returns meta information about the head.
+// The head is dynamic, so it will return dynamic results.
+func (h *Head) Meta() BlockMeta {
+	var id [16]byte
+	copy(id[:], "______head______")
+	return BlockMeta{
+		MinTime: h.MinTime(),
+		MaxTime: h.MaxTime(),
+		ULID:    ulid.ULID(id),
+		Stats: BlockStats{
+			NumSeries: h.NumSeries(),
+		},
+	}
+}
+
+// MinTime returns the lowest time bound on visible data in the head.
+func (h *Head) MinTime() int64 {
+	return atomic.LoadInt64(&h.minTime)
+}
+
+// MaxTime returns the highest timestamp seen in data of the head.
+func (h *Head) MaxTime() int64 {
+	return atomic.LoadInt64(&h.maxTime)
+}
+
+// compactable returns whether the head has a compactable range.
+// The head has a compactable range when the head time range is 1.5 times the chunk range.
+// The 0.5 acts as a buffer of the appendable window.
+func (h *Head) compactable() bool {
+	return h.MaxTime()-h.MinTime() > h.chunkRange/2*3
+}
+
+// Close flushes the WAL and closes the head.
+func (h *Head) Close() error {
+	if h.wal == nil {
+		return nil
+	}
+	return h.wal.Close()
+}
+
+type headChunkReader struct {
+	head       *Head
+	mint, maxt int64
+	isoState   *isolationState
+}
+
+func (h *headChunkReader) Close() error {
+	h.isoState.Close()
+	return nil
+}
+
+// packChunkID packs a seriesID and a chunkID into a single global 8 byte ID.
+// It panics if the seriesID exceeds 5 bytes or the chunk ID exceeds 3 bytes.
+func packChunkID(seriesID, chunkID uint64) uint64 {
+	if seriesID > (1<<40)-1 {
+		panic("series ID exceeds 5 bytes")
+	}
+	if chunkID > (1<<24)-1 {
+		panic("chunk ID exceeds 3 bytes")
+	}
+	return (seriesID << 24) | chunkID
+}
+
+func unpackChunkID(id uint64) (seriesID, chunkID uint64) {
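+	// The series ID lives in the upper 40 bits; shifting left then right by
+	// 40 clears them, leaving the 24-bit chunk ID.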
+	return id >> 24, (id << 40) >> 40
+}
+
+// Chunk returns the chunk for the reference number.
+func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) {
+	sid, cid := unpackChunkID(ref)
+
+	s := h.head.series.getByID(sid)
+	// This means that the series has been garbage collected.
+	if s == nil {
+		return nil, storage.ErrNotFound
+	}
+
+	s.Lock()
+	c := s.chunk(int(cid))
+
+	// This means that the chunk has been garbage collected or is outside
+	// the specified range.
+	if c == nil || !c.OverlapsClosedInterval(h.mint, h.maxt) {
+		s.Unlock()
+		return nil, storage.ErrNotFound
+	}
+	s.Unlock()
+
+	return &safeChunk{
+		Chunk:    c.chunk,
+		s:        s,
+		cid:      int(cid),
+		isoState: h.isoState,
+	}, nil
+}
+
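+// safeChunk wraps a chunk so that its iterator is created under the series
+// lock and honors the read isolation state.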
+type safeChunk struct {
+	chunkenc.Chunk
+	s        *memSeries
+	cid      int
+	isoState *isolationState
+}
+
+func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
+	c.s.Lock()
+	it := c.s.iterator(c.cid, c.isoState, reuseIter)
+	c.s.Unlock()
+	return it
+}
+
+type headIndexReader struct {
+	head       *Head
+	mint, maxt int64
+}
+
+func (h *headIndexReader) Close() error {
+	return nil
+}
+
+func (h *headIndexReader) Symbols() index.StringIter {
+	h.head.symMtx.RLock()
+	res := make([]string, 0, len(h.head.symbols))
+
+	for s := range h.head.symbols {
+		res = append(res, s)
+	}
+	h.head.symMtx.RUnlock()
+
+	sort.Strings(res)
+	return index.NewStringListIter(res)
+}
+
+// LabelValues returns the possible label values for the given label name.
+func (h *headIndexReader) LabelValues(name string) ([]string, error) {
+	h.head.symMtx.RLock()
+	sl := make([]string, 0, len(h.head.values[name]))
+	for s := range h.head.values[name] {
+		sl = append(sl, s)
+	}
+	h.head.symMtx.RUnlock()
+	sort.Strings(sl)
+	return sl, nil
+}
+
+// LabelNames returns all the unique label names present in the head.
+func (h *headIndexReader) LabelNames() ([]string, error) {
+	h.head.symMtx.RLock()
+	defer h.head.symMtx.RUnlock()
+	labelNames := make([]string, 0, len(h.head.values))
+	for name := range h.head.values {
+		if name == "" {
+			continue
+		}
+		labelNames = append(labelNames, name)
+	}
+	sort.Strings(labelNames)
+	return labelNames, nil
+}
+
+// Postings returns the postings list iterator for the label pairs.
+func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) {
+	res := make([]index.Postings, 0, len(values))
+	for _, value := range values {
+		res = append(res, h.head.postings.Get(name, value))
+	}
+	return index.Merge(res...), nil
+}
+
+func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
+	series := make([]*memSeries, 0, 128)
+
+	// Fetch all the series only once.
+	for p.Next() {
+		s := h.head.series.getByID(p.At())
+		if s == nil {
+			level.Debug(h.head.logger).Log("msg", "Looked up series not found")
+		} else {
+			series = append(series, s)
+		}
+	}
+	if err := p.Err(); err != nil {
+		return index.ErrPostings(errors.Wrap(err, "expand postings"))
+	}
+
+	sort.Slice(series, func(i, j int) bool {
+		return labels.Compare(series[i].lset, series[j].lset) < 0
+	})
+
+	// Convert back to list.
+	ep := make([]uint64, 0, len(series))
+	for _, p := range series {
+		ep = append(ep, p.ref)
+	}
+	return index.NewListPostings(ep)
+}
+
+// Series returns the series for the given reference.
+func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
+	s := h.head.series.getByID(ref)
+
+	if s == nil {
+		h.head.metrics.seriesNotFound.Inc()
+		return storage.ErrNotFound
+	}
+	*lbls = append((*lbls)[:0], s.lset...)
+
+	s.Lock()
+	defer s.Unlock()
+
+	*chks = (*chks)[:0]
+
+	for i, c := range s.chunks {
+		// Do not expose chunks that are outside of the specified range.
+		if !c.OverlapsClosedInterval(h.mint, h.maxt) {
+			continue
+		}
+		// Set the head chunks as open (being appended to).
+		maxTime := c.maxTime
+		if s.headChunk == c {
+			maxTime = math.MaxInt64
+		}
+
+		*chks = append(*chks, chunks.Meta{
+			MinTime: c.minTime,
+			MaxTime: maxTime,
+			Ref:     packChunkID(s.ref, uint64(s.chunkID(i))),
+		})
+	}
+
+	return nil
+}
+
+func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool) {
+	// Just using `getOrSet` below would be semantically sufficient, but we'd create
+	// a new series on every sample inserted via Add(), which causes allocations
+	// and makes our series IDs rather random and harder to compress in postings.
+	s := h.series.getByHash(hash, lset)
+	if s != nil {
+		return s, false
+	}
+
+	// Optimistically assume that we are the first one to create the series.
+	id := atomic.AddUint64(&h.lastSeriesID, 1)
+
+	return h.getOrCreateWithID(id, hash, lset)
+}
+
+func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool) {
+	s := newMemSeries(lset, id, h.chunkRange)
+
+	s, created := h.series.getOrSet(hash, s)
+	if !created {
+		return s, false
+	}
+
+	h.metrics.seriesCreated.Inc()
+	atomic.AddUint64(&h.numSeries, 1)
+
+	h.postings.Add(id, lset)
+
+	h.symMtx.Lock()
+	defer h.symMtx.Unlock()
+
+	for _, l := range lset {
+		valset, ok := h.values[l.Name]
+		if !ok {
+			valset = stringset{}
+			h.values[l.Name] = valset
+		}
+		valset.set(l.Value)
+
+		h.symbols[l.Name] = struct{}{}
+		h.symbols[l.Value] = struct{}{}
+	}
+
+	return s, true
+}
+
+// seriesHashmap is a simple hashmap for memSeries by their label set. It is built
+// on top of a regular hashmap and holds a slice of series to resolve hash collisions.
+// Its methods require the hash to be submitted with it to avoid re-computations throughout
+// the code.
+type seriesHashmap map[uint64][]*memSeries
+
+func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
+	for _, s := range m[hash] {
+		if labels.Equal(s.lset, lset) {
+			return s
+		}
+	}
+	return nil
+}
+
+func (m seriesHashmap) set(hash uint64, s *memSeries) {
+	l := m[hash]
+	for i, prev := range l {
+		if labels.Equal(prev.lset, s.lset) {
+			l[i] = s
+			return
+		}
+	}
+	m[hash] = append(l, s)
+}
+
+func (m seriesHashmap) del(hash uint64, lset labels.Labels) {
+	var rem []*memSeries
+	for _, s := range m[hash] {
+		if !labels.Equal(s.lset, lset) {
+			rem = append(rem, s)
+		}
+	}
+	if len(rem) == 0 {
+		delete(m, hash)
+	} else {
+		m[hash] = rem
+	}
+}
+
+const (
+	// DefaultStripeSize is the default number of entries to allocate in the stripeSeries hash map.
+	DefaultStripeSize = 1 << 14
+)
+
+// stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention.
+// The locks are padded to not be on the same cache line. Filling the padded space
+// with the maps was profiled to be slower – likely due to the additional pointer
+// dereferences.
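+// The stripe for a given ID or hash is chosen as value & (size-1), so the
+// stripe count must be a power of two (see getByID and getOrSet below).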
+type stripeSeries struct {
+	size   int
+	series []map[uint64]*memSeries
+	hashes []seriesHashmap
+	locks  []stripeLock
+}
+
+type stripeLock struct {
+	sync.RWMutex
+	// Padding to avoid multiple locks being on the same cache line.
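+	// On 64-bit platforms sync.RWMutex is 24 bytes, so 40 bytes of padding
+	// rounds each stripeLock up to a full 64-byte cache line.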
+	_ [40]byte
+}
+
+func newStripeSeries(stripeSize int) *stripeSeries {
+	s := &stripeSeries{
+		size:   stripeSize,
+		series: make([]map[uint64]*memSeries, stripeSize),
+		hashes: make([]seriesHashmap, stripeSize),
+		locks:  make([]stripeLock, stripeSize),
+	}
+
+	for i := range s.series {
+		s.series[i] = map[uint64]*memSeries{}
+	}
+	for i := range s.hashes {
+		s.hashes[i] = seriesHashmap{}
+	}
+	return s
+}
+
+// gc garbage collects old chunks that are strictly before mint and removes
+// entire series that have no chunks left.
+func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int) {
+	var (
+		deleted  = map[uint64]struct{}{}
+		rmChunks = 0
+	)
+	// Run through all series and truncate old chunks. Mark those with no
+	// chunks left as deleted and store their ID.
+	for i := 0; i < s.size; i++ {
+		s.locks[i].Lock()
+
+		for hash, all := range s.hashes[i] {
+			for _, series := range all {
+				series.Lock()
+				rmChunks += series.truncateChunksBefore(mint)
+
+				if len(series.chunks) > 0 || series.pendingCommit {
+					series.Unlock()
+					continue
+				}
+
+				// The series is gone entirely. We need to keep the series lock
+				// and make sure we have acquired the stripe locks for hash and ID of the
+				// series alike.
+				// If we don't hold them all, there's a very small chance that a series receives
+				// samples again while we are half-way into deleting it.
+				j := int(series.ref) & (s.size - 1)
+
+				if i != j {
+					s.locks[j].Lock()
+				}
+
+				deleted[series.ref] = struct{}{}
+				s.hashes[i].del(hash, series.lset)
+				delete(s.series[j], series.ref)
+
+				if i != j {
+					s.locks[j].Unlock()
+				}
+
+				series.Unlock()
+			}
+		}
+
+		s.locks[i].Unlock()
+	}
+
+	return deleted, rmChunks
+}
+
+func (s *stripeSeries) getByID(id uint64) *memSeries {
+	i := id & uint64(s.size-1)
+
+	s.locks[i].RLock()
+	series := s.series[i][id]
+	s.locks[i].RUnlock()
+
+	return series
+}
+
+func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries {
+	i := hash & uint64(s.size-1)
+
+	s.locks[i].RLock()
+	series := s.hashes[i].get(hash, lset)
+	s.locks[i].RUnlock()
+
+	return series
+}
+
+func (s *stripeSeries) getOrSet(hash uint64, series *memSeries) (*memSeries, bool) {
+	i := hash & uint64(s.size-1)
+
+	s.locks[i].Lock()
+
+	if prev := s.hashes[i].get(hash, series.lset); prev != nil {
+		s.locks[i].Unlock()
+		return prev, false
+	}
+	s.hashes[i].set(hash, series)
+	s.locks[i].Unlock()
+
+	i = series.ref & uint64(s.size-1)
+
+	s.locks[i].Lock()
+	s.series[i][series.ref] = series
+	s.locks[i].Unlock()
+
+	return series, true
+}
+
+type sample struct {
+	t int64
+	v float64
+}
+
+func (s sample) T() int64 {
+	return s.t
+}
+
+func (s sample) V() float64 {
+	return s.v
+}
+
+// memSeries is the in-memory representation of a series. None of its methods
+// are goroutine safe and it is the caller's responsibility to lock it.
+type memSeries struct {
+	sync.RWMutex
+
+	ref          uint64
+	lset         labels.Labels
+	chunks       []*memChunk
+	headChunk    *memChunk
+	chunkRange   int64
+	firstChunkID int
+
+	nextAt        int64 // Timestamp at which to cut the next chunk.
+	sampleBuf     [4]sample
+	pendingCommit bool // Whether there are samples waiting to be committed to this series.
+
+	app chunkenc.Appender // Current appender for the chunk.
+
+	txs *txRing
+}
+
+func newMemSeries(lset labels.Labels, id uint64, chunkRange int64) *memSeries {
+	s := &memSeries{
+		lset:       lset,
+		ref:        id,
+		chunkRange: chunkRange,
+		nextAt:     math.MinInt64,
+		txs:        newTxRing(4),
+	}
+	return s
+}
+
+func (s *memSeries) minTime() int64 {
+	if len(s.chunks) == 0 {
+		return math.MinInt64
+	}
+	return s.chunks[0].minTime
+}
+
+func (s *memSeries) maxTime() int64 {
+	c := s.head()
+	if c == nil {
+		return math.MinInt64
+	}
+	return c.maxTime
+}
+
+func (s *memSeries) cut(mint int64) *memChunk {
+	c := &memChunk{
+		chunk:   chunkenc.NewXORChunk(),
+		minTime: mint,
+		maxTime: math.MinInt64,
+	}
+	s.chunks = append(s.chunks, c)
+	s.headChunk = c
+
+	// Remove excess capacity from the previous chunk's byte slice to save memory.
+	if l := len(s.chunks); l > 1 {
+		s.chunks[l-2].chunk.Compact()
+	}
+
+	// Set upper bound on when the next chunk must be started. An earlier timestamp
+	// may be chosen dynamically at a later point.
+	s.nextAt = rangeForTimestamp(mint, s.chunkRange)
+
+	app, err := c.chunk.Appender()
+	if err != nil {
+		panic(err)
+	}
+	s.app = app
+	return c
+}
+
+// appendable checks whether the given sample is valid for appending to the series.
+func (s *memSeries) appendable(t int64, v float64) error {
+	c := s.head()
+	if c == nil {
+		return nil
+	}
+
+	if t > c.maxTime {
+		return nil
+	}
+	if t < c.maxTime {
+		return storage.ErrOutOfOrderSample
+	}
+	// We are allowing exact duplicates as we can encounter them in valid cases
+	// like federation and erroring out at that time would be extremely noisy.
+	if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) {
+		return storage.ErrDuplicateSampleForTimestamp
+	}
+	return nil
+}
+
+func (s *memSeries) chunk(id int) *memChunk {
+	ix := id - s.firstChunkID
+	if ix < 0 || ix >= len(s.chunks) {
+		return nil
+	}
+	return s.chunks[ix]
+}
+
+func (s *memSeries) chunkID(pos int) int {
+	return pos + s.firstChunkID
+}
+
+// truncateChunksBefore removes all chunks from the series that have no timestamp
+// at or after mint. Chunk IDs remain unchanged.
+func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
+	var k int
+	for i, c := range s.chunks {
+		if c.maxTime >= mint {
+			break
+		}
+		k = i + 1
+	}
+	s.chunks = append(s.chunks[:0], s.chunks[k:]...)
+	s.firstChunkID += k
+	if len(s.chunks) == 0 {
+		s.headChunk = nil
+	} else {
+		s.headChunk = s.chunks[len(s.chunks)-1]
+	}
+
+	return k
+}
+
+// append adds the sample (t, v) to the series. The caller also has to provide
+// the appendID for isolation. (The appendID can be zero, which results in no
+// isolation for this append.)
+func (s *memSeries) append(t int64, v float64, appendID uint64) (success, chunkCreated bool) {
+	// Based on the Gorilla white paper, this offers a near-optimal compression
+	// ratio, so anything bigger than this has diminishing returns and increases
+	// the time range within which we have to decompress all samples.
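+	// For example, at a 15s scrape interval 120 samples cover 30 minutes of data.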
+	const samplesPerChunk = 120
+
+	c := s.head()
+
+	if c == nil {
+		c = s.cut(t)
+		chunkCreated = true
+	}
+	numSamples := c.chunk.NumSamples()
+
+	// Out of order sample.
+	if c.maxTime >= t {
+		return false, chunkCreated
+	}
+	// If we reach 25% of a chunk's desired sample count, set a definitive time
+	// at which to start the next chunk.
+	// At latest it must happen at the timestamp set when the chunk was cut.
+	if numSamples == samplesPerChunk/4 {
+		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt)
+	}
+	if t >= s.nextAt {
+		c = s.cut(t)
+		chunkCreated = true
+	}
+	s.app.Append(t, v)
+
+	c.maxTime = t
+
+	s.sampleBuf[0] = s.sampleBuf[1]
+	s.sampleBuf[1] = s.sampleBuf[2]
+	s.sampleBuf[2] = s.sampleBuf[3]
+	s.sampleBuf[3] = sample{t: t, v: v}
+
+	if appendID > 0 {
+		s.txs.add(appendID)
+	}
+
+	return true, chunkCreated
+}
+
+// cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after
+// acquiring lock.
+func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
+	s.txs.cleanupAppendIDsBelow(bound)
+}
+
+// computeChunkEndTime estimates the end timestamp based on the beginning of a
+// chunk, its current timestamp and the upper bound up to which we insert data.
+// It assumes that the time range is 1/4 full.
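+// For illustration: with start=0, cur=249, max=2000 we get
+// a = 2000/((249+1)*4) = 2, so the chunk is cut at 0 + 2000/2 = 1000, about
+// four times the range elapsed at the quarter-full mark.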
+func computeChunkEndTime(start, cur, max int64) int64 {
+	a := (max - start) / ((cur - start + 1) * 4)
+	if a == 0 {
+		return max
+	}
+	return start + (max-start)/a
+}
+
+func (s *memSeries) iterator(id int, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator {
+	c := s.chunk(id)
+	// TODO(fabxc): Work around! A querier may have retrieved a pointer to a
+	// series' chunk, which then got garbage collected before it was accessed.
+	// We must ensure not to garbage collect as long as any readers still hold
+	// a reference.
+	if c == nil {
+		return chunkenc.NewNopIterator()
+	}
+
+	ix := id - s.firstChunkID
+
+	numSamples := c.chunk.NumSamples()
+	stopAfter := numSamples
+
+	if isoState != nil {
+		totalSamples := 0    // Total samples in this series.
+		previousSamples := 0 // Samples before this chunk.
+
+		for j, d := range s.chunks {
+			totalSamples += d.chunk.NumSamples()
+			if j < ix {
+				previousSamples += d.chunk.NumSamples()
+			}
+		}
+
+		// Subtract the transaction IDs belonging to samples that come after
+		// this chunk from the total transaction ID count.
+		appendIDsToConsider := s.txs.txIDCount - (totalSamples - (previousSamples + numSamples))
+
+		// Iterate over the appendIDs, find the first one that the isolation state says not
+		// to return.
+		it := s.txs.iterator()
+		for index := 0; index < appendIDsToConsider; index++ {
+			appendID := it.At()
+			if appendID <= isoState.maxAppendID { // Easy check first.
+				if _, ok := isoState.incompleteAppends[appendID]; !ok {
+					it.Next()
+					continue
+				}
+			}
+			stopAfter = numSamples - (appendIDsToConsider - index)
+			if stopAfter < 0 {
+				stopAfter = 0 // Stopped in a previous chunk.
+			}
+			break
+		}
+	}
+
+	if stopAfter == 0 {
+		return chunkenc.NewNopIterator()
+	}
+
+	if id-s.firstChunkID < len(s.chunks)-1 {
+		if stopAfter == numSamples {
+			return c.chunk.Iterator(it)
+		}
+		if msIter, ok := it.(*stopIterator); ok {
+			msIter.Iterator = c.chunk.Iterator(msIter.Iterator)
+			msIter.i = -1
+			msIter.stopAfter = stopAfter
+			return msIter
+		}
+		return &stopIterator{
+			Iterator:  c.chunk.Iterator(it),
+			i:         -1,
+			stopAfter: stopAfter,
+		}
+	}
+	// Serve the last 4 samples for the last chunk from the sample buffer
+	// as their compressed bytes may be mutated by added samples.
+	if msIter, ok := it.(*memSafeIterator); ok {
+		msIter.Iterator = c.chunk.Iterator(msIter.Iterator)
+		msIter.i = -1
+		msIter.total = numSamples
+		msIter.stopAfter = stopAfter
+		msIter.buf = s.sampleBuf
+		return msIter
+	}
+	return &memSafeIterator{
+		stopIterator: stopIterator{
+			Iterator:  c.chunk.Iterator(it),
+			i:         -1,
+			stopAfter: stopAfter,
+		},
+		total: numSamples,
+		buf:   s.sampleBuf,
+	}
+}
+
+func (s *memSeries) head() *memChunk {
+	return s.headChunk
+}
+
+type memChunk struct {
+	chunk            chunkenc.Chunk
+	minTime, maxTime int64
+}
+
+// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
+func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool {
+	return mc.minTime <= maxt && mint <= mc.maxTime
+}
+
+type stopIterator struct {
+	chunkenc.Iterator
+
+	i, stopAfter int
+}
+
+func (it *stopIterator) Next() bool {
+	if it.i+1 >= it.stopAfter {
+		return false
+	}
+	it.i++
+	return it.Iterator.Next()
+}
+
+type memSafeIterator struct {
+	stopIterator
+
+	total int
+	buf   [4]sample
+}
+
+func (it *memSafeIterator) Next() bool {
+	if it.i+1 >= it.stopAfter {
+		return false
+	}
+	it.i++
+	if it.total-it.i > 4 {
+		return it.Iterator.Next()
+	}
+	return true
+}
+
+func (it *memSafeIterator) At() (int64, float64) {
+	if it.total-it.i > 4 {
+		return it.Iterator.At()
+	}
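+	// For illustration: with total=100, the last four samples (i=96..99) map
+	// to buf[0..3] via buf[4-(total-i)], so buf[3] holds the newest sample.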
+	s := it.buf[4-(it.total-it.i)]
+	return s.t, s.v
+}
+
+type stringset map[string]struct{}
+
+func (ss stringset) set(s string) {
+	ss[s] = struct{}{}
+}
+
+func (ss stringset) String() string {
+	return strings.Join(ss.slice(), ",")
+}
+
+func (ss stringset) slice() []string {
+	slice := make([]string, 0, len(ss))
+	for k := range ss {
+		slice = append(slice, k)
+	}
+	sort.Strings(slice)
+	return slice
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
new file mode 100644
index 0000000000000000000000000000000000000000..744f24f88e6bb8109f536eed0fc9c18e52ad70cb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
@@ -0,0 +1,1723 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/binary"
+	"hash"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"unsafe"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+)
+
+const (
+	// MagicIndex 4 bytes at the head of an index file.
+	MagicIndex = 0xBAAAD700
+	// HeaderLen is the number of bytes reserved at the start of the index for the header.
+	HeaderLen = 5
+
+	// FormatV1 represents version 1 of the index.
+	FormatV1 = 1
+	// FormatV2 represents version 2 of the index.
+	FormatV2 = 2
+
+	indexFilename = "index"
+)
+
+type indexWriterSeries struct {
+	labels labels.Labels
+	chunks []chunks.Meta // series file offset of chunks
+}
+
+type indexWriterSeriesSlice []*indexWriterSeries
+
+func (s indexWriterSeriesSlice) Len() int      { return len(s) }
+func (s indexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s indexWriterSeriesSlice) Less(i, j int) bool {
+	return labels.Compare(s[i].labels, s[j].labels) < 0
+}
+
+type indexWriterStage uint8
+
+const (
+	idxStageNone indexWriterStage = iota
+	idxStageSymbols
+	idxStageSeries
+	idxStageDone
+)
+
+func (s indexWriterStage) String() string {
+	switch s {
+	case idxStageNone:
+		return "none"
+	case idxStageSymbols:
+		return "symbols"
+	case idxStageSeries:
+		return "series"
+	case idxStageDone:
+		return "done"
+	}
+	return ""
+}
+
+// The table gets initialized with sync.Once, but that may still race with any
+// other use of the crc32 package anywhere. Thus we initialize it beforehand.
+var castagnoliTable *crc32.Table
+
+func init() {
+	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+}
+
+// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
+// polynomial may be easily changed in one location at a later time, if necessary.
+func newCRC32() hash.Hash32 {
+	return crc32.New(castagnoliTable)
+}
+
+// Writer implements the IndexWriter interface for the standard
+// serialization format.
+type Writer struct {
+	ctx context.Context
+
+	// For the main index file.
+	f *FileWriter
+
+	// Temporary file for postings.
+	fP *FileWriter
+	// Temporary file for posting offsets table.
+	fPO   *FileWriter
+	cntPO uint64
+
+	toc           TOC
+	stage         indexWriterStage
+	postingsStart uint64 // Due to padding, can differ from TOC entry.
+
+	// Reusable memory.
+	buf1 encoding.Encbuf
+	buf2 encoding.Encbuf
+
+	numSymbols int
+	symbols    *Symbols
+	symbolFile *fileutil.MmapFile
+	lastSymbol string
+
+	labelIndexes []labelIndexHashEntry // Label index offsets.
+	labelNames   map[string]uint64     // Label names, and their usage.
+
+	// Hold last series to validate that clients insert new series in order.
+	lastSeries labels.Labels
+	lastRef    uint64
+
+	crc32 hash.Hash
+
+	Version int
+}
+
+// TOC represents the index Table Of Contents, which states where each section of the index starts.
+type TOC struct {
+	Symbols           uint64
+	Series            uint64
+	LabelIndices      uint64
+	LabelIndicesTable uint64
+	Postings          uint64
+	PostingsTable     uint64
+}
+
+// NewTOCFromByteSlice returns the parsed TOC from the given index byte slice.
+func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
+	if bs.Len() < indexTOCLen {
+		return nil, encoding.ErrInvalidSize
+	}
+	b := bs.Range(bs.Len()-indexTOCLen, bs.Len())
+
+	expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
+	d := encoding.Decbuf{B: b[:len(b)-4]}
+
+	if d.Crc32(castagnoliTable) != expCRC {
+		return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC")
+	}
+
+	if err := d.Err(); err != nil {
+		return nil, err
+	}
+
+	return &TOC{
+		Symbols:           d.Be64(),
+		Series:            d.Be64(),
+		LabelIndices:      d.Be64(),
+		LabelIndicesTable: d.Be64(),
+		Postings:          d.Be64(),
+		PostingsTable:     d.Be64(),
+	}, nil
+}
+
+// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
+func NewWriter(ctx context.Context, fn string) (*Writer, error) {
+	dir := filepath.Dir(fn)
+
+	df, err := fileutil.OpenDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	defer df.Close() // Close is required on Windows.
+
+	if err := os.RemoveAll(fn); err != nil {
+		return nil, errors.Wrap(err, "remove any existing index at path")
+	}
+
+	// Main index file we are building.
+	f, err := NewFileWriter(fn)
+	if err != nil {
+		return nil, err
+	}
+	// Temporary file for postings.
+	fP, err := NewFileWriter(fn + "_tmp_p")
+	if err != nil {
+		return nil, err
+	}
+	// Temporary file for posting offset table.
+	fPO, err := NewFileWriter(fn + "_tmp_po")
+	if err != nil {
+		return nil, err
+	}
+	if err := df.Sync(); err != nil {
+		return nil, errors.Wrap(err, "sync dir")
+	}
+
+	iw := &Writer{
+		ctx:   ctx,
+		f:     f,
+		fP:    fP,
+		fPO:   fPO,
+		stage: idxStageNone,
+
+		// Reusable memory.
+		buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
+		buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
+
+		labelNames: make(map[string]uint64, 1<<8),
+		crc32:      newCRC32(),
+	}
+	if err := iw.writeMeta(); err != nil {
+		return nil, err
+	}
+	return iw, nil
+}
+
+func (w *Writer) write(bufs ...[]byte) error {
+	return w.f.Write(bufs...)
+}
+
+func (w *Writer) writeAt(buf []byte, pos uint64) error {
+	return w.f.WriteAt(buf, pos)
+}
+
+func (w *Writer) addPadding(size int) error {
+	return w.f.AddPadding(size)
+}
+
+// FileWriter is a buffered file writer that tracks its current write position.
+type FileWriter struct {
+	f    *os.File
+	fbuf *bufio.Writer
+	pos  uint64
+	name string
+}
+
+// NewFileWriter opens (creating if necessary) the named file and returns a
+// FileWriter for it.
+func NewFileWriter(name string) (*FileWriter, error) {
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666)
+	if err != nil {
+		return nil, err
+	}
+	return &FileWriter{
+		f:    f,
+		fbuf: bufio.NewWriterSize(f, 1<<22),
+		pos:  0,
+		name: name,
+	}, nil
+}
+
+func (fw *FileWriter) Pos() uint64 {
+	return fw.pos
+}
+
+func (fw *FileWriter) Write(bufs ...[]byte) error {
+	for _, b := range bufs {
+		n, err := fw.fbuf.Write(b)
+		fw.pos += uint64(n)
+		if err != nil {
+			return err
+		}
+		// For now the index file must not grow beyond 64GiB. Some of the fixed-sized
+		// offset references in v1 are only 4 bytes large.
+		// Once we move to compressed/varint representations in those areas, this limitation
+		// can be lifted.
+		if fw.pos > 16*math.MaxUint32 {
+			return errors.Errorf("%q exceeding max size of 64GiB", fw.name)
+		}
+	}
+	return nil
+}
+
+func (fw *FileWriter) Flush() error {
+	return fw.fbuf.Flush()
+}
+
+func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error {
+	if err := fw.Flush(); err != nil {
+		return err
+	}
+	_, err := fw.f.WriteAt(buf, int64(pos))
+	return err
+}
+
+// AddPadding adds zero-byte padding until the file size is a multiple of size.
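+// For example, with pos=10 and size=16 it writes 6 zero bytes so that the
+// next write starts at offset 16.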
+func (fw *FileWriter) AddPadding(size int) error {
+	p := fw.pos % uint64(size)
+	if p == 0 {
+		return nil
+	}
+	p = uint64(size) - p
+
+	if err := fw.Write(make([]byte, p)); err != nil {
+		return errors.Wrap(err, "add padding")
+	}
+	return nil
+}
+
+func (fw *FileWriter) Close() error {
+	if err := fw.Flush(); err != nil {
+		return err
+	}
+	if err := fw.f.Sync(); err != nil {
+		return err
+	}
+	return fw.f.Close()
+}
+
+func (fw *FileWriter) Remove() error {
+	return os.Remove(fw.name)
+}
+
+// ensureStage handles transitions between write stages and ensures that IndexWriter
+// methods are called in an order valid for the implementation.
+func (w *Writer) ensureStage(s indexWriterStage) error {
+	select {
+	case <-w.ctx.Done():
+		return w.ctx.Err()
+	default:
+	}
+
+	if w.stage == s {
+		return nil
+	}
+	if w.stage < s-1 {
+		// A stage has been skipped.
+		if err := w.ensureStage(s - 1); err != nil {
+			return err
+		}
+	}
+	if w.stage > s {
+		return errors.Errorf("invalid stage %q, currently at %q", s, w.stage)
+	}
+
+	// Mark start of sections in table of contents.
+	switch s {
+	case idxStageSymbols:
+		w.toc.Symbols = w.f.pos
+		if err := w.startSymbols(); err != nil {
+			return err
+		}
+	case idxStageSeries:
+		if err := w.finishSymbols(); err != nil {
+			return err
+		}
+		w.toc.Series = w.f.pos
+
+	case idxStageDone:
+		w.toc.LabelIndices = w.f.pos
+		// LabelIndices generation depends on the posting offset
+		// table produced at this stage.
+		if err := w.writePostingsToTmpFiles(); err != nil {
+			return err
+		}
+		if err := w.writeLabelIndices(); err != nil {
+			return err
+		}
+
+		w.toc.Postings = w.f.pos
+		if err := w.writePostings(); err != nil {
+			return err
+		}
+
+		w.toc.LabelIndicesTable = w.f.pos
+		if err := w.writeLabelIndexesOffsetTable(); err != nil {
+			return err
+		}
+
+		w.toc.PostingsTable = w.f.pos
+		if err := w.writePostingsOffsetTable(); err != nil {
+			return err
+		}
+		if err := w.writeTOC(); err != nil {
+			return err
+		}
+	}
+
+	w.stage = s
+	return nil
+}
+
+func (w *Writer) writeMeta() error {
+	w.buf1.Reset()
+	w.buf1.PutBE32(MagicIndex)
+	w.buf1.PutByte(FormatV2)
+
+	return w.write(w.buf1.Get())
+}
+
+// AddSeries adds the series one at a time along with its chunks.
+func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta) error {
+	if err := w.ensureStage(idxStageSeries); err != nil {
+		return err
+	}
+	if labels.Compare(lset, w.lastSeries) <= 0 {
+		return errors.Errorf("out-of-order series added with label set %q", lset)
+	}
+
+	if ref < w.lastRef && len(w.lastSeries) != 0 {
+		return errors.Errorf("series with reference greater than %d already added", ref)
+	}
+	// We add padding to 16 bytes to increase the addressable space we get through 4 byte
+	// series references.
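+	// With 4-byte references and 16-byte alignment this addresses up to
+	// 2^32 * 16 bytes = 64GiB of series data, which is why FileWriter caps
+	// the index file at that size.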
+	if err := w.addPadding(16); err != nil {
+		return errors.Errorf("failed to write padding bytes: %v", err)
+	}
+
+	if w.f.pos%16 != 0 {
+		return errors.Errorf("series write not 16-byte aligned at %d", w.f.pos)
+	}
+
+	w.buf2.Reset()
+	w.buf2.PutUvarint(len(lset))
+
+	for _, l := range lset {
+		index, err := w.symbols.ReverseLookup(l.Name)
+		if err != nil {
+			return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err)
+		}
+		w.labelNames[l.Name]++
+		w.buf2.PutUvarint32(index)
+
+		index, err = w.symbols.ReverseLookup(l.Value)
+		if err != nil {
+			return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err)
+		}
+		w.buf2.PutUvarint32(index)
+	}
+
+	w.buf2.PutUvarint(len(chunks))
+
+	if len(chunks) > 0 {
+		c := chunks[0]
+		w.buf2.PutVarint64(c.MinTime)
+		w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
+		w.buf2.PutUvarint64(c.Ref)
+		t0 := c.MaxTime
+		ref0 := int64(c.Ref)
+
+		for _, c := range chunks[1:] {
+			w.buf2.PutUvarint64(uint64(c.MinTime - t0))
+			w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
+			t0 = c.MaxTime
+
+			w.buf2.PutVarint64(int64(c.Ref) - ref0)
+			ref0 = int64(c.Ref)
+		}
+	}
+
+	w.buf1.Reset()
+	w.buf1.PutUvarint(w.buf2.Len())
+
+	w.buf2.PutHash(w.crc32)
+
+	if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil {
+		return errors.Wrap(err, "write series data")
+	}
+
+	w.lastSeries = append(w.lastSeries[:0], lset...)
+	w.lastRef = ref
+
+	return nil
+}
+
+func (w *Writer) startSymbols() error {
+	// We are at w.toc.Symbols.
+	// Leave 4 bytes of space for the length, and another 4 for the number of
+	// symbols, which will both be calculated later.
+	return w.write([]byte("alenblen"))
+}
+
+// AddSymbol adds the given symbol; symbols must be added in sorted order.
+func (w *Writer) AddSymbol(sym string) error {
+	if err := w.ensureStage(idxStageSymbols); err != nil {
+		return err
+	}
+	if w.numSymbols != 0 && sym <= w.lastSymbol {
+		return errors.Errorf("symbol %q out-of-order", sym)
+	}
+	w.lastSymbol = sym
+	w.numSymbols++
+	w.buf1.Reset()
+	w.buf1.PutUvarintStr(sym)
+	return w.write(w.buf1.Get())
+}
+
+func (w *Writer) finishSymbols() error {
+	// Write out the length and symbol count.
+	w.buf1.Reset()
+	w.buf1.PutBE32int(int(w.f.pos - w.toc.Symbols - 4))
+	w.buf1.PutBE32int(int(w.numSymbols))
+	if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
+		return err
+	}
+
+	hashPos := w.f.pos
+	// Leave space for the hash. We can only calculate it
+	// now that the number of symbols is known, so mmap and do it from there.
+	if err := w.write([]byte("hash")); err != nil {
+		return err
+	}
+	if err := w.f.Flush(); err != nil {
+		return err
+	}
+
+	sf, err := fileutil.OpenMmapFile(w.f.name)
+	if err != nil {
+		return err
+	}
+	w.symbolFile = sf
+	hash := crc32.Checksum(w.symbolFile.Bytes()[w.toc.Symbols+4:hashPos], castagnoliTable)
+	w.buf1.Reset()
+	w.buf1.PutBE32(hash)
+	if err := w.writeAt(w.buf1.Get(), hashPos); err != nil {
+		return err
+	}
+
+	// Load in the symbol table efficiently for the rest of the index writing.
+	w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols))
+	if err != nil {
+		return errors.Wrap(err, "read symbols")
+	}
+	return nil
+}
+
+func (w *Writer) writeLabelIndices() error {
+	if err := w.fPO.Flush(); err != nil {
+		return err
+	}
+
+	// Find all the label values in the tmp posting offset table.
+	f, err := fileutil.OpenMmapFile(w.fPO.name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
+	cnt := w.cntPO
+	current := []byte{}
+	values := []uint32{}
+	for d.Err() == nil && cnt > 0 {
+		cnt--
+		d.Uvarint()                           // Keycount.
+		name := d.UvarintBytes()              // Label name.
+		value := yoloString(d.UvarintBytes()) // Label value.
+		d.Uvarint64()                         // Offset.
+		if len(name) == 0 {
+			continue // Skip the all-postings entry (empty label name).
+		}
+
+		if !bytes.Equal(name, current) && len(values) > 0 {
+			// We've reached a new label name.
+			if err := w.writeLabelIndex(string(current), values); err != nil {
+				return err
+			}
+			values = values[:0]
+		}
+		current = name
+		sid, err := w.symbols.ReverseLookup(value)
+		if err != nil {
+			return err
+		}
+		values = append(values, sid)
+	}
+	if d.Err() != nil {
+		return d.Err()
+	}
+
+	// Handle the last label.
+	if len(values) > 0 {
+		if err := w.writeLabelIndex(string(current), values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (w *Writer) writeLabelIndex(name string, values []uint32) error {
+	// Align beginning to 4 bytes for more efficient index list scans.
+	if err := w.addPadding(4); err != nil {
+		return err
+	}
+
+	w.labelIndexes = append(w.labelIndexes, labelIndexHashEntry{
+		keys:   []string{name},
+		offset: w.f.pos,
+	})
+
+	startPos := w.f.pos
+	// Leave 4 bytes of space for the length, which will be calculated later.
+	if err := w.write([]byte("alen")); err != nil {
+		return err
+	}
+	w.crc32.Reset()
+
+	w.buf1.Reset()
+	w.buf1.PutBE32int(1) // Number of names.
+	w.buf1.PutBE32int(len(values))
+	w.buf1.WriteToHash(w.crc32)
+	if err := w.write(w.buf1.Get()); err != nil {
+		return err
+	}
+
+	for _, v := range values {
+		w.buf1.Reset()
+		w.buf1.PutBE32(v)
+		w.buf1.WriteToHash(w.crc32)
+		if err := w.write(w.buf1.Get()); err != nil {
+			return err
+		}
+	}
+
+	// Write out the length.
+	w.buf1.Reset()
+	w.buf1.PutBE32int(int(w.f.pos - startPos - 4))
+	if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
+		return err
+	}
+
+	w.buf1.Reset()
+	w.buf1.PutHashSum(w.crc32)
+	return w.write(w.buf1.Get())
+}
+
+// writeLabelIndexesOffsetTable writes the label indices offset table.
+func (w *Writer) writeLabelIndexesOffsetTable() error {
+	startPos := w.f.pos
+	// Leave 4 bytes of space for the length, which will be calculated later.
+	if err := w.write([]byte("alen")); err != nil {
+		return err
+	}
+	w.crc32.Reset()
+
+	w.buf1.Reset()
+	w.buf1.PutBE32int(len(w.labelIndexes))
+	w.buf1.WriteToHash(w.crc32)
+	if err := w.write(w.buf1.Get()); err != nil {
+		return err
+	}
+
+	for _, e := range w.labelIndexes {
+		w.buf1.Reset()
+		w.buf1.PutUvarint(len(e.keys))
+		for _, k := range e.keys {
+			w.buf1.PutUvarintStr(k)
+		}
+		w.buf1.PutUvarint64(e.offset)
+		w.buf1.WriteToHash(w.crc32)
+		if err := w.write(w.buf1.Get()); err != nil {
+			return err
+		}
+	}
+	// Write out the length.
+	w.buf1.Reset()
+	w.buf1.PutBE32int(int(w.f.pos - startPos - 4))
+	if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
+		return err
+	}
+
+	w.buf1.Reset()
+	w.buf1.PutHashSum(w.crc32)
+	return w.write(w.buf1.Get())
+}
+
+// writePostingsOffsetTable writes the postings offset table.
+func (w *Writer) writePostingsOffsetTable() error {
+	// Ensure everything is in the temporary file.
+	if err := w.fPO.Flush(); err != nil {
+		return err
+	}
+
+	startPos := w.f.pos
+	// Leave 4 bytes of space for the length, which will be calculated later.
+	if err := w.write([]byte("alen")); err != nil {
+		return err
+	}
+
+	// Copy over the tmp posting offset table; however, we need to adjust
+	// the offsets.
+	adjustment := w.postingsStart
+
+	w.buf1.Reset()
+	w.crc32.Reset()
+	w.buf1.PutBE32int(int(w.cntPO)) // Count.
+	w.buf1.WriteToHash(w.crc32)
+	if err := w.write(w.buf1.Get()); err != nil {
+		return err
+	}
+
+	f, err := fileutil.OpenMmapFile(w.fPO.name)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if f != nil {
+			f.Close()
+		}
+	}()
+	d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
+	cnt := w.cntPO
+	for d.Err() == nil && cnt > 0 {
+		w.buf1.Reset()
+		w.buf1.PutUvarint(d.Uvarint())                     // Keycount.
+		w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label name.
+		w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label value.
+		w.buf1.PutUvarint64(d.Uvarint64() + adjustment)    // Offset.
+		w.buf1.WriteToHash(w.crc32)
+		if err := w.write(w.buf1.Get()); err != nil {
+			return err
+		}
+		cnt--
+	}
+	if d.Err() != nil {
+		return d.Err()
+	}
+
+	// Cleanup temporary file.
+	if err := f.Close(); err != nil {
+		return err
+	}
+	f = nil
+	if err := w.fPO.Close(); err != nil {
+		return err
+	}
+	if err := w.fPO.Remove(); err != nil {
+		return err
+	}
+	w.fPO = nil
+
+	// Write out the length.
+	w.buf1.Reset()
+	w.buf1.PutBE32int(int(w.f.pos - startPos - 4))
+	if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
+		return err
+	}
+
+	// Finally write the hash.
+	w.buf1.Reset()
+	w.buf1.PutHashSum(w.crc32)
+	return w.write(w.buf1.Get())
+}
+
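+// indexTOCLen is the fixed size of the TOC: six 8-byte section offsets
+// followed by a 4-byte CRC32, i.e. 52 bytes in total.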
+const indexTOCLen = 6*8 + crc32.Size
+
+func (w *Writer) writeTOC() error {
+	w.buf1.Reset()
+
+	w.buf1.PutBE64(w.toc.Symbols)
+	w.buf1.PutBE64(w.toc.Series)
+	w.buf1.PutBE64(w.toc.LabelIndices)
+	w.buf1.PutBE64(w.toc.LabelIndicesTable)
+	w.buf1.PutBE64(w.toc.Postings)
+	w.buf1.PutBE64(w.toc.PostingsTable)
+
+	w.buf1.PutHash(w.crc32)
+
+	return w.write(w.buf1.Get())
+}
+
+func (w *Writer) writePostingsToTmpFiles() error {
+	names := make([]string, 0, len(w.labelNames))
+	for n := range w.labelNames {
+		names = append(names, n)
+	}
+	sort.Strings(names)
+
+	if err := w.f.Flush(); err != nil {
+		return err
+	}
+	f, err := fileutil.OpenMmapFile(w.f.name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Write out the special all-postings entry.
+	offsets := []uint32{}
+	d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
+	d.Skip(int(w.toc.Series))
+	for d.Len() > 0 {
+		d.ConsumePadding()
+		startPos := w.toc.LabelIndices - uint64(d.Len())
+		if startPos%16 != 0 {
+			return errors.Errorf("series not 16-byte aligned at %d", startPos)
+		}
+		offsets = append(offsets, uint32(startPos/16))
+		// Skip to next series.
+		x := d.Uvarint()
+		d.Skip(x + crc32.Size)
+		if err := d.Err(); err != nil {
+			return err
+		}
+	}
+	if err := w.writePosting("", "", offsets); err != nil {
+		return err
+	}
+	maxPostings := uint64(len(offsets)) // No label name can have more postings than this.
+
+	for len(names) > 0 {
+		batchNames := []string{}
+		var c uint64
+		// Try to bunch up label names into one loop, but avoid
+		// using more memory than a single label name can.
+		for len(names) > 0 {
+			if w.labelNames[names[0]]+c > maxPostings {
+				break
+			}
+			batchNames = append(batchNames, names[0])
+			c += w.labelNames[names[0]]
+			names = names[1:]
+		}
+
+		nameSymbols := map[uint32]string{}
+		for _, name := range batchNames {
+			sid, err := w.symbols.ReverseLookup(name)
+			if err != nil {
+				return err
+			}
+			nameSymbols[sid] = name
+		}
+		// Label name -> label value -> positions.
+		postings := map[uint32]map[uint32][]uint32{}
+
+		d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
+		d.Skip(int(w.toc.Series))
+		for d.Len() > 0 {
+			d.ConsumePadding()
+			startPos := w.toc.LabelIndices - uint64(d.Len())
+			l := d.Uvarint() // Length of this series in bytes.
+			startLen := d.Len()
+
+			// See if label names we want are in the series.
+			numLabels := d.Uvarint()
+			for i := 0; i < numLabels; i++ {
+				lno := uint32(d.Uvarint())
+				lvo := uint32(d.Uvarint())
+
+				if _, ok := nameSymbols[lno]; ok {
+					if _, ok := postings[lno]; !ok {
+						postings[lno] = map[uint32][]uint32{}
+					}
+					postings[lno][lvo] = append(postings[lno][lvo], uint32(startPos/16))
+				}
+			}
+			// Skip to next series.
+			d.Skip(l - (startLen - d.Len()) + crc32.Size)
+			if err := d.Err(); err != nil {
+				return err
+			}
+		}
+
+		for _, name := range batchNames {
+			// Write out postings for this label name.
+			sid, err := w.symbols.ReverseLookup(name)
+			if err != nil {
+				return err
+			}
+			values := make([]uint32, 0, len(postings[sid]))
+			for v := range postings[sid] {
+				values = append(values, v)
+			}
+			// Symbol numbers are in order, so the strings will also be in order.
+			sort.Sort(uint32slice(values))
+			for _, v := range values {
+				value, err := w.symbols.Lookup(v)
+				if err != nil {
+					return err
+				}
+				if err := w.writePosting(name, value, postings[sid][v]); err != nil {
+					return err
+				}
+			}
+		}
+		select {
+		case <-w.ctx.Done():
+			return w.ctx.Err()
+		default:
+		}
+	}
+	return nil
+}
+
+func (w *Writer) writePosting(name, value string, offs []uint32) error {
+	// Align beginning to 4 bytes for more efficient postings list scans.
+	if err := w.fP.AddPadding(4); err != nil {
+		return err
+	}
+
+	// Write out postings offset table to temporary file as we go.
+	w.buf1.Reset()
+	w.buf1.PutUvarint(2)
+	w.buf1.PutUvarintStr(name)
+	w.buf1.PutUvarintStr(value)
+	w.buf1.PutUvarint64(w.fP.pos) // This is relative to the postings tmp file, not the final index file.
+	if err := w.fPO.Write(w.buf1.Get()); err != nil {
+		return err
+	}
+	w.cntPO++
+
+	w.buf1.Reset()
+	w.buf1.PutBE32int(len(offs))
+
+	for _, off := range offs {
+		if off > (1<<32)-1 {
+			return errors.Errorf("series offset %d exceeds 4 bytes", off)
+		}
+		w.buf1.PutBE32(off)
+	}
+
+	w.buf2.Reset()
+	w.buf2.PutBE32int(w.buf1.Len())
+	w.buf1.PutHash(w.crc32)
+	return w.fP.Write(w.buf2.Get(), w.buf1.Get())
+}
+
+func (w *Writer) writePostings() error {
+	// The tmp file contains 4-byte alignment padding; pad the main index the
+	// same way so that padding stays valid after the copy.
+	if err := w.f.AddPadding(4); err != nil {
+		return err
+	}
+	w.postingsStart = w.f.pos
+
+	// Copy temporary file into main index.
+	if err := w.fP.Flush(); err != nil {
+		return err
+	}
+	if _, err := w.fP.f.Seek(0, 0); err != nil {
+		return err
+	}
+	// Don't need to calculate a checksum, so can copy directly.
+	n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, make([]byte, 1<<20))
+	if err != nil {
+		return err
+	}
+	if uint64(n) != w.fP.pos {
+		return errors.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n)
+	}
+	w.f.pos += uint64(n)
+
+	if err := w.fP.Close(); err != nil {
+		return err
+	}
+	if err := w.fP.Remove(); err != nil {
+		return err
+	}
+	w.fP = nil
+	return nil
+}
+
+type uint32slice []uint32
+
+func (s uint32slice) Len() int           { return len(s) }
+func (s uint32slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] }
+
+type labelIndexHashEntry struct {
+	keys   []string
+	offset uint64
+}
+
+// Close finishes any remaining write stages and closes all underlying files.
+func (w *Writer) Close() error {
+	// Even if this fails, we need to close all the files.
+	ensureErr := w.ensureStage(idxStageDone)
+
+	if w.symbolFile != nil {
+		if err := w.symbolFile.Close(); err != nil {
+			return err
+		}
+	}
+	if w.fP != nil {
+		if err := w.fP.Close(); err != nil {
+			return err
+		}
+	}
+	if w.fPO != nil {
+		if err := w.fPO.Close(); err != nil {
+			return err
+		}
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+	return ensureErr
+}
+
+// StringIter iterates over a sorted list of strings.
+type StringIter interface {
+	// Next advances the iterator and returns true if another value was found.
+	Next() bool
+
+	// At returns the value at the current iterator position.
+	At() string
+
+	// Err returns the last error of the iterator.
+	Err() error
+}
+
+// Reader provides reading access to an index file, abstracted as a ByteSlice.
+type Reader struct {
+	b   ByteSlice
+	toc *TOC
+
+	// Closer that releases the underlying resources of the byte slice.
+	c io.Closer
+
+	// Map of label name to a list of some label values' positions in the offset table.
+	// The first and last value for each name are always present.
+	postings map[string][]postingOffset
+	// For the v1 format, labelname -> labelvalue -> offset.
+	postingsV1 map[string]map[string]uint64
+
+	symbols     *Symbols
+	nameSymbols map[uint32]string // Cache of the label name symbol lookups,
+	// as there are not many and they are half of all lookups.
+
+	dec *Decoder
+
+	version int
+}
+
+type postingOffset struct {
+	value string
+	off   int
+}
+
+// ByteSlice abstracts a byte slice.
+type ByteSlice interface {
+	Len() int
+	Range(start, end int) []byte
+}
+
+type realByteSlice []byte
+
+func (b realByteSlice) Len() int {
+	return len(b)
+}
+
+func (b realByteSlice) Range(start, end int) []byte {
+	return b[start:end]
+}
+
+func (b realByteSlice) Sub(start, end int) ByteSlice {
+	return b[start:end]
+}
+
+// NewReader returns a new index reader on the given byte slice. It automatically
+// handles different format versions.
+func NewReader(b ByteSlice) (*Reader, error) {
+	return newReader(b, ioutil.NopCloser(nil))
+}
+
+// NewFileReader returns a new index reader against the given index file.
+func NewFileReader(path string) (*Reader, error) {
+	f, err := fileutil.OpenMmapFile(path)
+	if err != nil {
+		return nil, err
+	}
+	r, err := newReader(realByteSlice(f.Bytes()), f)
+	if err != nil {
+		var merr tsdb_errors.MultiError
+		merr.Add(err)
+		merr.Add(f.Close())
+		return nil, merr
+	}
+
+	return r, nil
+}
+
+func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
+	r := &Reader{
+		b:        b,
+		c:        c,
+		postings: map[string][]postingOffset{},
+	}
+
+	// Verify header.
+	if r.b.Len() < HeaderLen {
+		return nil, errors.Wrap(encoding.ErrInvalidSize, "index header")
+	}
+	if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex {
+		return nil, errors.Errorf("invalid magic number %x", m)
+	}
+	r.version = int(r.b.Range(4, 5)[0])
+
+	if r.version != FormatV1 && r.version != FormatV2 {
+		return nil, errors.Errorf("unknown index file version %d", r.version)
+	}
+
+	var err error
+	r.toc, err = NewTOCFromByteSlice(b)
+	if err != nil {
+		return nil, errors.Wrap(err, "read TOC")
+	}
+
+	r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols))
+	if err != nil {
+		return nil, errors.Wrap(err, "read symbols")
+	}
+
+	if r.version == FormatV1 {
+		// Earlier V1 formats don't have a sorted postings offset table, so
+		// load the whole offset table into memory.
+		r.postingsV1 = map[string]map[string]uint64{}
+		if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error {
+			if len(key) != 2 {
+				return errors.Errorf("unexpected key length for posting table %d", len(key))
+			}
+			if _, ok := r.postingsV1[key[0]]; !ok {
+				r.postingsV1[key[0]] = map[string]uint64{}
+				r.postings[key[0]] = nil // Used to get a list of labelnames in places.
+			}
+			r.postingsV1[key[0]][key[1]] = off
+			return nil
+		}); err != nil {
+			return nil, errors.Wrap(err, "read postings table")
+		}
+	} else {
+		var lastKey []string
+		lastOff := 0
+		valueCount := 0
+		// For the postings offset table we keep every label name but only every nth
+		// label value (plus the first and last one), to save memory.
+		if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error {
+			if len(key) != 2 {
+				return errors.Errorf("unexpected key length for posting table %d", len(key))
+			}
+			if _, ok := r.postings[key[0]]; !ok {
+				// Next label name.
+				r.postings[key[0]] = []postingOffset{}
+				if lastKey != nil {
+					// Always include last value for each label name.
+					r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
+				}
+				lastKey = nil
+				valueCount = 0
+			}
+			if valueCount%symbolFactor == 0 {
+				r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off})
+				lastKey = nil
+			} else {
+				lastKey = key
+				lastOff = off
+			}
+			valueCount++
+			return nil
+		}); err != nil {
+			return nil, errors.Wrap(err, "read postings table")
+		}
+		if lastKey != nil {
+			r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
+		}
+		// Trim any extra space in the slices.
+		for k, v := range r.postings {
+			l := make([]postingOffset, len(v))
+			copy(l, v)
+			r.postings[k] = l
+		}
+	}
+
+	r.nameSymbols = make(map[uint32]string, len(r.postings))
+	for k := range r.postings {
+		if k == "" {
+			continue
+		}
+		off, err := r.symbols.ReverseLookup(k)
+		if err != nil {
+			return nil, errors.Wrap(err, "reverse symbol lookup")
+		}
+		r.nameSymbols[off] = k
+	}
+
+	r.dec = &Decoder{LookupSymbol: r.lookupSymbol}
+
+	return r, nil
+}
+
+// Version returns the file format version of the underlying index.
+func (r *Reader) Version() int {
+	return r.version
+}
+
+// Range marks a byte range.
+type Range struct {
+	Start, End int64
+}
+
+// PostingsRanges returns a new map of byte ranges in the underlying index file
+// for all postings lists.
+func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
+	m := map[labels.Label]Range{}
+	if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error {
+		if len(key) != 2 {
+			return errors.Errorf("unexpected key length for posting table %d", len(key))
+		}
+		d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
+		if d.Err() != nil {
+			return d.Err()
+		}
+		m[labels.Label{Name: key[0], Value: key[1]}] = Range{
+			Start: int64(off) + 4,
+			End:   int64(off) + 4 + int64(d.Len()),
+		}
+		return nil
+	}); err != nil {
+		return nil, errors.Wrap(err, "read postings table")
+	}
+	return m, nil
+}
+
+// Symbols provides lookups within the symbol table of an index file.
+type Symbols struct {
+	bs      ByteSlice
+	version int
+	off     int
+
+	offsets []int
+	seen    int
+}
+
+const symbolFactor = 32
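+
+// Only every symbolFactor-th symbol offset is kept in memory: Lookup(o) jumps
+// to offsets[o/symbolFactor] and walks at most symbolFactor-1 entries forward.
+// For example, o=70 starts at offsets[2] (symbol 64) and walks 6 symbols.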
+
+// NewSymbols returns a Symbols object for symbol lookups.
+func NewSymbols(bs ByteSlice, version int, off int) (*Symbols, error) {
+	s := &Symbols{
+		bs:      bs,
+		version: version,
+		off:     off,
+	}
+	d := encoding.NewDecbufAt(bs, off, castagnoliTable)
+	var (
+		origLen = d.Len()
+		cnt     = d.Be32int()
+		basePos = off + 4
+	)
+	s.offsets = make([]int, 0, 1+cnt/symbolFactor)
+	for d.Err() == nil && s.seen < cnt {
+		if s.seen%symbolFactor == 0 {
+			s.offsets = append(s.offsets, basePos+origLen-d.Len())
+		}
+		d.UvarintBytes() // The symbol.
+		s.seen++
+	}
+	if d.Err() != nil {
+		return nil, d.Err()
+	}
+	return s, nil
+}
+
+// Lookup returns the symbol string for the given reference.
+func (s Symbols) Lookup(o uint32) (string, error) {
+	d := encoding.Decbuf{
+		B: s.bs.Range(0, s.bs.Len()),
+	}
+
+	if s.version == FormatV2 {
+		if int(o) >= s.seen {
+			return "", errors.Errorf("unknown symbol offset %d", o)
+		}
+		d.Skip(s.offsets[int(o/symbolFactor)])
+		// Walk until we find the one we want.
+		for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- {
+			d.UvarintBytes()
+		}
+	} else {
+		d.Skip(int(o))
+	}
+	sym := d.UvarintStr()
+	if d.Err() != nil {
+		return "", d.Err()
+	}
+	return sym, nil
+}
+
+// ReverseLookup returns the reference for the given symbol string.
+func (s Symbols) ReverseLookup(sym string) (uint32, error) {
+	if len(s.offsets) == 0 {
+		return 0, errors.Errorf("unknown symbol %q - no symbols", sym)
+	}
+	i := sort.Search(len(s.offsets), func(i int) bool {
+		// Any decoding errors here will be lost, however
+		// we already read through all of this at startup.
+		d := encoding.Decbuf{
+			B: s.bs.Range(0, s.bs.Len()),
+		}
+		d.Skip(s.offsets[i])
+		return yoloString(d.UvarintBytes()) > sym
+	})
+	d := encoding.Decbuf{
+		B: s.bs.Range(0, s.bs.Len()),
+	}
+	if i > 0 {
+		i--
+	}
+	d.Skip(s.offsets[i])
+	res := i * symbolFactor
+	var lastLen int
+	var lastSymbol string
+	for d.Err() == nil && res <= s.seen {
+		lastLen = d.Len()
+		lastSymbol = yoloString(d.UvarintBytes())
+		if lastSymbol >= sym {
+			break
+		}
+		res++
+	}
+	if d.Err() != nil {
+		return 0, d.Err()
+	}
+	if lastSymbol != sym {
+		return 0, errors.Errorf("unknown symbol %q", sym)
+	}
+	if s.version == FormatV2 {
+		return uint32(res), nil
+	}
+	return uint32(s.bs.Len() - lastLen), nil
+}
+
+func (s Symbols) Size() int {
+	return len(s.offsets) * 8
+}
+
+func (s Symbols) Iter() StringIter {
+	d := encoding.NewDecbufAt(s.bs, s.off, castagnoliTable)
+	cnt := d.Be32int()
+	return &symbolsIter{
+		d:   d,
+		cnt: cnt,
+	}
+}
+
+// symbolsIter implements StringIter.
+type symbolsIter struct {
+	d   encoding.Decbuf
+	cnt int
+	cur string
+	err error
+}
+
+func (s *symbolsIter) Next() bool {
+	if s.cnt == 0 || s.err != nil {
+		return false
+	}
+	s.cur = yoloString(s.d.UvarintBytes())
+	s.cnt--
+	if s.d.Err() != nil {
+		s.err = s.d.Err()
+		return false
+	}
+	return true
+}
+
+func (s symbolsIter) At() string { return s.cur }
+func (s symbolsIter) Err() error { return s.err }
+
+// ReadOffsetTable reads an offset table at the given position and calls f for
+// each found entry. If f returns an error, it stops decoding and returns the received error.
+func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) error) error {
+	d := encoding.NewDecbufAt(bs, int(off), castagnoliTable)
+	startLen := d.Len()
+	cnt := d.Be32()
+
+	for d.Err() == nil && d.Len() > 0 && cnt > 0 {
+		offsetPos := startLen - d.Len()
+		keyCount := d.Uvarint()
+		// The Postings offset table takes only 2 keys per entry (name and value of label),
+		// and the LabelIndices offset table takes only 1 key per entry (a label name).
+		// Hence setting the size to max of both, i.e. 2.
+		keys := make([]string, 0, 2)
+
+		for i := 0; i < keyCount; i++ {
+			keys = append(keys, d.UvarintStr())
+		}
+		o := d.Uvarint64()
+		if d.Err() != nil {
+			break
+		}
+		if err := f(keys, o, offsetPos); err != nil {
+			return err
+		}
+		cnt--
+	}
+	return d.Err()
+}
+
+// Close closes the reader and releases its underlying resources.
+func (r *Reader) Close() error {
+	return r.c.Close()
+}
+
+func (r *Reader) lookupSymbol(o uint32) (string, error) {
+	if s, ok := r.nameSymbols[o]; ok {
+		return s, nil
+	}
+	return r.symbols.Lookup(o)
+}
+
+// Symbols returns an iterator over the symbols that exist within the index.
+func (r *Reader) Symbols() StringIter {
+	return r.symbols.Iter()
+}
+
+// SymbolTableSize returns the symbol table size in bytes.
+func (r *Reader) SymbolTableSize() uint64 {
+	return uint64(r.symbols.Size())
+}
+
+// LabelValues returns the sorted values that exist for the given label name.
+// It is not safe to use the return value beyond the lifetime of the byte slice
+// passed into the Reader.
+func (r *Reader) LabelValues(name string) ([]string, error) {
+	if r.version == FormatV1 {
+		e, ok := r.postingsV1[name]
+		if !ok {
+			return nil, nil
+		}
+		values := make([]string, 0, len(e))
+		for k := range e {
+			values = append(values, k)
+		}
+		sort.Strings(values)
+		return values, nil
+
+	}
+	e, ok := r.postings[name]
+	if !ok {
+		return nil, nil
+	}
+	if len(e) == 0 {
+		return nil, nil
+	}
+	values := make([]string, 0, len(e)*symbolFactor)
+
+	d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)
+	d.Skip(e[0].off)
+	lastVal := e[len(e)-1].value
+
+	skip := 0
+	for d.Err() == nil {
+		if skip == 0 {
+			// These are always the same number of bytes,
+			// and it's faster to skip than parse.
+			skip = d.Len()
+			d.Uvarint()      // Keycount.
+			d.UvarintBytes() // Label name.
+			skip -= d.Len()
+		} else {
+			d.Skip(skip)
+		}
+		s := yoloString(d.UvarintBytes()) // Label value.
+		values = append(values, s)
+		if s == lastVal {
+			break
+		}
+		d.Uvarint64() // Offset.
+	}
+	if d.Err() != nil {
+		return nil, errors.Wrap(d.Err(), "get postings offset entry")
+	}
+	return values, nil
+}
+
+// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
+func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
+	offset := id
+	// In version 2, series IDs are no longer exact references: series are
+	// 16-byte aligned and the ID is the actual position divided by 16.
+	if r.version == FormatV2 {
+		offset = id * 16
+	}
+	d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
+	if d.Err() != nil {
+		return d.Err()
+	}
+	return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series")
+}
+
+// Postings returns the merged postings list for the given label name and values.
+func (r *Reader) Postings(name string, values ...string) (Postings, error) {
+	if r.version == FormatV1 {
+		e, ok := r.postingsV1[name]
+		if !ok {
+			return EmptyPostings(), nil
+		}
+		res := make([]Postings, 0, len(values))
+		for _, v := range values {
+			postingsOff, ok := e[v]
+			if !ok {
+				continue
+			}
+			// Read from the postings table.
+			d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
+			_, p, err := r.dec.Postings(d.Get())
+			if err != nil {
+				return nil, errors.Wrap(err, "decode postings")
+			}
+			res = append(res, p)
+		}
+		return Merge(res...), nil
+	}
+
+	e, ok := r.postings[name]
+	if !ok {
+		return EmptyPostings(), nil
+	}
+
+	if len(values) == 0 {
+		return EmptyPostings(), nil
+	}
+
+	res := make([]Postings, 0, len(values))
+	skip := 0
+	valueIndex := 0
+	for valueIndex < len(values) && values[valueIndex] < e[0].value {
+		// Discard values before the start.
+		valueIndex++
+	}
+	for valueIndex < len(values) {
+		value := values[valueIndex]
+
+		i := sort.Search(len(e), func(i int) bool { return e[i].value >= value })
+		if i == len(e) {
+			// We're past the end.
+			break
+		}
+		if i > 0 && e[i].value != value {
+			// Need to look from previous entry.
+			i--
+		}
+		// Don't Crc32 the entire postings offset table; this is very slow,
+		// so we hope any issues were caught at startup.
+		d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)
+		d.Skip(e[i].off)
+
+		// Iterate on the offset table.
+		var postingsOff uint64 // The offset into the postings table.
+		for d.Err() == nil {
+			if skip == 0 {
+				// These are always the same number of bytes,
+				// and it's faster to skip than parse.
+				skip = d.Len()
+				d.Uvarint()      // Keycount.
+				d.UvarintBytes() // Label name.
+				skip -= d.Len()
+			} else {
+				d.Skip(skip)
+			}
+			v := d.UvarintBytes()       // Label value.
+			postingsOff = d.Uvarint64() // Offset.
+			for string(v) >= value {
+				if string(v) == value {
+					// Read from the postings table.
+					d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
+					_, p, err := r.dec.Postings(d2.Get())
+					if err != nil {
+						return nil, errors.Wrap(err, "decode postings")
+					}
+					res = append(res, p)
+				}
+				valueIndex++
+				if valueIndex == len(values) {
+					break
+				}
+				value = values[valueIndex]
+			}
+			if i+1 == len(e) || value >= e[i+1].value || valueIndex == len(values) {
+				// Need to go to a later postings offset entry, if there is one.
+				break
+			}
+		}
+		if d.Err() != nil {
+			return nil, errors.Wrap(d.Err(), "get postings offset entry")
+		}
+	}
+
+	return Merge(res...), nil
+}
+
+// SortedPostings returns the given postings list reordered so that the backing series
+// are sorted.
+func (r *Reader) SortedPostings(p Postings) Postings {
+	return p
+}
+
+// Size returns the size of an index file.
+func (r *Reader) Size() int64 {
+	return int64(r.b.Len())
+}
+
+// LabelNames returns all the unique label names present in the index.
+func (r *Reader) LabelNames() ([]string, error) {
+	labelNames := make([]string, 0, len(r.postings))
+	for name := range r.postings {
+		if name == allPostingsKey.Name {
+			// This is not from any metric.
+			continue
+		}
+		labelNames = append(labelNames, name)
+	}
+	sort.Strings(labelNames)
+	return labelNames, nil
+}
+
+// NewStringListIter returns a StringIter for the given sorted list of strings.
+func NewStringListIter(s []string) StringIter {
+	return &stringListIter{l: s}
+}
+
+// stringListIter implements StringIter.
+type stringListIter struct {
+	l   []string
+	cur string
+}
+
+func (s *stringListIter) Next() bool {
+	if len(s.l) == 0 {
+		return false
+	}
+	s.cur = s.l[0]
+	s.l = s.l[1:]
+	return true
+}
+func (s stringListIter) At() string { return s.cur }
+func (s stringListIter) Err() error { return nil }
+
+// Decoder provides decoding methods for the v1 and v2 index file format.
+//
+// It currently does not contain decoding methods for all entry types but can be
+// extended as needed.
+type Decoder struct {
+	LookupSymbol func(uint32) (string, error)
+}
+
+// Postings returns a postings list for b and its number of elements.
+func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
+	d := encoding.Decbuf{B: b}
+	n := d.Be32int()
+	l := d.Get()
+	return n, newBigEndianPostings(l), d.Err()
+}
+
+// Series decodes a series entry from the given byte slice into lset and chks.
+func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
+	*lbls = (*lbls)[:0]
+	*chks = (*chks)[:0]
+
+	d := encoding.Decbuf{B: b}
+
+	k := d.Uvarint()
+
+	for i := 0; i < k; i++ {
+		lno := uint32(d.Uvarint())
+		lvo := uint32(d.Uvarint())
+
+		if d.Err() != nil {
+			return errors.Wrap(d.Err(), "read series label offsets")
+		}
+
+		ln, err := dec.LookupSymbol(lno)
+		if err != nil {
+			return errors.Wrap(err, "lookup label name")
+		}
+		lv, err := dec.LookupSymbol(lvo)
+		if err != nil {
+			return errors.Wrap(err, "lookup label value")
+		}
+
+		*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
+	}
+
+	// Read the chunks meta data.
+	k = d.Uvarint()
+
+	if k == 0 {
+		return nil
+	}
+
+	t0 := d.Varint64()
+	maxt := int64(d.Uvarint64()) + t0
+	ref0 := int64(d.Uvarint64())
+
+	*chks = append(*chks, chunks.Meta{
+		Ref:     uint64(ref0),
+		MinTime: t0,
+		MaxTime: maxt,
+	})
+	t0 = maxt
+
+	for i := 1; i < k; i++ {
+		mint := int64(d.Uvarint64()) + t0
+		maxt := int64(d.Uvarint64()) + mint
+
+		ref0 += d.Varint64()
+		t0 = maxt
+
+		if d.Err() != nil {
+			return errors.Wrapf(d.Err(), "read meta for chunk %d", i)
+		}
+
+		*chks = append(*chks, chunks.Meta{
+			Ref:     uint64(ref0),
+			MinTime: mint,
+			MaxTime: maxt,
+		})
+	}
+	return d.Err()
+}
+
+func yoloString(b []byte) string {
+	return *((*string)(unsafe.Pointer(&b)))
+}
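+
+// Note (editor's addition): yoloString aliases the backing array of b instead
+// of copying it, so the returned string is only valid while b stays alive and
+// unmodified. A minimal sketch of the hazard:
+//
+//	b := []byte("abc")
+//	s := yoloString(b)
+//	b[0] = 'x' // s now reads "xbc" because it shares b's memory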
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
new file mode 100644
index 0000000000000000000000000000000000000000..ddd7ad601d97a093b441769aeb94eeaeb8b52aed
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
@@ -0,0 +1,742 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+	"container/heap"
+	"encoding/binary"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+)
+
+var allPostingsKey = labels.Label{}
+
+// AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
+func AllPostingsKey() (name, value string) {
+	return allPostingsKey.Name, allPostingsKey.Value
+}
+
+// MemPostings holds postings list for series ID per label pair. They may be written
+// to out of order.
+// EnsureOrder() must be called once before any reads are done. This allows for quick
+// unordered batch fills on startup.
+type MemPostings struct {
+	mtx     sync.RWMutex
+	m       map[string]map[string][]uint64
+	ordered bool
+}
+
+// NewMemPostings returns a MemPostings that's ready for reads and writes.
+func NewMemPostings() *MemPostings {
+	return &MemPostings{
+		m:       make(map[string]map[string][]uint64, 512),
+		ordered: true,
+	}
+}
+
+// NewUnorderedMemPostings returns a MemPostings that is not safe to be read from
+// until EnsureOrder() has been called once.
+func NewUnorderedMemPostings() *MemPostings {
+	return &MemPostings{
+		m:       make(map[string]map[string][]uint64, 512),
+		ordered: false,
+	}
+}
+
+// SortedKeys returns a list of sorted label keys of the postings.
+func (p *MemPostings) SortedKeys() []labels.Label {
+	p.mtx.RLock()
+	keys := make([]labels.Label, 0, len(p.m))
+
+	for n, e := range p.m {
+		for v := range e {
+			keys = append(keys, labels.Label{Name: n, Value: v})
+		}
+	}
+	p.mtx.RUnlock()
+
+	sort.Slice(keys, func(i, j int) bool {
+		if d := strings.Compare(keys[i].Name, keys[j].Name); d != 0 {
+			return d < 0
+		}
+		return keys[i].Value < keys[j].Value
+	})
+	return keys
+}
+
+// PostingsStats contains cardinality based statistics for postings.
+type PostingsStats struct {
+	CardinalityMetricsStats []Stat
+	CardinalityLabelStats   []Stat
+	LabelValueStats         []Stat
+	LabelValuePairsStats    []Stat
+}
+
+// Stats calculates the cardinality statistics from postings.
+func (p *MemPostings) Stats(label string) *PostingsStats {
+	const maxNumOfRecords = 10
+	var size uint64
+
+	p.mtx.RLock()
+
+	metrics := &maxHeap{}
+	labels := &maxHeap{}
+	labelValueLength := &maxHeap{}
+	labelValuePairs := &maxHeap{}
+
+	metrics.init(maxNumOfRecords)
+	labels.init(maxNumOfRecords)
+	labelValueLength.init(maxNumOfRecords)
+	labelValuePairs.init(maxNumOfRecords)
+
+	for n, e := range p.m {
+		if n == "" {
+			continue
+		}
+		labels.push(Stat{Name: n, Count: uint64(len(e))})
+		size = 0
+		for name, values := range e {
+			if n == label {
+				metrics.push(Stat{Name: name, Count: uint64(len(values))})
+			}
+			labelValuePairs.push(Stat{Name: n + "=" + name, Count: uint64(len(values))})
+			size += uint64(len(name))
+		}
+		labelValueLength.push(Stat{Name: n, Count: size})
+	}
+
+	p.mtx.RUnlock()
+
+	return &PostingsStats{
+		CardinalityMetricsStats: metrics.get(),
+		CardinalityLabelStats:   labels.get(),
+		LabelValueStats:         labelValueLength.get(),
+		LabelValuePairsStats:    labelValuePairs.get(),
+	}
+}
+
+// Get returns a postings list for the given label pair.
+func (p *MemPostings) Get(name, value string) Postings {
+	var lp []uint64
+	p.mtx.RLock()
+	l := p.m[name]
+	if l != nil {
+		lp = l[value]
+	}
+	p.mtx.RUnlock()
+
+	if lp == nil {
+		return EmptyPostings()
+	}
+	return newListPostings(lp...)
+}
+
+// All returns a postings list over all documents ever added.
+func (p *MemPostings) All() Postings {
+	return p.Get(AllPostingsKey())
+}
+
+// EnsureOrder ensures that all postings lists are sorted. After it returns all further
+// calls to add and addFor will insert new IDs in a sorted manner.
+func (p *MemPostings) EnsureOrder() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.ordered {
+		return
+	}
+
+	n := runtime.GOMAXPROCS(0)
+	workc := make(chan []uint64)
+
+	var wg sync.WaitGroup
+	wg.Add(n)
+
+	for i := 0; i < n; i++ {
+		go func() {
+			for l := range workc {
+				sort.Slice(l, func(i, j int) bool { return l[i] < l[j] })
+			}
+			wg.Done()
+		}()
+	}
+
+	for _, e := range p.m {
+		for _, l := range e {
+			workc <- l
+		}
+	}
+	close(workc)
+	wg.Wait()
+
+	p.ordered = true
+}
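+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// the typical startup flow for an unordered batch fill:
+//
+//	p := NewUnorderedMemPostings()
+//	p.Add(2, labels.FromStrings("job", "api")) // out-of-order fill
+//	p.Add(1, labels.FromStrings("job", "api"))
+//	p.EnsureOrder()                               // required before any reads
+//	ids, _ := ExpandPostings(p.Get("job", "api")) // [1 2]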
+
+// Delete removes all ids in the given map from the postings lists.
+func (p *MemPostings) Delete(deleted map[uint64]struct{}) {
+	var keys, vals []string
+
+	// Collect all keys relevant for deletion once. New keys added afterwards
+	// can by definition not be affected by any of the given deletes.
+	p.mtx.RLock()
+	for n := range p.m {
+		keys = append(keys, n)
+	}
+	p.mtx.RUnlock()
+
+	for _, n := range keys {
+		p.mtx.RLock()
+		vals = vals[:0]
+		for v := range p.m[n] {
+			vals = append(vals, v)
+		}
+		p.mtx.RUnlock()
+
+		// For each posting we first analyse whether the postings list is affected by the deletes.
+		// If yes, we actually reallocate a new postings list.
+		for _, l := range vals {
+			// Only lock for processing one postings list so we don't block reads for too long.
+			p.mtx.Lock()
+
+			found := false
+			for _, id := range p.m[n][l] {
+				if _, ok := deleted[id]; ok {
+					found = true
+					break
+				}
+			}
+			if !found {
+				p.mtx.Unlock()
+				continue
+			}
+			repl := make([]uint64, 0, len(p.m[n][l]))
+
+			for _, id := range p.m[n][l] {
+				if _, ok := deleted[id]; !ok {
+					repl = append(repl, id)
+				}
+			}
+			if len(repl) > 0 {
+				p.m[n][l] = repl
+			} else {
+				delete(p.m[n], l)
+			}
+			p.mtx.Unlock()
+		}
+		p.mtx.Lock()
+		if len(p.m[n]) == 0 {
+			delete(p.m, n)
+		}
+		p.mtx.Unlock()
+	}
+}
+
+// Iter calls f for each postings list. It aborts if f returns an error and returns it.
+func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	for n, e := range p.m {
+		for v, p := range e {
+			if err := f(labels.Label{Name: n, Value: v}, newListPostings(p...)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Add a label set to the postings index.
+func (p *MemPostings) Add(id uint64, lset labels.Labels) {
+	p.mtx.Lock()
+
+	for _, l := range lset {
+		p.addFor(id, l)
+	}
+	p.addFor(id, allPostingsKey)
+
+	p.mtx.Unlock()
+}
+
+func (p *MemPostings) addFor(id uint64, l labels.Label) {
+	nm, ok := p.m[l.Name]
+	if !ok {
+		nm = map[string][]uint64{}
+		p.m[l.Name] = nm
+	}
+	list := append(nm[l.Value], id)
+	nm[l.Value] = list
+
+	if !p.ordered {
+		return
+	}
+	// There is no guarantee that no higher ID was inserted before as they may
+	// be generated independently before adding them to postings.
+	// We repair order violations on insert. The invariant is that the first n-1
+	// items in the list are already sorted.
+	for i := len(list) - 1; i >= 1; i-- {
+		if list[i] >= list[i-1] {
+			break
+		}
+		list[i], list[i-1] = list[i-1], list[i]
+	}
+}
+
+// ExpandPostings returns the postings expanded as a slice.
+func ExpandPostings(p Postings) (res []uint64, err error) {
+	for p.Next() {
+		res = append(res, p.At())
+	}
+	return res, p.Err()
+}
+
+// Postings provides iterative access over a postings list.
+type Postings interface {
+	// Next advances the iterator and returns true if another value was found.
+	Next() bool
+
+	// Seek advances the iterator to value v or greater and returns
+	// true if a value was found.
+	Seek(v uint64) bool
+
+	// At returns the value at the current iterator position.
+	At() uint64
+
+	// Err returns the last error of the iterator.
+	Err() error
+}
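+
+// Illustrative sketch (editor's addition): any Postings iterator can be
+// drained into a slice with ExpandPostings, e.g.:
+//
+//	ids, err := ExpandPostings(NewListPostings([]uint64{1, 3, 5}))
+//	// ids == []uint64{1, 3, 5}, err == nil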
+
+// errPostings is an empty iterator that always errors.
+type errPostings struct {
+	err error
+}
+
+func (e errPostings) Next() bool       { return false }
+func (e errPostings) Seek(uint64) bool { return false }
+func (e errPostings) At() uint64       { return 0 }
+func (e errPostings) Err() error       { return e.err }
+
+var emptyPostings = errPostings{}
+
+// EmptyPostings returns a postings list that's always empty.
+// NOTE: Returning the EmptyPostings sentinel when a postings list has no entries
+// is recommended, as it triggers optimized paths in functions such as Intersect
+// and Without.
+func EmptyPostings() Postings {
+	return emptyPostings
+}
+
+// ErrPostings returns new postings that immediately error.
+func ErrPostings(err error) Postings {
+	return errPostings{err}
+}
+
+// Intersect returns a new postings list over the intersection of the
+// input postings.
+func Intersect(its ...Postings) Postings {
+	if len(its) == 0 {
+		return EmptyPostings()
+	}
+	if len(its) == 1 {
+		return its[0]
+	}
+	for _, p := range its {
+		if p == EmptyPostings() {
+			return EmptyPostings()
+		}
+	}
+
+	return newIntersectPostings(its...)
+}
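+
+// Illustrative sketch (editor's addition): intersection keeps only the IDs
+// present in every input iterator:
+//
+//	a := NewListPostings([]uint64{1, 2, 3, 5})
+//	b := NewListPostings([]uint64{2, 5, 9})
+//	ids, _ := ExpandPostings(Intersect(a, b)) // [2 5]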
+
+type intersectPostings struct {
+	arr []Postings
+	cur uint64
+}
+
+func newIntersectPostings(its ...Postings) *intersectPostings {
+	return &intersectPostings{arr: its}
+}
+
+func (it *intersectPostings) At() uint64 {
+	return it.cur
+}
+
+func (it *intersectPostings) doNext() bool {
+Loop:
+	for {
+		for _, p := range it.arr {
+			if !p.Seek(it.cur) {
+				return false
+			}
+			if p.At() > it.cur {
+				it.cur = p.At()
+				continue Loop
+			}
+		}
+		return true
+	}
+}
+
+func (it *intersectPostings) Next() bool {
+	for _, p := range it.arr {
+		if !p.Next() {
+			return false
+		}
+		if p.At() > it.cur {
+			it.cur = p.At()
+		}
+	}
+	return it.doNext()
+}
+
+func (it *intersectPostings) Seek(id uint64) bool {
+	it.cur = id
+	return it.doNext()
+}
+
+func (it *intersectPostings) Err() error {
+	for _, p := range it.arr {
+		if p.Err() != nil {
+			return p.Err()
+		}
+	}
+	return nil
+}
+
+// Merge returns a new iterator over the union of the input iterators.
+func Merge(its ...Postings) Postings {
+	if len(its) == 0 {
+		return EmptyPostings()
+	}
+	if len(its) == 1 {
+		return its[0]
+	}
+
+	p, ok := newMergedPostings(its)
+	if !ok {
+		return EmptyPostings()
+	}
+	return p
+}
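+
+// Illustrative sketch (editor's addition): the merge is a deduplicated union
+// of sorted iterators:
+//
+//	a := NewListPostings([]uint64{1, 4})
+//	b := NewListPostings([]uint64{2, 4, 7})
+//	ids, _ := ExpandPostings(Merge(a, b)) // [1 2 4 7]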
+
+type postingsHeap []Postings
+
+func (h postingsHeap) Len() int           { return len(h) }
+func (h postingsHeap) Less(i, j int) bool { return h[i].At() < h[j].At() }
+func (h *postingsHeap) Swap(i, j int)     { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
+
+func (h *postingsHeap) Push(x interface{}) {
+	*h = append(*h, x.(Postings))
+}
+
+func (h *postingsHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+type mergedPostings struct {
+	h           postingsHeap
+	initialized bool
+	cur         uint64
+	err         error
+}
+
+func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
+	ph := make(postingsHeap, 0, len(p))
+
+	for _, it := range p {
+		// NOTE: mergedPostings struct requires the user to issue an initial Next.
+		if it.Next() {
+			ph = append(ph, it)
+		} else {
+			if it.Err() != nil {
+				return &mergedPostings{err: it.Err()}, true
+			}
+		}
+	}
+
+	if len(ph) == 0 {
+		return nil, false
+	}
+	return &mergedPostings{h: ph}, true
+}
+
+func (it *mergedPostings) Next() bool {
+	if it.h.Len() == 0 || it.err != nil {
+		return false
+	}
+
+	// The user must issue an initial Next.
+	if !it.initialized {
+		heap.Init(&it.h)
+		it.cur = it.h[0].At()
+		it.initialized = true
+		return true
+	}
+
+	for {
+		cur := it.h[0]
+		if !cur.Next() {
+			heap.Pop(&it.h)
+			if cur.Err() != nil {
+				it.err = cur.Err()
+				return false
+			}
+			if it.h.Len() == 0 {
+				return false
+			}
+		} else {
+			// Value of top of heap has changed, re-heapify.
+			heap.Fix(&it.h, 0)
+		}
+
+		if it.h[0].At() != it.cur {
+			it.cur = it.h[0].At()
+			return true
+		}
+	}
+}
+
+func (it *mergedPostings) Seek(id uint64) bool {
+	if it.h.Len() == 0 || it.err != nil {
+		return false
+	}
+	if !it.initialized {
+		if !it.Next() {
+			return false
+		}
+	}
+	for it.cur < id {
+		cur := it.h[0]
+		if !cur.Seek(id) {
+			heap.Pop(&it.h)
+			if cur.Err() != nil {
+				it.err = cur.Err()
+				return false
+			}
+			if it.h.Len() == 0 {
+				return false
+			}
+		} else {
+			// Value of top of heap has changed, re-heapify.
+			heap.Fix(&it.h, 0)
+		}
+
+		it.cur = it.h[0].At()
+	}
+	return true
+}
+
+func (it mergedPostings) At() uint64 {
+	return it.cur
+}
+
+func (it mergedPostings) Err() error {
+	return it.err
+}
+
+// Without returns a new postings list that contains all elements from the full list that
+// are not in the drop list.
+func Without(full, drop Postings) Postings {
+	if full == EmptyPostings() {
+		return EmptyPostings()
+	}
+
+	if drop == EmptyPostings() {
+		return full
+	}
+	return newRemovedPostings(full, drop)
+}
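+
+// Illustrative sketch (editor's addition):
+//
+//	full := NewListPostings([]uint64{1, 2, 3, 4})
+//	drop := NewListPostings([]uint64{2, 4})
+//	ids, _ := ExpandPostings(Without(full, drop)) // [1 3]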
+
+type removedPostings struct {
+	full, remove Postings
+
+	cur uint64
+
+	initialized bool
+	fok, rok    bool
+}
+
+func newRemovedPostings(full, remove Postings) *removedPostings {
+	return &removedPostings{
+		full:   full,
+		remove: remove,
+	}
+}
+
+func (rp *removedPostings) At() uint64 {
+	return rp.cur
+}
+
+func (rp *removedPostings) Next() bool {
+	if !rp.initialized {
+		rp.fok = rp.full.Next()
+		rp.rok = rp.remove.Next()
+		rp.initialized = true
+	}
+	for {
+		if !rp.fok {
+			return false
+		}
+
+		if !rp.rok {
+			rp.cur = rp.full.At()
+			rp.fok = rp.full.Next()
+			return true
+		}
+
+		fcur, rcur := rp.full.At(), rp.remove.At()
+		if fcur < rcur {
+			rp.cur = fcur
+			rp.fok = rp.full.Next()
+
+			return true
+		} else if rcur < fcur {
+			// Forward the remove postings to the right position.
+			rp.rok = rp.remove.Seek(fcur)
+		} else {
+			// Skip the current posting.
+			rp.fok = rp.full.Next()
+		}
+	}
+}
+
+func (rp *removedPostings) Seek(id uint64) bool {
+	if rp.cur >= id {
+		return true
+	}
+
+	rp.fok = rp.full.Seek(id)
+	rp.rok = rp.remove.Seek(id)
+	rp.initialized = true
+
+	return rp.Next()
+}
+
+func (rp *removedPostings) Err() error {
+	if rp.full.Err() != nil {
+		return rp.full.Err()
+	}
+
+	return rp.remove.Err()
+}
+
+// ListPostings implements the Postings interface over a plain list.
+type ListPostings struct {
+	list []uint64
+	cur  uint64
+}
+
+func NewListPostings(list []uint64) Postings {
+	return newListPostings(list...)
+}
+
+func newListPostings(list ...uint64) *ListPostings {
+	return &ListPostings{list: list}
+}
+
+func (it *ListPostings) At() uint64 {
+	return it.cur
+}
+
+func (it *ListPostings) Next() bool {
+	if len(it.list) > 0 {
+		it.cur = it.list[0]
+		it.list = it.list[1:]
+		return true
+	}
+	it.cur = 0
+	return false
+}
+
+func (it *ListPostings) Seek(x uint64) bool {
+	// If the current value satisfies, then return.
+	if it.cur >= x {
+		return true
+	}
+	if len(it.list) == 0 {
+		return false
+	}
+
+	// Do binary search between current position and end.
+	i := sort.Search(len(it.list), func(i int) bool {
+		return it.list[i] >= x
+	})
+	if i < len(it.list) {
+		it.cur = it.list[i]
+		it.list = it.list[i+1:]
+		return true
+	}
+	it.list = nil
+	return false
+}
+
+func (it *ListPostings) Err() error {
+	return nil
+}
+
+// bigEndianPostings implements the Postings interface over a byte stream of
+// big endian numbers.
+type bigEndianPostings struct {
+	list []byte
+	cur  uint32
+}
+
+func newBigEndianPostings(list []byte) *bigEndianPostings {
+	return &bigEndianPostings{list: list}
+}
+
+func (it *bigEndianPostings) At() uint64 {
+	return uint64(it.cur)
+}
+
+func (it *bigEndianPostings) Next() bool {
+	if len(it.list) >= 4 {
+		it.cur = binary.BigEndian.Uint32(it.list)
+		it.list = it.list[4:]
+		return true
+	}
+	return false
+}
+
+func (it *bigEndianPostings) Seek(x uint64) bool {
+	if uint64(it.cur) >= x {
+		return true
+	}
+
+	num := len(it.list) / 4
+	// Do binary search between current position and end.
+	i := sort.Search(num, func(i int) bool {
+		return binary.BigEndian.Uint32(it.list[i*4:]) >= uint32(x)
+	})
+	if i < num {
+		j := i * 4
+		it.cur = binary.BigEndian.Uint32(it.list[j:])
+		it.list = it.list[j+4:]
+		return true
+	}
+	it.list = nil
+	return false
+}
+
+func (it *bigEndianPostings) Err() error {
+	return nil
+}
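+
+// Illustrative sketch (editor's addition): the byte stream is a plain sequence
+// of 4-byte big-endian IDs, so postings for IDs 1 and 258 decode from:
+//
+//	buf := make([]byte, 8)
+//	binary.BigEndian.PutUint32(buf[0:], 1)   // 0x00 0x00 0x00 0x01
+//	binary.BigEndian.PutUint32(buf[4:], 258) // 0x00 0x00 0x01 0x02
+//	it := newBigEndianPostings(buf)          // yields 1, then 258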
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go
new file mode 100644
index 0000000000000000000000000000000000000000..2fc2465d9c5034b0796c79edfe2702fc80eb354f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+	"math"
+	"sort"
+)
+
+// Stat holds values for a single cardinality statistic.
+type Stat struct {
+	Name  string
+	Count uint64
+}
+
+type maxHeap struct {
+	maxLength int
+	minValue  uint64
+	minIndex  int
+	Items     []Stat
+}
+
+func (m *maxHeap) init(len int) {
+	m.maxLength = len
+	m.minValue = math.MaxUint64
+	m.Items = make([]Stat, 0, len)
+}
+
+func (m *maxHeap) push(item Stat) {
+	if len(m.Items) < m.maxLength {
+		if item.Count < m.minValue {
+			m.minValue = item.Count
+			m.minIndex = len(m.Items)
+		}
+		m.Items = append(m.Items, item)
+		return
+	}
+	if item.Count < m.minValue {
+		return
+	}
+
+	m.Items[m.minIndex] = item
+	m.minValue = item.Count
+
+	for i, stat := range m.Items {
+		if stat.Count < m.minValue {
+			m.minValue = stat.Count
+			m.minIndex = i
+		}
+	}
+}
+
+func (m *maxHeap) get() []Stat {
+	sort.Slice(m.Items, func(i, j int) bool {
+		return m.Items[i].Count > m.Items[j].Count
+	})
+	return m.Items
+}
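+
+// Illustrative sketch (editor's addition): keeping the top-2 stats by count:
+//
+//	var h maxHeap
+//	h.init(2)
+//	h.push(Stat{Name: "a", Count: 5})
+//	h.push(Stat{Name: "b", Count: 1})
+//	h.push(Stat{Name: "c", Count: 9}) // evicts "b", the current minimum
+//	top := h.get()                    // [{c 9} {a 5}]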
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go
new file mode 100644
index 0000000000000000000000000000000000000000..12a9bfe92c52199e3168607a036e313786bf590a
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go
@@ -0,0 +1,200 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"sync"
+)
+
+// isolationState holds the isolation information.
+type isolationState struct {
+	// We will ignore all appends above the max, or that are incomplete.
+	maxAppendID       uint64
+	incompleteAppends map[uint64]struct{}
+	lowWatermark      uint64 // Lowest of incompleteAppends/maxAppendID.
+	isolation         *isolation
+
+	// Doubly linked list of active reads.
+	next *isolationState
+	prev *isolationState
+}
+
+// Close closes the state.
+func (i *isolationState) Close() {
+	i.isolation.readMtx.Lock()
+	defer i.isolation.readMtx.Unlock()
+	i.next.prev = i.prev
+	i.prev.next = i.next
+}
+
+// isolation is the global isolation state.
+type isolation struct {
+	// Mutex for accessing lastAppendID and appendsOpen.
+	appendMtx sync.Mutex
+	// Each append is given an internal id.
+	lastAppendID uint64
+	// Which appends are currently in progress.
+	appendsOpen map[uint64]struct{}
+	// Mutex for accessing readsOpen.
+	// If taking both appendMtx and readMtx, take appendMtx first.
+	readMtx sync.Mutex
+	// All current in use isolationStates. This is a doubly-linked list.
+	readsOpen *isolationState
+}
+
+func newIsolation() *isolation {
+	isoState := &isolationState{}
+	isoState.next = isoState
+	isoState.prev = isoState
+
+	return &isolation{
+		appendsOpen: map[uint64]struct{}{},
+		readsOpen:   isoState,
+	}
+}
+
+// lowWatermark returns the appendID below which we no longer need to track
+// which appends were from which appendID.
+func (i *isolation) lowWatermark() uint64 {
+	i.appendMtx.Lock() // Take appendMtx first.
+	defer i.appendMtx.Unlock()
+	i.readMtx.Lock()
+	defer i.readMtx.Unlock()
+	if i.readsOpen.prev != i.readsOpen {
+		return i.readsOpen.prev.lowWatermark
+	}
+	lw := i.lastAppendID
+	for k := range i.appendsOpen {
+		if k < lw {
+			lw = k
+		}
+	}
+	return lw
+}
+
+// State returns an object used to control isolation
+// between a query and appends. Must be closed when complete.
+func (i *isolation) State() *isolationState {
+	i.appendMtx.Lock() // Take append mutex before read mutex.
+	defer i.appendMtx.Unlock()
+	isoState := &isolationState{
+		maxAppendID:       i.lastAppendID,
+		lowWatermark:      i.lastAppendID,
+		incompleteAppends: make(map[uint64]struct{}, len(i.appendsOpen)),
+		isolation:         i,
+	}
+	for k := range i.appendsOpen {
+		isoState.incompleteAppends[k] = struct{}{}
+		if k < isoState.lowWatermark {
+			isoState.lowWatermark = k
+		}
+	}
+
+	i.readMtx.Lock()
+	defer i.readMtx.Unlock()
+	isoState.prev = i.readsOpen
+	isoState.next = i.readsOpen.next
+	i.readsOpen.next.prev = isoState
+	i.readsOpen.next = isoState
+	return isoState
+}
+
+// newAppendID increments the transaction counter and returns a new transaction
+// ID. The first ID returned is 1.
+func (i *isolation) newAppendID() uint64 {
+	i.appendMtx.Lock()
+	defer i.appendMtx.Unlock()
+	i.lastAppendID++
+	i.appendsOpen[i.lastAppendID] = struct{}{}
+	return i.lastAppendID
+}
+
+func (i *isolation) closeAppend(appendID uint64) {
+	i.appendMtx.Lock()
+	defer i.appendMtx.Unlock()
+	delete(i.appendsOpen, appendID)
+}
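+
+// Illustrative sketch (editor's addition): the append/read lifecycle as the
+// head appender would drive it:
+//
+//	iso := newIsolation()
+//	id := iso.newAppendID() // 1
+//	st := iso.State()       // readers opened now see append 1 as incomplete
+//	iso.closeAppend(id)
+//	st.Close()              // unlinks st from the list of open reads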
+
+// The transactionID ring buffer.
+type txRing struct {
+	txIDs     []uint64
+	txIDFirst int // Position of the first id in the ring.
+	txIDCount int // How many ids in the ring.
+}
+
+func newTxRing(cap int) *txRing {
+	return &txRing{
+		txIDs: make([]uint64, cap),
+	}
+}
+
+func (txr *txRing) add(appendID uint64) {
+	if txr.txIDCount == len(txr.txIDs) {
+		// Ring buffer is full, expand by doubling.
+		newRing := make([]uint64, txr.txIDCount*2)
+		idx := copy(newRing[:], txr.txIDs[txr.txIDFirst:])
+		copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
+		txr.txIDs = newRing
+		txr.txIDFirst = 0
+	}
+
+	txr.txIDs[(txr.txIDFirst+txr.txIDCount)%len(txr.txIDs)] = appendID
+	txr.txIDCount++
+}
+
+func (txr *txRing) cleanupAppendIDsBelow(bound uint64) {
+	pos := txr.txIDFirst
+
+	for txr.txIDCount > 0 {
+		if txr.txIDs[pos] < bound {
+			txr.txIDFirst++
+			txr.txIDCount--
+		} else {
+			break
+		}
+
+		pos++
+		if pos == len(txr.txIDs) {
+			pos = 0
+		}
+	}
+
+	txr.txIDFirst %= len(txr.txIDs)
+}
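+
+// Illustrative sketch (editor's addition):
+//
+//	r := newTxRing(2)
+//	r.add(1)
+//	r.add(2)
+//	r.add(3)                   // ring is full, so it doubles to capacity 4 first
+//	r.cleanupAppendIDsBelow(3) // drops 1 and 2, keeping only 3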
+
+func (txr *txRing) iterator() *txRingIterator {
+	return &txRingIterator{
+		pos: txr.txIDFirst,
+		ids: txr.txIDs,
+	}
+}
+
+// txRingIterator lets you iterate over the ring. It never terminates on its
+// own; the caller must bound the iteration.
+type txRingIterator struct {
+	ids []uint64
+
+	pos int
+}
+
+func (it *txRingIterator) At() uint64 {
+	return it.ids[it.pos]
+}
+
+func (it *txRingIterator) Next() {
+	it.pos++
+	if it.pos == len(it.ids) {
+		it.pos = 0
+	}
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9d6c62ee4c8ac137516d07bb6d305e08b3ef6ac
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
@@ -0,0 +1,1207 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"sort"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+)
+
+// querier aggregates querying results from time blocks within
+// a single partition.
+type querier struct {
+	blocks []storage.Querier
+}
+
+func (q *querier) LabelValues(n string) ([]string, storage.Warnings, error) {
+	return q.lvals(q.blocks, n)
+}
+
+// LabelNames returns all the unique label names present in the querier's blocks.
+func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
+	labelNamesMap := make(map[string]struct{})
+	var ws storage.Warnings
+	for _, b := range q.blocks {
+		names, w, err := b.LabelNames()
+		ws = append(ws, w...)
+		if err != nil {
+			return nil, ws, errors.Wrap(err, "LabelNames() from Querier")
+		}
+		for _, name := range names {
+			labelNamesMap[name] = struct{}{}
+		}
+	}
+
+	labelNames := make([]string, 0, len(labelNamesMap))
+	for name := range labelNamesMap {
+		labelNames = append(labelNames, name)
+	}
+	sort.Strings(labelNames)
+
+	return labelNames, ws, nil
+}
+
+func (q *querier) lvals(qs []storage.Querier, n string) ([]string, storage.Warnings, error) {
+	if len(qs) == 0 {
+		return nil, nil, nil
+	}
+	if len(qs) == 1 {
+		return qs[0].LabelValues(n)
+	}
+	l := len(qs) / 2
+
+	var ws storage.Warnings
+	s1, w, err := q.lvals(qs[:l], n)
+	ws = append(ws, w...)
+	if err != nil {
+		return nil, ws, err
+	}
+	s2, w, err := q.lvals(qs[l:], n)
+	ws = append(ws, w...)
+	if err != nil {
+		return nil, ws, err
+	}
+	return mergeStrings(s1, s2), ws, nil
+}
+
+func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	if len(q.blocks) == 0 {
+		return storage.EmptySeriesSet(), nil, nil
+	}
+	if len(q.blocks) == 1 {
+		// Sorting Head series is slow, and unneeded when only the
+		// Head is being queried.
+		return q.blocks[0].Select(sortSeries, hints, ms...)
+	}
+
+	ss := make([]storage.SeriesSet, len(q.blocks))
+	var ws storage.Warnings
+	for i, b := range q.blocks {
+		// We have to sort if blocks > 1 as MergedSeriesSet requires it.
+		s, w, err := b.Select(true, hints, ms...)
+		ws = append(ws, w...)
+		if err != nil {
+			return nil, ws, err
+		}
+		ss[i] = s
+	}
+
+	return NewMergedSeriesSet(ss), ws, nil
+}
+
+func (q *querier) Close() error {
+	var merr tsdb_errors.MultiError
+
+	for _, bq := range q.blocks {
+		merr.Add(bq.Close())
+	}
+	return merr.Err()
+}
+
+// verticalQuerier aggregates querying results from time blocks within
+// a single partition. The block time ranges can be overlapping.
+type verticalQuerier struct {
+	querier
+}
+
+func (q *verticalQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	return q.sel(sortSeries, hints, q.blocks, ms)
+}
+
+func (q *verticalQuerier) sel(sortSeries bool, hints *storage.SelectHints, qs []storage.Querier, ms []*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	if len(qs) == 0 {
+		return storage.EmptySeriesSet(), nil, nil
+	}
+	if len(qs) == 1 {
+		return qs[0].Select(sortSeries, hints, ms...)
+	}
+	l := len(qs) / 2
+
+	var ws storage.Warnings
+	a, w, err := q.sel(sortSeries, hints, qs[:l], ms)
+	ws = append(ws, w...)
+	if err != nil {
+		return nil, ws, err
+	}
+	b, w, err := q.sel(sortSeries, hints, qs[l:], ms)
+	ws = append(ws, w...)
+	if err != nil {
+		return nil, ws, err
+	}
+	return newMergedVerticalSeriesSet(a, b), ws, nil
+}
+
+// NewBlockQuerier returns a querier against the reader.
+func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
+	indexr, err := b.Index()
+	if err != nil {
+		return nil, errors.Wrapf(err, "open index reader")
+	}
+	chunkr, err := b.Chunks()
+	if err != nil {
+		indexr.Close()
+		return nil, errors.Wrapf(err, "open chunk reader")
+	}
+	tombsr, err := b.Tombstones()
+	if err != nil {
+		indexr.Close()
+		chunkr.Close()
+		return nil, errors.Wrapf(err, "open tombstone reader")
+	}
+	return &blockQuerier{
+		mint:       mint,
+		maxt:       maxt,
+		index:      indexr,
+		chunks:     chunkr,
+		tombstones: tombsr,
+	}, nil
+}
+
+// blockQuerier provides querying access to a single block database.
+type blockQuerier struct {
+	index      IndexReader
+	chunks     ChunkReader
+	tombstones tombstones.Reader
+
+	closed bool
+
+	mint, maxt int64
+}
+
+func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	var base storage.DeprecatedChunkSeriesSet
+	var err error
+
+	if sortSeries {
+		base, err = LookupChunkSeriesSorted(q.index, q.tombstones, ms...)
+	} else {
+		base, err = LookupChunkSeries(q.index, q.tombstones, ms...)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	mint := q.mint
+	maxt := q.maxt
+	if hints != nil {
+		mint = hints.Start
+		maxt = hints.End
+	}
+	return &blockSeriesSet{
+		set: &populatedChunkSeries{
+			set:    base,
+			chunks: q.chunks,
+			mint:   mint,
+			maxt:   maxt,
+		},
+
+		mint: mint,
+		maxt: maxt,
+	}, nil, nil
+}
+
+func (q *blockQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	res, err := q.index.LabelValues(name)
+	return res, nil, err
+}
+
+func (q *blockQuerier) LabelNames() ([]string, storage.Warnings, error) {
+	res, err := q.index.LabelNames()
+	return res, nil, err
+}
+
+func (q *blockQuerier) Close() error {
+	if q.closed {
+		return errors.New("block querier already closed")
+	}
+
+	var merr tsdb_errors.MultiError
+	merr.Add(q.index.Close())
+	merr.Add(q.chunks.Close())
+	merr.Add(q.tombstones.Close())
+	q.closed = true
+	return merr.Err()
+}
+
+// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped.
+var regexMetaCharacterBytes [16]byte
+
+// isRegexMetaCharacter reports whether byte b needs to be escaped.
+func isRegexMetaCharacter(b byte) bool {
+	return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
+}
+
+func init() {
+	for _, b := range []byte(`.+*?()|[]{}^$`) {
+		regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
+	}
+}
+
+func findSetMatches(pattern string) []string {
+	// Return empty matches if the wrapper from Prometheus is missing.
+	if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" {
+		return nil
+	}
+	escaped := false
+	sets := []*strings.Builder{{}}
+	for i := 4; i < len(pattern)-2; i++ {
+		if escaped {
+			switch {
+			case isRegexMetaCharacter(pattern[i]):
+				sets[len(sets)-1].WriteByte(pattern[i])
+			case pattern[i] == '\\':
+				sets[len(sets)-1].WriteByte('\\')
+			default:
+				return nil
+			}
+			escaped = false
+		} else {
+			switch {
+			case isRegexMetaCharacter(pattern[i]):
+				if pattern[i] == '|' {
+					sets = append(sets, &strings.Builder{})
+				} else {
+					return nil
+				}
+			case pattern[i] == '\\':
+				escaped = true
+			default:
+				sets[len(sets)-1].WriteByte(pattern[i])
+			}
+		}
+	}
+	matches := make([]string, 0, len(sets))
+	for _, s := range sets {
+		if s.Len() > 0 {
+			matches = append(matches, s.String())
+		}
+	}
+	return matches
+}
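+
+// Illustrative sketch (editor's addition): Prometheus anchors regex matchers
+// as "^(?:...)$", so a pure alternation is recognized as a set match:
+//
+//	findSetMatches("^(?:foo|bar)$") // ["foo", "bar"]
+//	findSetMatches("^(?:foo.*)$")   // nil: '.' and '*' are meta characters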
+
+// PostingsForMatchers assembles a single postings iterator against the index reader
+// based on the given matchers. The resulting postings are not ordered by series.
+func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
+	var its, notIts []index.Postings
+	// See which label must be non-empty.
+	// Optimization for case like {l=~".", l!="1"}.
+	labelMustBeSet := make(map[string]bool, len(ms))
+	for _, m := range ms {
+		if !m.Matches("") {
+			labelMustBeSet[m.Name] = true
+		}
+	}
+
+	for _, m := range ms {
+		if labelMustBeSet[m.Name] {
+			// If this matcher must be non-empty, we can be smarter.
+			matchesEmpty := m.Matches("")
+			isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
+			if isNot && matchesEmpty { // l!="foo"
+				// If the label can't be empty and is a Not and the inner matcher
+				// doesn't match empty, then subtract it out at the end.
+				inverse, err := m.Inverse()
+				if err != nil {
+					return nil, err
+				}
+
+				it, err := postingsForMatcher(ix, inverse)
+				if err != nil {
+					return nil, err
+				}
+				notIts = append(notIts, it)
+			} else if isNot && !matchesEmpty { // l!=""
+				// If the label can't be empty and is a Not, but the inner matcher can
+				// be empty we need to use inversePostingsForMatcher.
+				inverse, err := m.Inverse()
+				if err != nil {
+					return nil, err
+				}
+
+				it, err := inversePostingsForMatcher(ix, inverse)
+				if err != nil {
+					return nil, err
+				}
+				its = append(its, it)
+			} else { // l="a"
+				// Non-Not matcher, use normal postingsForMatcher.
+				it, err := postingsForMatcher(ix, m)
+				if err != nil {
+					return nil, err
+				}
+				its = append(its, it)
+			}
+		} else { // l=""
+			// If the matchers for a labelname selects an empty value, it selects all
+			// the series which don't have the label name set too. See:
+			// https://github.com/prometheus/prometheus/issues/3575 and
+			// https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
+			it, err := inversePostingsForMatcher(ix, m)
+			if err != nil {
+				return nil, err
+			}
+			notIts = append(notIts, it)
+		}
+	}
+
+	// If there's nothing to subtract from, add in everything and remove the notIts later.
+	if len(its) == 0 && len(notIts) != 0 {
+		k, v := index.AllPostingsKey()
+		allPostings, err := ix.Postings(k, v)
+		if err != nil {
+			return nil, err
+		}
+		its = append(its, allPostings)
+	}
+
+	it := index.Intersect(its...)
+
+	for _, n := range notIts {
+		it = index.Without(it, n)
+	}
+
+	return it, nil
+}
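+
+// Illustrative sketch (editor's addition), assuming ir is an IndexReader for
+// an open block:
+//
+//	m1 := labels.MustNewMatcher(labels.MatchEqual, "job", "api")
+//	m2 := labels.MustNewMatcher(labels.MatchNotEqual, "env", "dev")
+//	p, err := PostingsForMatchers(ir, m1, m2) // unsorted postings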
+
+func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
+	// This method will not return postings for missing labels.
+
+	// Fast-path for equal matching.
+	if m.Type == labels.MatchEqual {
+		return ix.Postings(m.Name, m.Value)
+	}
+
+	// Fast-path for set matching.
+	if m.Type == labels.MatchRegexp {
+		setMatches := findSetMatches(m.GetRegexString())
+		if len(setMatches) > 0 {
+			sort.Strings(setMatches)
+			return ix.Postings(m.Name, setMatches...)
+		}
+	}
+
+	vals, err := ix.LabelValues(m.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []string
+	for _, val := range vals {
+		if m.Matches(val) {
+			res = append(res, val)
+		}
+	}
+
+	if len(res) == 0 {
+		return index.EmptyPostings(), nil
+	}
+
+	return ix.Postings(m.Name, res...)
+}
+
+// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
+func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
+	vals, err := ix.LabelValues(m.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []string
+	for _, val := range vals {
+		if !m.Matches(val) {
+			res = append(res, val)
+		}
+	}
+
+	return ix.Postings(m.Name, res...)
+}
+
+func mergeStrings(a, b []string) []string {
+	maxl := len(a)
+	if len(b) > len(a) {
+		maxl = len(b)
+	}
+	res := make([]string, 0, maxl*10/9)
+
+	for len(a) > 0 && len(b) > 0 {
+		d := strings.Compare(a[0], b[0])
+
+		if d == 0 {
+			res = append(res, a[0])
+			a, b = a[1:], b[1:]
+		} else if d < 0 {
+			res = append(res, a[0])
+			a = a[1:]
+		} else if d > 0 {
+			res = append(res, b[0])
+			b = b[1:]
+		}
+	}
+
+	// Append all remaining elements.
+	res = append(res, a...)
+	res = append(res, b...)
+	return res
+}
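+
+// Illustrative sketch (editor's addition):
+//
+//	mergeStrings([]string{"a", "c"}, []string{"b", "c", "d"})
+//	// ["a", "b", "c", "d"]: a sorted union with duplicates collapsed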
+
+// mergedSeriesSet exposes a slice of series sets as a single series set. The input series sets
+// must be sorted and sequential in time.
+// TODO(bwplotka): Merge this with merge SeriesSet available in storage package.
+type mergedSeriesSet struct {
+	all  []storage.SeriesSet
+	buf  []storage.SeriesSet // A buffer for keeping the order of SeriesSet slice during forwarding the SeriesSet.
+	ids  []int               // The indices of chosen SeriesSet for the current run.
+	done bool
+	err  error
+	cur  storage.Series
+}
+
+// TODO(bwplotka): Merge this with merge SeriesSet available in storage package.
+func NewMergedSeriesSet(all []storage.SeriesSet) storage.SeriesSet {
+	if len(all) == 1 {
+		return all[0]
+	}
+	s := &mergedSeriesSet{all: all}
+	// Initialize first elements of all sets as Next() needs
+	// one element look-ahead.
+	s.nextAll()
+	if len(s.all) == 0 {
+		s.done = true
+	}
+
+	return s
+}
+
+func (s *mergedSeriesSet) At() storage.Series {
+	return s.cur
+}
+
+func (s *mergedSeriesSet) Err() error {
+	return s.err
+}
+
+// nextAll calls Next() on every SeriesSet.
+// Because the order of the SeriesSet slice affects the results,
+// we use a buffer slice to preserve the order.
+func (s *mergedSeriesSet) nextAll() {
+	s.buf = s.buf[:0]
+	for _, ss := range s.all {
+		if ss.Next() {
+			s.buf = append(s.buf, ss)
+		} else if ss.Err() != nil {
+			s.done = true
+			s.err = ss.Err()
+			break
+		}
+	}
+	s.all, s.buf = s.buf, s.all
+}
+
+// nextWithID calls Next() on the SeriesSets at the indices held in s.ids.
+// Because the order of the SeriesSet slice affects the results,
+// we use a buffer slice to preserve the order.
+func (s *mergedSeriesSet) nextWithID() {
+	if len(s.ids) == 0 {
+		return
+	}
+
+	s.buf = s.buf[:0]
+	i1 := 0
+	i2 := 0
+	for i1 < len(s.all) {
+		if i2 < len(s.ids) && i1 == s.ids[i2] {
+			if !s.all[s.ids[i2]].Next() {
+				if s.all[s.ids[i2]].Err() != nil {
+					s.done = true
+					s.err = s.all[s.ids[i2]].Err()
+					break
+				}
+				i2++
+				i1++
+				continue
+			}
+			i2++
+		}
+		s.buf = append(s.buf, s.all[i1])
+		i1++
+	}
+	s.all, s.buf = s.buf, s.all
+}
+
+func (s *mergedSeriesSet) Next() bool {
+	if s.done {
+		return false
+	}
+
+	s.nextWithID()
+	if s.done {
+		return false
+	}
+	s.ids = s.ids[:0]
+	if len(s.all) == 0 {
+		s.done = true
+		return false
+	}
+
+	// Here we are looking for a set of series sets with the lowest labels,
+	// and we will cache their indexes in s.ids.
+	s.ids = append(s.ids, 0)
+	for i := 1; i < len(s.all); i++ {
+		cmp := labels.Compare(s.all[s.ids[0]].At().Labels(), s.all[i].At().Labels())
+		if cmp > 0 {
+			s.ids = s.ids[:1]
+			s.ids[0] = i
+		} else if cmp == 0 {
+			s.ids = append(s.ids, i)
+		}
+	}
+
+	if len(s.ids) > 1 {
+		series := make([]storage.Series, len(s.ids))
+		for i, idx := range s.ids {
+			series[i] = s.all[idx].At()
+		}
+		s.cur = &chainedSeries{series: series}
+	} else {
+		s.cur = s.all[s.ids[0]].At()
+	}
+	return true
+}
+
+type mergedVerticalSeriesSet struct {
+	a, b         storage.SeriesSet
+	cur          storage.Series
+	adone, bdone bool
+}
+
+// NewMergedVerticalSeriesSet takes two series sets as a single series set.
+// The input series sets must be sorted and
+// the time ranges of the series can be overlapping.
+func NewMergedVerticalSeriesSet(a, b storage.SeriesSet) storage.SeriesSet {
+	return newMergedVerticalSeriesSet(a, b)
+}
+
+func newMergedVerticalSeriesSet(a, b storage.SeriesSet) *mergedVerticalSeriesSet {
+	s := &mergedVerticalSeriesSet{a: a, b: b}
+	// Initialize first elements of both sets as Next() needs
+	// one element look-ahead.
+	s.adone = !s.a.Next()
+	s.bdone = !s.b.Next()
+
+	return s
+}
+
+func (s *mergedVerticalSeriesSet) At() storage.Series {
+	return s.cur
+}
+
+func (s *mergedVerticalSeriesSet) Err() error {
+	if s.a.Err() != nil {
+		return s.a.Err()
+	}
+	return s.b.Err()
+}
+
+func (s *mergedVerticalSeriesSet) compare() int {
+	if s.adone {
+		return 1
+	}
+	if s.bdone {
+		return -1
+	}
+	return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
+}
+
+func (s *mergedVerticalSeriesSet) Next() bool {
+	if s.adone && s.bdone || s.Err() != nil {
+		return false
+	}
+
+	d := s.compare()
+
+	// If d == 0, both sets contain the current series. Chain them into a single one.
+	if d > 0 {
+		s.cur = s.b.At()
+		s.bdone = !s.b.Next()
+	} else if d < 0 {
+		s.cur = s.a.At()
+		s.adone = !s.a.Next()
+	} else {
+		s.cur = &verticalChainedSeries{series: []storage.Series{s.a.At(), s.b.At()}}
+		s.adone = !s.a.Next()
+		s.bdone = !s.b.Next()
+	}
+	return true
+}
+
+// baseChunkSeries loads the label set and chunk references for a postings
+// list from an index. It filters out series that have labels set that should be unset.
+type baseChunkSeries struct {
+	p          index.Postings
+	index      IndexReader
+	tombstones tombstones.Reader
+
+	lset      labels.Labels
+	chks      []chunks.Meta
+	intervals tombstones.Intervals
+	err       error
+}
+
+// LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
+// over them. It drops chunks based on tombstones in the given reader.
+func LookupChunkSeries(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.DeprecatedChunkSeriesSet, error) {
+	return lookupChunkSeries(false, ir, tr, ms...)
+}
+
+// LookupChunkSeriesSorted retrieves all series for the given matchers and returns a ChunkSeriesSet
+// over them. It drops chunks based on tombstones in the given reader. Series will be in order.
+func LookupChunkSeriesSorted(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.DeprecatedChunkSeriesSet, error) {
+	return lookupChunkSeries(true, ir, tr, ms...)
+}
+
+func lookupChunkSeries(sorted bool, ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.DeprecatedChunkSeriesSet, error) {
+	if tr == nil {
+		tr = tombstones.NewMemTombstones()
+	}
+	p, err := PostingsForMatchers(ir, ms...)
+	if err != nil {
+		return nil, err
+	}
+	if sorted {
+		p = ir.SortedPostings(p)
+	}
+	return &baseChunkSeries{
+		p:          p,
+		index:      ir,
+		tombstones: tr,
+	}, nil
+}
+
+func (s *baseChunkSeries) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
+	return s.lset, s.chks, s.intervals
+}
+
+func (s *baseChunkSeries) Err() error { return s.err }
+
+func (s *baseChunkSeries) Next() bool {
+	var (
+		lset     = make(labels.Labels, len(s.lset))
+		chkMetas = make([]chunks.Meta, len(s.chks))
+		err      error
+	)
+
+	for s.p.Next() {
+		ref := s.p.At()
+		if err := s.index.Series(ref, &lset, &chkMetas); err != nil {
+			// Postings may be stale. Skip if no underlying series exists.
+			if errors.Cause(err) == storage.ErrNotFound {
+				continue
+			}
+			s.err = err
+			return false
+		}
+
+		s.lset = lset
+		s.chks = chkMetas
+		s.intervals, err = s.tombstones.Get(s.p.At())
+		if err != nil {
+			s.err = errors.Wrap(err, "get tombstones")
+			return false
+		}
+
+		if len(s.intervals) > 0 {
+			// Only those chunks that are not entirely deleted.
+			chks := make([]chunks.Meta, 0, len(s.chks))
+			for _, chk := range s.chks {
+				if !(tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(s.intervals)) {
+					chks = append(chks, chk)
+				}
+			}
+
+			s.chks = chks
+		}
+
+		return true
+	}
+	if err := s.p.Err(); err != nil {
+		s.err = err
+	}
+	return false
+}
+
+// populatedChunkSeries loads chunk data from a store for a set of series
+// with known chunk references. It filters out chunks that do not fit the
+// given time range.
+type populatedChunkSeries struct {
+	set        storage.DeprecatedChunkSeriesSet
+	chunks     ChunkReader
+	mint, maxt int64
+
+	err       error
+	chks      []chunks.Meta
+	lset      labels.Labels
+	intervals tombstones.Intervals
+}
+
+func (s *populatedChunkSeries) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
+	return s.lset, s.chks, s.intervals
+}
+
+func (s *populatedChunkSeries) Err() error { return s.err }
+
+func (s *populatedChunkSeries) Next() bool {
+	for s.set.Next() {
+		lset, chks, dranges := s.set.At()
+
+		for len(chks) > 0 {
+			if chks[0].MaxTime >= s.mint {
+				break
+			}
+			chks = chks[1:]
+		}
+
+		// This is to delete in place while iterating.
+		for i, rlen := 0, len(chks); i < rlen; i++ {
+			j := i - (rlen - len(chks))
+			c := &chks[j]
+
+			// Break out at the first chunk that has no overlap with mint, maxt.
+			if c.MinTime > s.maxt {
+				chks = chks[:j]
+				break
+			}
+
+			c.Chunk, s.err = s.chunks.Chunk(c.Ref)
+			if s.err != nil {
+				// This means that the chunk has been garbage collected. Remove it from the list.
+				if s.err == storage.ErrNotFound {
+					s.err = nil
+					// Delete in-place.
+					s.chks = append(chks[:j], chks[j+1:]...)
+				}
+				return false
+			}
+		}
+
+		if len(chks) == 0 {
+			continue
+		}
+
+		s.lset = lset
+		s.chks = chks
+		s.intervals = dranges
+
+		return true
+	}
+	if err := s.set.Err(); err != nil {
+		s.err = err
+	}
+	return false
+}
+
+// blockSeriesSet is a set of series from an inverted index query.
+type blockSeriesSet struct {
+	set storage.DeprecatedChunkSeriesSet
+	err error
+	cur storage.Series
+
+	mint, maxt int64
+}
+
+func (s *blockSeriesSet) Next() bool {
+	for s.set.Next() {
+		lset, chunks, dranges := s.set.At()
+		s.cur = &chunkSeries{
+			labels: lset,
+			chunks: chunks,
+			mint:   s.mint,
+			maxt:   s.maxt,
+
+			intervals: dranges,
+		}
+		return true
+	}
+	if s.set.Err() != nil {
+		s.err = s.set.Err()
+	}
+	return false
+}
+
+func (s *blockSeriesSet) At() storage.Series { return s.cur }
+func (s *blockSeriesSet) Err() error         { return s.err }
+
+// chunkSeries is a series that is backed by a sequence of chunks holding
+// time series data.
+type chunkSeries struct {
+	labels labels.Labels
+	chunks []chunks.Meta // in-order chunk refs
+
+	mint, maxt int64
+
+	intervals tombstones.Intervals
+}
+
+func (s *chunkSeries) Labels() labels.Labels {
+	return s.labels
+}
+
+func (s *chunkSeries) Iterator() chunkenc.Iterator {
+	return newChunkSeriesIterator(s.chunks, s.intervals, s.mint, s.maxt)
+}
+
+// chainedSeries implements a series for a list of time-sorted series.
+// They all must have the same labels.
+type chainedSeries struct {
+	series []storage.Series
+}
+
+func (s *chainedSeries) Labels() labels.Labels {
+	return s.series[0].Labels()
+}
+
+func (s *chainedSeries) Iterator() chunkenc.Iterator {
+	return newChainedSeriesIterator(s.series...)
+}
+
+// chainedSeriesIterator implements a series iterator over a list
+// of time-sorted, non-overlapping iterators.
+type chainedSeriesIterator struct {
+	series []storage.Series // series in time order
+
+	i   int
+	cur chunkenc.Iterator
+}
+
+func newChainedSeriesIterator(s ...storage.Series) *chainedSeriesIterator {
+	return &chainedSeriesIterator{
+		series: s,
+		i:      0,
+		cur:    s[0].Iterator(),
+	}
+}
+
+func (it *chainedSeriesIterator) Seek(t int64) bool {
+	// We just scan the chained series sequentially as they are already
+	// pre-selected by relevant time and should be accessed sequentially anyway.
+	for i, s := range it.series[it.i:] {
+		cur := s.Iterator()
+		if !cur.Seek(t) {
+			continue
+		}
+		it.cur = cur
+		it.i += i
+		return true
+	}
+	return false
+}
+
+func (it *chainedSeriesIterator) Next() bool {
+	if it.cur.Next() {
+		return true
+	}
+	if err := it.cur.Err(); err != nil {
+		return false
+	}
+	if it.i == len(it.series)-1 {
+		return false
+	}
+
+	it.i++
+	it.cur = it.series[it.i].Iterator()
+
+	return it.Next()
+}
+
+func (it *chainedSeriesIterator) At() (t int64, v float64) {
+	return it.cur.At()
+}
+
+func (it *chainedSeriesIterator) Err() error {
+	return it.cur.Err()
+}
+
+// verticalChainedSeries implements a series for a list of time-sorted, time-overlapping series.
+// They all must have the same labels.
+type verticalChainedSeries struct {
+	series []storage.Series
+}
+
+func (s *verticalChainedSeries) Labels() labels.Labels {
+	return s.series[0].Labels()
+}
+
+func (s *verticalChainedSeries) Iterator() chunkenc.Iterator {
+	return newVerticalMergeSeriesIterator(s.series...)
+}
+
+// verticalMergeSeriesIterator implements a series iterator over a list
+// of time-sorted, time-overlapping iterators.
+type verticalMergeSeriesIterator struct {
+	a, b                  chunkenc.Iterator
+	aok, bok, initialized bool
+
+	curT int64
+	curV float64
+}
+
+func newVerticalMergeSeriesIterator(s ...storage.Series) chunkenc.Iterator {
+	if len(s) == 1 {
+		return s[0].Iterator()
+	} else if len(s) == 2 {
+		return &verticalMergeSeriesIterator{
+			a: s[0].Iterator(),
+			b: s[1].Iterator(),
+		}
+	}
+	return &verticalMergeSeriesIterator{
+		a: s[0].Iterator(),
+		b: newVerticalMergeSeriesIterator(s[1:]...),
+	}
+}
+
+func (it *verticalMergeSeriesIterator) Seek(t int64) bool {
+	it.aok, it.bok = it.a.Seek(t), it.b.Seek(t)
+	it.initialized = true
+	return it.Next()
+}
+
+func (it *verticalMergeSeriesIterator) Next() bool {
+	if !it.initialized {
+		it.aok = it.a.Next()
+		it.bok = it.b.Next()
+		it.initialized = true
+	}
+
+	if !it.aok && !it.bok {
+		return false
+	}
+
+	if !it.aok {
+		it.curT, it.curV = it.b.At()
+		it.bok = it.b.Next()
+		return true
+	}
+	if !it.bok {
+		it.curT, it.curV = it.a.At()
+		it.aok = it.a.Next()
+		return true
+	}
+
+	acurT, acurV := it.a.At()
+	bcurT, bcurV := it.b.At()
+	if acurT < bcurT {
+		it.curT, it.curV = acurT, acurV
+		it.aok = it.a.Next()
+	} else if acurT > bcurT {
+		it.curT, it.curV = bcurT, bcurV
+		it.bok = it.b.Next()
+	} else {
+		it.curT, it.curV = bcurT, bcurV
+		it.aok = it.a.Next()
+		it.bok = it.b.Next()
+	}
+	return true
+}
+
+func (it *verticalMergeSeriesIterator) At() (t int64, v float64) {
+	return it.curT, it.curV
+}
+
+func (it *verticalMergeSeriesIterator) Err() error {
+	if it.a.Err() != nil {
+		return it.a.Err()
+	}
+	return it.b.Err()
+}
+
+// chunkSeriesIterator implements a series iterator on top
+// of a list of time-sorted, non-overlapping chunks.
+type chunkSeriesIterator struct {
+	chunks []chunks.Meta
+
+	i          int
+	cur        chunkenc.Iterator
+	bufDelIter *deletedIterator
+
+	maxt, mint int64
+
+	intervals tombstones.Intervals
+}
+
+func newChunkSeriesIterator(cs []chunks.Meta, dranges tombstones.Intervals, mint, maxt int64) *chunkSeriesIterator {
+	csi := &chunkSeriesIterator{
+		chunks: cs,
+		i:      0,
+
+		mint: mint,
+		maxt: maxt,
+
+		intervals: dranges,
+	}
+	csi.resetCurIterator()
+
+	return csi
+}
+
+func (it *chunkSeriesIterator) resetCurIterator() {
+	if len(it.intervals) == 0 {
+		it.cur = it.chunks[it.i].Chunk.Iterator(it.cur)
+		return
+	}
+	if it.bufDelIter == nil {
+		it.bufDelIter = &deletedIterator{
+			intervals: it.intervals,
+		}
+	}
+	it.bufDelIter.it = it.chunks[it.i].Chunk.Iterator(it.bufDelIter.it)
+	it.cur = it.bufDelIter
+}
+
+func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
+	if t > it.maxt {
+		return false
+	}
+
+	// Seek to the first valid value after t.
+	if t < it.mint {
+		t = it.mint
+	}
+
+	for ; it.chunks[it.i].MaxTime < t; it.i++ {
+		if it.i == len(it.chunks)-1 {
+			return false
+		}
+	}
+
+	it.resetCurIterator()
+
+	for it.cur.Next() {
+		t0, _ := it.cur.At()
+		if t0 >= t {
+			return true
+		}
+	}
+	return false
+}
+
+func (it *chunkSeriesIterator) At() (t int64, v float64) {
+	return it.cur.At()
+}
+
+func (it *chunkSeriesIterator) Next() bool {
+	if it.cur.Next() {
+		t, _ := it.cur.At()
+
+		if t < it.mint {
+			if !it.Seek(it.mint) {
+				return false
+			}
+			t, _ = it.At()
+
+			return t <= it.maxt
+		}
+		if t > it.maxt {
+			return false
+		}
+		return true
+	}
+	if err := it.cur.Err(); err != nil {
+		return false
+	}
+	if it.i == len(it.chunks)-1 {
+		return false
+	}
+
+	it.i++
+	it.resetCurIterator()
+
+	return it.Next()
+}
+
+func (it *chunkSeriesIterator) Err() error {
+	return it.cur.Err()
+}
+
+// deletedIterator wraps an Iterator and makes sure any deleted metrics are not
+// returned.
+type deletedIterator struct {
+	it chunkenc.Iterator
+
+	intervals tombstones.Intervals
+}
+
+func (it *deletedIterator) At() (int64, float64) {
+	return it.it.At()
+}
+
+func (it *deletedIterator) Seek(t int64) bool {
+	if it.it.Err() != nil {
+		return false
+	}
+	if ok := it.it.Seek(t); !ok {
+		return false
+	}
+
+	// Now double check if the entry falls into a deleted interval.
+	ts, _ := it.At()
+	for _, itv := range it.intervals {
+		if ts < itv.Mint {
+			return true
+		}
+
+		if ts > itv.Maxt {
+			it.intervals = it.intervals[1:]
+			continue
+		}
+
+		// We're in the middle of an interval; we can now call Next().
+		return it.Next()
+	}
+
+	// The timestamp is greater than all the deleted intervals.
+	return true
+}
+
+func (it *deletedIterator) Next() bool {
+Outer:
+	for it.it.Next() {
+		ts, _ := it.it.At()
+
+		for _, tr := range it.intervals {
+			if tr.InBounds(ts) {
+				continue Outer
+			}
+
+			if ts <= tr.Maxt {
+				return true
+			}
+			it.intervals = it.intervals[1:]
+		}
+		return true
+	}
+	return false
+}
+
+func (it *deletedIterator) Err() error { return it.it.Err() }
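The merge rule above is easiest to see on concrete data: when both inputs sit on the same timestamp, the sample from `b` wins and both iterators advance, so overlapping series collapse to one sample per timestamp. A minimal sketch of the same selection logic, using a toy `sliceIter` in place of a real `chunkenc.Iterator`:

```go
package main

import "fmt"

// sliceIter is a toy stand-in for chunkenc.Iterator, used only to
// illustrate the merge rule; it is not part of the vendored code.
type sliceIter struct {
	ts  []int64
	vs  []float64
	pos int
}

func (it *sliceIter) Next() bool           { it.pos++; return it.pos <= len(it.ts) }
func (it *sliceIter) At() (int64, float64) { return it.ts[it.pos-1], it.vs[it.pos-1] }

func main() {
	a := &sliceIter{ts: []int64{1, 2, 3}, vs: []float64{10, 20, 30}}
	b := &sliceIter{ts: []int64{2, 4}, vs: []float64{99, 40}}

	aok, bok := a.Next(), b.Next()
	for aok || bok {
		switch {
		case !bok: // only a has data left
			t, v := a.At()
			fmt.Println(t, v)
			aok = a.Next()
		case !aok: // only b has data left
			t, v := b.At()
			fmt.Println(t, v)
			bok = b.Next()
		default:
			at, av := a.At()
			bt, bv := b.At()
			switch {
			case at < bt:
				fmt.Println(at, av)
				aok = a.Next()
			case at > bt:
				fmt.Println(bt, bv)
				bok = b.Next()
			default: // equal timestamps: b wins, both advance
				fmt.Println(bt, bv)
				aok = a.Next()
				bok = b.Next()
			}
		}
	}
	// Prints: 1 10, 2 99, 3 30, 4 40 -- one sample per timestamp.
}
```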
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go
new file mode 100644
index 0000000000000000000000000000000000000000..d63198f977a9a6d827b3502517c94ddf13eba3e2
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go
@@ -0,0 +1,227 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package record
+
+import (
+	"math"
+	"sort"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+)
+
+// Type represents the data type of a record.
+type Type uint8
+
+const (
+	// Invalid is returned for unrecognised WAL record types.
+	Invalid Type = 255
+	// Series is used to match WAL records of type Series.
+	Series Type = 1
+	// Samples is used to match WAL records of type Samples.
+	Samples Type = 2
+	// Tombstones is used to match WAL records of type Tombstones.
+	Tombstones Type = 3
+)
+
+var (
+	// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go.
+	ErrNotFound = errors.New("not found")
+)
+
+// RefSeries is the series labels with the series ID.
+type RefSeries struct {
+	Ref    uint64
+	Labels labels.Labels
+}
+
+// RefSample is a timestamp/value pair associated with a reference to a series.
+type RefSample struct {
+	Ref uint64
+	T   int64
+	V   float64
+}
+
+// Decoder decodes series, sample, and tombstone records.
+// The zero value is ready to use.
+type Decoder struct {
+}
+
+// Type returns the type of the record.
+// Returns Invalid if no valid record type is found.
+func (d *Decoder) Type(rec []byte) Type {
+	if len(rec) < 1 {
+		return Invalid
+	}
+	switch t := Type(rec[0]); t {
+	case Series, Samples, Tombstones:
+		return t
+	}
+	return Invalid
+}
+
+// Series appends series in rec to the given slice.
+func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
+	dec := encoding.Decbuf{B: rec}
+
+	if Type(dec.Byte()) != Series {
+		return nil, errors.New("invalid record type")
+	}
+	for len(dec.B) > 0 && dec.Err() == nil {
+		ref := dec.Be64()
+
+		lset := make(labels.Labels, dec.Uvarint())
+
+		for i := range lset {
+			lset[i].Name = dec.UvarintStr()
+			lset[i].Value = dec.UvarintStr()
+		}
+		sort.Sort(lset)
+
+		series = append(series, RefSeries{
+			Ref:    ref,
+			Labels: lset,
+		})
+	}
+	if dec.Err() != nil {
+		return nil, dec.Err()
+	}
+	if len(dec.B) > 0 {
+		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return series, nil
+}
+
+// Samples appends samples in rec to the given slice.
+func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
+	dec := encoding.Decbuf{B: rec}
+
+	if Type(dec.Byte()) != Samples {
+		return nil, errors.New("invalid record type")
+	}
+	if dec.Len() == 0 {
+		return samples, nil
+	}
+	var (
+		baseRef  = dec.Be64()
+		baseTime = dec.Be64int64()
+	)
+	for len(dec.B) > 0 && dec.Err() == nil {
+		dref := dec.Varint64()
+		dtime := dec.Varint64()
+		val := dec.Be64()
+
+		samples = append(samples, RefSample{
+			Ref: uint64(int64(baseRef) + dref),
+			T:   baseTime + dtime,
+			V:   math.Float64frombits(val),
+		})
+	}
+
+	if dec.Err() != nil {
+		return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples))
+	}
+	if len(dec.B) > 0 {
+		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return samples, nil
+}
+
+// Tombstones appends tombstones in rec to the given slice.
+func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
+	dec := encoding.Decbuf{B: rec}
+
+	if Type(dec.Byte()) != Tombstones {
+		return nil, errors.New("invalid record type")
+	}
+	for dec.Len() > 0 && dec.Err() == nil {
+		tstones = append(tstones, tombstones.Stone{
+			Ref: dec.Be64(),
+			Intervals: tombstones.Intervals{
+				{Mint: dec.Varint64(), Maxt: dec.Varint64()},
+			},
+		})
+	}
+	if dec.Err() != nil {
+		return nil, dec.Err()
+	}
+	if len(dec.B) > 0 {
+		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return tstones, nil
+}
+
+// Encoder encodes series, sample, and tombstone records.
+// The zero value is ready to use.
+type Encoder struct {
+}
+
+// Series appends the encoded series to b and returns the resulting slice.
+func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(Series))
+
+	for _, s := range series {
+		buf.PutBE64(s.Ref)
+		buf.PutUvarint(len(s.Labels))
+
+		for _, l := range s.Labels {
+			buf.PutUvarintStr(l.Name)
+			buf.PutUvarintStr(l.Value)
+		}
+	}
+	return buf.Get()
+}
+
+// Samples appends the encoded samples to b and returns the resulting slice.
+func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(Samples))
+
+	if len(samples) == 0 {
+		return buf.Get()
+	}
+
+	// Store base timestamp and base reference number of first sample.
+	// All samples encode their timestamp and ref as delta to those.
+	first := samples[0]
+
+	buf.PutBE64(first.Ref)
+	buf.PutBE64int64(first.T)
+
+	for _, s := range samples {
+		buf.PutVarint64(int64(s.Ref) - int64(first.Ref))
+		buf.PutVarint64(s.T - first.T)
+		buf.PutBE64(math.Float64bits(s.V))
+	}
+	return buf.Get()
+}
+
+// Tombstones appends the encoded tombstones to b and returns the resulting slice.
+func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(Tombstones))
+
+	for _, s := range tstones {
+		for _, iv := range s.Intervals {
+			buf.PutBE64(s.Ref)
+			buf.PutVarint64(iv.Mint)
+			buf.PutVarint64(iv.Maxt)
+		}
+	}
+	return buf.Get()
+}
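The sample record layout (full ref and timestamp for the first sample, varint deltas plus raw float bits for the rest) round-trips through the zero-value `Encoder` and `Decoder` defined in this file, as this short sketch shows:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	var (
		enc record.Encoder
		dec record.Decoder
	)
	in := []record.RefSample{
		{Ref: 100, T: 1000, V: 1.5},
		{Ref: 101, T: 1005, V: 2.5}, // stored as deltas to the first sample
	}
	rec := enc.Samples(in, nil)

	if dec.Type(rec) != record.Samples {
		panic("unexpected record type")
	}
	out, err := dec.Samples(rec, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [{100 1000 1.5} {101 1005 2.5}]
}
```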
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go
new file mode 100644
index 0000000000000000000000000000000000000000..efc81967e96a52f7df079ac2704dc3714e4814eb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go
@@ -0,0 +1,133 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+)
+
+// repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
+// commit 129773b41a565fde5156301e37f9a87158030443.
+func repairBadIndexVersion(logger log.Logger, dir string) error {
+	// All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected.
+	// We must actually set the index file version to 2 and revert the meta.json version back to 1.
+	dirs, err := blockDirs(dir)
+	if err != nil {
+		return errors.Wrapf(err, "list block dirs in %q", dir)
+	}
+
+	wrapErr := func(err error, d string) error {
+		return errors.Wrapf(err, "block dir: %q", d)
+	}
+
+	tmpFiles := make([]string, 0, len(dirs))
+	defer func() {
+		for _, tmp := range tmpFiles {
+			if err := os.RemoveAll(tmp); err != nil {
+				level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
+			}
+		}
+	}()
+
+	for _, d := range dirs {
+		meta, err := readBogusMetaFile(d)
+		if err != nil {
+			return wrapErr(err, d)
+		}
+		if meta.Version == metaVersion1 {
+			level.Info(logger).Log(
+				"msg", "Found healthy block",
+				"mint", meta.MinTime,
+				"maxt", meta.MaxTime,
+				"ulid", meta.ULID,
+			)
+			continue
+		}
+		level.Info(logger).Log(
+			"msg", "Fixing broken block",
+			"mint", meta.MinTime,
+			"maxt", meta.MaxTime,
+			"ulid", meta.ULID,
+		)
+
+		repl, err := os.Create(filepath.Join(d, "index.repaired"))
+		if err != nil {
+			return wrapErr(err, d)
+		}
+		tmpFiles = append(tmpFiles, repl.Name())
+
+		broken, err := os.Open(filepath.Join(d, indexFilename))
+		if err != nil {
+			return wrapErr(err, d)
+		}
+		if _, err := io.Copy(repl, broken); err != nil {
+			return wrapErr(err, d)
+		}
+
+		var merr tsdb_errors.MultiError
+
+		// Set the 5th byte to 2 to indicate the correct file format version.
+		if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
+			merr.Add(wrapErr(err, d))
+			merr.Add(wrapErr(repl.Close(), d))
+			return merr.Err()
+		}
+		if err := repl.Sync(); err != nil {
+			merr.Add(wrapErr(err, d))
+			merr.Add(wrapErr(repl.Close(), d))
+			return merr.Err()
+		}
+		if err := repl.Close(); err != nil {
+			return wrapErr(err, d)
+		}
+		if err := broken.Close(); err != nil {
+			return wrapErr(err, d)
+		}
+		if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil {
+			return wrapErr(err, d)
+		}
+		// Reset version of meta.json to 1.
+		meta.Version = metaVersion1
+		if _, err := writeMetaFile(logger, d, meta); err != nil {
+			return wrapErr(err, d)
+		}
+	}
+	return nil
+}
+
+func readBogusMetaFile(dir string) (*BlockMeta, error) {
+	b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
+	if err != nil {
+		return nil, err
+	}
+	var m BlockMeta
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	if m.Version != metaVersion1 && m.Version != 2 {
+		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
+	}
+	return &m, nil
+}
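The repair itself reduces to one positional write: offset 4 (the fifth byte) of the copied index file holds the format version, which `WriteAt` patches to 2 before syncing. A standalone sketch of that pattern, with a hypothetical scratch file name:

```go
package main

import (
	"log"
	"os"
)

func main() {
	// "index.repaired" is a hypothetical scratch copy; the repair code
	// above copies the real index there before patching it.
	f, err := os.OpenFile("index.repaired", os.O_RDWR, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Set the 5th byte (offset 4) to format version 2, then flush.
	if _, err := f.WriteAt([]byte{2}, 4); err != nil {
		log.Fatal(err)
	}
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
}
```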
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
new file mode 100644
index 0000000000000000000000000000000000000000..5246edd64f7f015b39619c77086328da628f04b6
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
@@ -0,0 +1,84 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/prometheus/pkg/labels"
+)
+
+var ErrInvalidTimes = fmt.Errorf("max time is less than min time")
+
+type MetricSample struct {
+	TimestampMs int64
+	Value       float64
+	Labels      labels.Labels
+}
+
+// CreateHead creates a TSDB writer head to write the sample data to.
+func CreateHead(samples []*MetricSample, chunkRange int64, logger log.Logger) (*Head, error) {
+	head, err := NewHead(nil, logger, nil, chunkRange, DefaultStripeSize)
+	if err != nil {
+		return nil, err
+	}
+	app := head.Appender()
+	for _, sample := range samples {
+		_, err = app.Add(sample.Labels, sample.TimestampMs, sample.Value)
+		if err != nil {
+			return nil, err
+		}
+	}
+	err = app.Commit()
+	if err != nil {
+		return nil, err
+	}
+	return head, nil
+}
+
+// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
+func CreateBlock(samples []*MetricSample, dir string, mint, maxt int64, logger log.Logger) (string, error) {
+	chunkRange := maxt - mint
+	if chunkRange == 0 {
+		chunkRange = DefaultBlockDuration
+	}
+	if chunkRange < 0 {
+		return "", ErrInvalidTimes
+	}
+	head, err := CreateHead(samples, chunkRange, logger)
+	if err != nil {
+		return "", err
+	}
+
+	compactor, err := NewLeveledCompactor(context.Background(), nil, logger, ExponentialBlockRanges(DefaultBlockDuration, 3, 5), nil)
+	if err != nil {
+		return "", err
+	}
+
+	err = os.MkdirAll(dir, 0777)
+	if err != nil {
+		return "", err
+	}
+
+	ulid, err := compactor.Write(dir, head, mint, maxt, nil)
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(dir, ulid.String()), nil
+}
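As a usage sketch (the directory, labels, and timestamps are made up), `CreateBlock` turns an in-memory sample set into an on-disk block and returns the ULID-named block directory; `maxt` is assumed exclusive here, so `2001` covers the sample at `2000`:

```go
package main

import (
	"fmt"
	"log"

	kitlog "github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	samples := []*tsdb.MetricSample{
		{TimestampMs: 1000, Value: 1, Labels: labels.FromStrings("__name__", "up", "job", "demo")},
		{TimestampMs: 2000, Value: 0, Labels: labels.FromStrings("__name__", "up", "job", "demo")},
	}
	// "/tmp/tsdb-blocks" is an arbitrary example directory.
	dir, err := tsdb.CreateBlock(samples, "/tmp/tsdb-blocks", 1000, 2001, kitlog.NewNopLogger())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote block:", dir)
}
```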
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e503a1481b51fa6b015b1153279a7efcc9e2ab2
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go
@@ -0,0 +1,1306 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+	"github.com/prometheus/prometheus/tsdb/wal"
+)
+
+// WALEntryType indicates what data a WAL entry contains.
+type WALEntryType uint8
+
+const (
+	// WALMagic is a 4 byte number every WAL segment file starts with.
+	WALMagic = uint32(0x43AF00EF)
+
+	// WALFormatDefault is the version flag for the default outer segment file format.
+	WALFormatDefault = byte(1)
+)
+
+// Entry types in a segment file.
+const (
+	WALEntrySymbols WALEntryType = 1
+	WALEntrySeries  WALEntryType = 2
+	WALEntrySamples WALEntryType = 3
+	WALEntryDeletes WALEntryType = 4
+)
+
+type walMetrics struct {
+	fsyncDuration prometheus.Summary
+	corruptions   prometheus.Counter
+}
+
+func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics {
+	m := &walMetrics{}
+
+	m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:       "prometheus_tsdb_wal_fsync_duration_seconds",
+		Help:       "Duration of WAL fsync.",
+		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+	})
+	m.corruptions = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_corruptions_total",
+		Help: "Total number of WAL corruptions.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.fsyncDuration,
+			m.corruptions,
+		)
+	}
+	return m
+}
+
+// WAL is a write ahead log that can log new series labels and samples.
+// It must be completely read before new entries are logged.
+//
+// DEPRECATED: use wal pkg combined with the record codecs instead.
+type WAL interface {
+	Reader() WALReader
+	LogSeries([]record.RefSeries) error
+	LogSamples([]record.RefSample) error
+	LogDeletes([]tombstones.Stone) error
+	Truncate(mint int64, keep func(uint64) bool) error
+	Close() error
+}
+
+// WALReader reads entries from a WAL.
+type WALReader interface {
+	Read(
+		seriesf func([]record.RefSeries),
+		samplesf func([]record.RefSample),
+		deletesf func([]tombstones.Stone),
+	) error
+}
+
+// segmentFile wraps a file object of a segment and tracks the highest timestamp
+// it contains. During WAL truncating, all segments with no higher timestamp than
+// the truncation threshold can be compacted.
+type segmentFile struct {
+	*os.File
+	maxTime   int64  // highest tombstone or sample timestamp in segment
+	minSeries uint64 // lowest series ID in segment
+}
+
+func newSegmentFile(f *os.File) *segmentFile {
+	return &segmentFile{
+		File:      f,
+		maxTime:   math.MinInt64,
+		minSeries: math.MaxUint64,
+	}
+}
+
+const (
+	walSegmentSizeBytes = 256 * 1024 * 1024 // 256 MB
+)
+
+// The table gets initialized with sync.Once but may still cause a race
+// with any other use of the crc32 package anywhere. Thus we initialize it
+// beforehand.
+var castagnoliTable *crc32.Table
+
+func init() {
+	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+}
+
+// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
+// polynomial may be easily changed in one location at a later time, if necessary.
+func newCRC32() hash.Hash32 {
+	return crc32.New(castagnoliTable)
+}
+
+// SegmentWAL is a write ahead log for series data.
+//
+// DEPRECATED: use wal pkg combined with the record codecs instead.
+type SegmentWAL struct {
+	mtx     sync.Mutex
+	metrics *walMetrics
+
+	dirFile *os.File
+	files   []*segmentFile
+
+	logger        log.Logger
+	flushInterval time.Duration
+	segmentSize   int64
+
+	crc32 hash.Hash32
+	cur   *bufio.Writer
+	curN  int64
+
+	stopc   chan struct{}
+	donec   chan struct{}
+	actorc  chan func() error // sequentialized background operations
+	buffers sync.Pool
+}
+
+// OpenSegmentWAL opens or creates a write ahead log in the given directory.
+// The WAL must be read completely before new data is written.
+func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, r prometheus.Registerer) (*SegmentWAL, error) {
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return nil, err
+	}
+	df, err := fileutil.OpenDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	w := &SegmentWAL{
+		dirFile:       df,
+		logger:        logger,
+		flushInterval: flushInterval,
+		donec:         make(chan struct{}),
+		stopc:         make(chan struct{}),
+		actorc:        make(chan func() error, 1),
+		segmentSize:   walSegmentSizeBytes,
+		crc32:         newCRC32(),
+	}
+	w.metrics = newWalMetrics(w, r)
+
+	fns, err := sequenceFiles(w.dirFile.Name())
+	if err != nil {
+		return nil, err
+	}
+
+	for i, fn := range fns {
+		f, err := w.openSegmentFile(fn)
+		if err == nil {
+			w.files = append(w.files, newSegmentFile(f))
+			continue
+		}
+		level.Warn(logger).Log("msg", "Invalid segment file detected, truncating WAL", "err", err, "file", fn)
+
+		for _, fn := range fns[i:] {
+			if err := os.Remove(fn); err != nil {
+				return w, errors.Wrap(err, "removing segment failed")
+			}
+		}
+		break
+	}
+
+	go w.run(flushInterval)
+
+	return w, nil
+}
+
+// repairingWALReader wraps a WAL reader and truncates its underlying SegmentWAL after the last
+// valid entry if it encounters corruption.
+type repairingWALReader struct {
+	wal *SegmentWAL
+	r   WALReader
+}
+
+func (r *repairingWALReader) Read(
+	seriesf func([]record.RefSeries),
+	samplesf func([]record.RefSample),
+	deletesf func([]tombstones.Stone),
+) error {
+	err := r.r.Read(seriesf, samplesf, deletesf)
+	if err == nil {
+		return nil
+	}
+	cerr, ok := errors.Cause(err).(walCorruptionErr)
+	if !ok {
+		return err
+	}
+	r.wal.metrics.corruptions.Inc()
+	return r.wal.truncate(cerr.err, cerr.file, cerr.lastOffset)
+}
+
+// truncate the WAL after the last valid entry.
+func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
+	level.Error(w.logger).Log("msg", "WAL corruption detected; truncating",
+		"err", err, "file", w.files[file].Name(), "pos", lastOffset)
+
+	// Close and delete all files after the current one.
+	for _, f := range w.files[file+1:] {
+		if err := f.Close(); err != nil {
+			return err
+		}
+		if err := os.Remove(f.Name()); err != nil {
+			return err
+		}
+	}
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	w.files = w.files[:file+1]
+
+	// Seek the current file to the last valid offset where we continue writing from.
+	_, err = w.files[file].Seek(lastOffset, io.SeekStart)
+	return err
+}
+
+// Reader returns a new reader over the write ahead log data.
+// It must be completely consumed before writing to the WAL.
+func (w *SegmentWAL) Reader() WALReader {
+	return &repairingWALReader{
+		wal: w,
+		r:   newWALReader(w.files, w.logger),
+	}
+}
+
+func (w *SegmentWAL) getBuffer() *encoding.Encbuf {
+	b := w.buffers.Get()
+	if b == nil {
+		return &encoding.Encbuf{B: make([]byte, 0, 64*1024)}
+	}
+	return b.(*encoding.Encbuf)
+}
+
+func (w *SegmentWAL) putBuffer(b *encoding.Encbuf) {
+	b.Reset()
+	w.buffers.Put(b)
+}
+
+// Truncate deletes the values prior to mint and the series that the keep function
+// does not mark for preservation.
+func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
+	// The last segment is always active.
+	if len(w.files) < 2 {
+		return nil
+	}
+	var candidates []*segmentFile
+
+	// All files have to be traversed as there could be two segments for a block,
+	// with the first one covering times (10000, 20000) and the second one (0, 10000).
+	for _, sf := range w.files[:len(w.files)-1] {
+		if sf.maxTime >= mint {
+			break
+		}
+		// Past WAL files are closed. We have to reopen them for another read.
+		f, err := w.openSegmentFile(sf.Name())
+		if err != nil {
+			return errors.Wrap(err, "open old WAL segment for read")
+		}
+		candidates = append(candidates, &segmentFile{
+			File:      f,
+			minSeries: sf.minSeries,
+			maxTime:   sf.maxTime,
+		})
+	}
+	if len(candidates) == 0 {
+		return nil
+	}
+
+	r := newWALReader(candidates, w.logger)
+
+	// Create a new tmp file.
+	f, err := w.createSegmentFile(filepath.Join(w.dirFile.Name(), "compact.tmp"))
+	if err != nil {
+		return errors.Wrap(err, "create compaction segment")
+	}
+	defer func() {
+		if err := os.RemoveAll(f.Name()); err != nil {
+			level.Error(w.logger).Log("msg", "remove tmp file", "err", err.Error())
+		}
+	}()
+
+	var (
+		csf          = newSegmentFile(f)
+		crc32        = newCRC32()
+		decSeries    = []record.RefSeries{}
+		activeSeries = []record.RefSeries{}
+	)
+
+	for r.next() {
+		rt, flag, byt := r.at()
+
+		if rt != WALEntrySeries {
+			continue
+		}
+		decSeries = decSeries[:0]
+		activeSeries = activeSeries[:0]
+
+		err := r.decodeSeries(flag, byt, &decSeries)
+		if err != nil {
+			return errors.Wrap(err, "decode samples while truncating")
+		}
+		for _, s := range decSeries {
+			if keep(s.Ref) {
+				activeSeries = append(activeSeries, s)
+			}
+		}
+
+		buf := w.getBuffer()
+		flag = w.encodeSeries(buf, activeSeries)
+
+		_, err = w.writeTo(csf, crc32, WALEntrySeries, flag, buf.Get())
+		w.putBuffer(buf)
+
+		if err != nil {
+			return errors.Wrap(err, "write to compaction segment")
+		}
+	}
+	if r.Err() != nil {
+		return errors.Wrap(r.Err(), "read candidate WAL files")
+	}
+
+	off, err := csf.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	if err := csf.Truncate(off); err != nil {
+		return err
+	}
+	if err := csf.Sync(); err != nil {
+		return err
+	}
+	if err := csf.Close(); err != nil {
+		return err
+	}
+
+	_ = candidates[0].Close() // must close before removing on Windows
+	if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil {
+		return errors.Wrap(err, "rename compaction segment")
+	}
+	for _, f := range candidates[1:] {
+		f.Close() // must close before removing on Windows
+		if err := os.RemoveAll(f.Name()); err != nil {
+			return errors.Wrap(err, "delete WAL segment file")
+		}
+	}
+	if err := w.dirFile.Sync(); err != nil {
+		return err
+	}
+
+	// The file object of csf still holds the name before rename. Recreate it so
+	// subsequent truncations do not look at a non-existent file name.
+	csf.File, err = w.openSegmentFile(candidates[0].Name())
+	if err != nil {
+		return err
+	}
+	// We don't need it to be open.
+	if err := csf.Close(); err != nil {
+		return err
+	}
+
+	w.mtx.Lock()
+	w.files = append([]*segmentFile{csf}, w.files[len(candidates):]...)
+	w.mtx.Unlock()
+
+	return nil
+}
+
+// LogSeries writes a batch of new series labels to the log.
+// The series have to be ordered.
+func (w *SegmentWAL) LogSeries(series []record.RefSeries) error {
+	buf := w.getBuffer()
+
+	flag := w.encodeSeries(buf, series)
+
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	err := w.write(WALEntrySeries, flag, buf.Get())
+
+	w.putBuffer(buf)
+
+	if err != nil {
+		return errors.Wrap(err, "log series")
+	}
+
+	tf := w.head()
+
+	for _, s := range series {
+		if tf.minSeries > s.Ref {
+			tf.minSeries = s.Ref
+		}
+	}
+	return nil
+}
+
+// LogSamples writes a batch of new samples to the log.
+func (w *SegmentWAL) LogSamples(samples []record.RefSample) error {
+	buf := w.getBuffer()
+
+	flag := w.encodeSamples(buf, samples)
+
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	err := w.write(WALEntrySamples, flag, buf.Get())
+
+	w.putBuffer(buf)
+
+	if err != nil {
+		return errors.Wrap(err, "log series")
+	}
+	tf := w.head()
+
+	for _, s := range samples {
+		if tf.maxTime < s.T {
+			tf.maxTime = s.T
+		}
+	}
+	return nil
+}
+
+// LogDeletes writes a batch of new deletes to the log.
+func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error {
+	buf := w.getBuffer()
+
+	flag := w.encodeDeletes(buf, stones)
+
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	err := w.write(WALEntryDeletes, flag, buf.Get())
+
+	w.putBuffer(buf)
+
+	if err != nil {
+		return errors.Wrap(err, "log series")
+	}
+	tf := w.head()
+
+	for _, s := range stones {
+		for _, iv := range s.Intervals {
+			if tf.maxTime < iv.Maxt {
+				tf.maxTime = iv.Maxt
+			}
+		}
+	}
+	return nil
+}
+
+// openSegmentFile opens the given segment file and consumes and validates header.
+func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
+	// We must open all files in read/write mode as we may have to truncate along
+	// the way and any file may become the head.
+	f, err := os.OpenFile(name, os.O_RDWR, 0666)
+	if err != nil {
+		return nil, err
+	}
+	metab := make([]byte, 8)
+
+	// If there is an error, we must close f on Windows before it is garbage
+	// collected; otherwise subsequent file operations may fail.
+	hasError := true
+	defer func() {
+		if hasError {
+			f.Close()
+		}
+	}()
+
+	if n, err := f.Read(metab); err != nil {
+		return nil, errors.Wrapf(err, "validate meta %q", f.Name())
+	} else if n != 8 {
+		return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
+	}
+
+	if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {
+		return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name())
+	}
+	if metab[4] != WALFormatDefault {
+		return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
+	}
+	hasError = false
+	return f, nil
+}
+
+// createSegmentFile creates a new segment file with the given name. It preallocates
+// the standard segment size if possible and writes the header.
+func (w *SegmentWAL) createSegmentFile(name string) (*os.File, error) {
+	f, err := os.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
+		return nil, err
+	}
+	// Write header metadata for new file.
+	metab := make([]byte, 8)
+	binary.BigEndian.PutUint32(metab[:4], WALMagic)
+	metab[4] = WALFormatDefault
+
+	if _, err := f.Write(metab); err != nil {
+		return nil, err
+	}
+	return f, err
+}
+
+// cut finishes the currently active segments and opens the next one.
+// The encoder is reset to point to the new segment.
+func (w *SegmentWAL) cut() error {
+	// Sync current head to disk and close.
+	if hf := w.head(); hf != nil {
+		if err := w.flush(); err != nil {
+			return err
+		}
+		// Finish last segment asynchronously to not block the WAL moving along
+		// in the new segment.
+		go func() {
+			w.actorc <- func() error {
+				off, err := hf.Seek(0, io.SeekCurrent)
+				if err != nil {
+					return errors.Wrapf(err, "finish old segment %s", hf.Name())
+				}
+				if err := hf.Truncate(off); err != nil {
+					return errors.Wrapf(err, "finish old segment %s", hf.Name())
+				}
+				if err := hf.Sync(); err != nil {
+					return errors.Wrapf(err, "finish old segment %s", hf.Name())
+				}
+				if err := hf.Close(); err != nil {
+					return errors.Wrapf(err, "finish old segment %s", hf.Name())
+				}
+				return nil
+			}
+		}()
+	}
+
+	p, _, err := nextSequenceFile(w.dirFile.Name())
+	if err != nil {
+		return err
+	}
+	f, err := w.createSegmentFile(p)
+	if err != nil {
+		return err
+	}
+
+	go func() {
+		w.actorc <- func() error {
+			return errors.Wrap(w.dirFile.Sync(), "sync WAL directory")
+		}
+	}()
+
+	w.files = append(w.files, newSegmentFile(f))
+
+	// TODO(gouthamve): make the buffer size a constant.
+	w.cur = bufio.NewWriterSize(f, 8*1024*1024)
+	w.curN = 8
+
+	return nil
+}
+
+func (w *SegmentWAL) head() *segmentFile {
+	if len(w.files) == 0 {
+		return nil
+	}
+	return w.files[len(w.files)-1]
+}
+
+// Sync flushes the changes to disk.
+func (w *SegmentWAL) Sync() error {
+	var head *segmentFile
+	var err error
+
+	// Flush the writer and retrieve the reference to the head segment under mutex lock.
+	func() {
+		w.mtx.Lock()
+		defer w.mtx.Unlock()
+		if err = w.flush(); err != nil {
+			return
+		}
+		head = w.head()
+	}()
+	if err != nil {
+		return errors.Wrap(err, "flush buffer")
+	}
+	if head != nil {
+		// But only fsync the head segment after releasing the mutex as it will block on disk I/O.
+		start := time.Now()
+		err := fileutil.Fdatasync(head.File)
+		w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
+		return err
+	}
+	return nil
+}
+
+func (w *SegmentWAL) sync() error {
+	if err := w.flush(); err != nil {
+		return err
+	}
+	if w.head() == nil {
+		return nil
+	}
+
+	start := time.Now()
+	err := fileutil.Fdatasync(w.head().File)
+	w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
+	return err
+}
+
+func (w *SegmentWAL) flush() error {
+	if w.cur == nil {
+		return nil
+	}
+	return w.cur.Flush()
+}
+
+func (w *SegmentWAL) run(interval time.Duration) {
+	var tick <-chan time.Time
+
+	if interval > 0 {
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+		tick = ticker.C
+	}
+	defer close(w.donec)
+
+	for {
+		// Processing all enqueued operations has precedence over shutdown and
+		// background syncs.
+		select {
+		case f := <-w.actorc:
+			if err := f(); err != nil {
+				level.Error(w.logger).Log("msg", "operation failed", "err", err)
+			}
+			continue
+		default:
+		}
+		select {
+		case <-w.stopc:
+			return
+		case f := <-w.actorc:
+			if err := f(); err != nil {
+				level.Error(w.logger).Log("msg", "operation failed", "err", err)
+			}
+		case <-tick:
+			if err := w.Sync(); err != nil {
+				level.Error(w.logger).Log("msg", "sync failed", "err", err)
+			}
+		}
+	}
+}
+
+// Close syncs all data and closes the underlying resources.
+func (w *SegmentWAL) Close() error {
+	// Make sure you can call Close() multiple times.
+	select {
+	case <-w.stopc:
+		return nil // Already closed.
+	default:
+	}
+
+	close(w.stopc)
+	<-w.donec
+
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	if err := w.sync(); err != nil {
+		return err
+	}
+	// On opening, a WAL must be fully consumed once. Afterwards
+	// only the current segment will still be open.
+	if hf := w.head(); hf != nil {
+		if err := hf.Close(); err != nil {
+			return errors.Wrapf(err, "closing WAL head %s", hf.Name())
+		}
+	}
+
+	return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name())
+}
+
+func (w *SegmentWAL) write(t WALEntryType, flag uint8, buf []byte) error {
+	// Cut to the next segment if the entry exceeds the file size unless it would also
+	// exceed the size of a new segment.
+	// TODO(gouthamve): Add a test for this case where the commit is greater than segmentSize.
+	var (
+		sz    = int64(len(buf)) + 6
+		newsz = w.curN + sz
+	)
+	// XXX(fabxc): this currently cuts a new file whenever the WAL was newly opened.
+	// Probably fine in general but may yield a lot of short files in some cases.
+	if w.cur == nil || w.curN > w.segmentSize || (newsz > w.segmentSize && sz <= w.segmentSize) {
+		if err := w.cut(); err != nil {
+			return err
+		}
+	}
+	n, err := w.writeTo(w.cur, w.crc32, t, flag, buf)
+
+	w.curN += int64(n)
+
+	return err
+}
+
+func (w *SegmentWAL) writeTo(wr io.Writer, crc32 hash.Hash, t WALEntryType, flag uint8, buf []byte) (int, error) {
+	if len(buf) == 0 {
+		return 0, nil
+	}
+	crc32.Reset()
+	wr = io.MultiWriter(crc32, wr)
+
+	var b [6]byte
+	b[0] = byte(t)
+	b[1] = flag
+
+	binary.BigEndian.PutUint32(b[2:], uint32(len(buf)))
+
+	n1, err := wr.Write(b[:])
+	if err != nil {
+		return n1, err
+	}
+	n2, err := wr.Write(buf)
+	if err != nil {
+		return n1 + n2, err
+	}
+	n3, err := wr.Write(crc32.Sum(b[:0]))
+
+	return n1 + n2 + n3, err
+}
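Every entry emitted by `writeTo` is framed as a 6-byte header (type byte, flag byte, big-endian payload length), the payload itself, and a 4-byte Castagnoli CRC32 computed over header and payload together. A self-contained sketch of the same framing:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// frame mirrors SegmentWAL.writeTo: 6-byte header, payload, then a
// Castagnoli CRC32 over header and payload together.
func frame(t, flag byte, payload []byte) []byte {
	var hdr [6]byte
	hdr[0] = t
	hdr[1] = flag
	binary.BigEndian.PutUint32(hdr[2:], uint32(len(payload)))

	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	h.Write(hdr[:])
	h.Write(payload)

	var buf bytes.Buffer
	buf.Write(hdr[:])
	buf.Write(payload)
	buf.Write(h.Sum(nil)) // crc32's Sum appends the big-endian checksum
	return buf.Bytes()
}

func main() {
	entry := frame(2 /* WALEntrySeries */, 1, []byte("payload"))
	fmt.Printf("% x\n", entry) // 6-byte header + 7-byte payload + 4-byte CRC
}
```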
+
+const (
+	walSeriesSimple  = 1
+	walSamplesSimple = 1
+	walDeletesSimple = 1
+)
+
+func (w *SegmentWAL) encodeSeries(buf *encoding.Encbuf, series []record.RefSeries) uint8 {
+	for _, s := range series {
+		buf.PutBE64(s.Ref)
+		buf.PutUvarint(len(s.Labels))
+
+		for _, l := range s.Labels {
+			buf.PutUvarintStr(l.Name)
+			buf.PutUvarintStr(l.Value)
+		}
+	}
+	return walSeriesSimple
+}
+
+func (w *SegmentWAL) encodeSamples(buf *encoding.Encbuf, samples []record.RefSample) uint8 {
+	if len(samples) == 0 {
+		return walSamplesSimple
+	}
+	// Store base timestamp and base reference number of first sample.
+	// All samples encode their timestamp and ref as delta to those.
+	//
+	// TODO(fabxc): optimize for all samples having the same timestamp.
+	first := samples[0]
+
+	buf.PutBE64(first.Ref)
+	buf.PutBE64int64(first.T)
+
+	for _, s := range samples {
+		buf.PutVarint64(int64(s.Ref) - int64(first.Ref))
+		buf.PutVarint64(s.T - first.T)
+		buf.PutBE64(math.Float64bits(s.V))
+	}
+	return walSamplesSimple
+}
+
+func (w *SegmentWAL) encodeDeletes(buf *encoding.Encbuf, stones []tombstones.Stone) uint8 {
+	for _, s := range stones {
+		for _, iv := range s.Intervals {
+			buf.PutBE64(s.Ref)
+			buf.PutVarint64(iv.Mint)
+			buf.PutVarint64(iv.Maxt)
+		}
+	}
+	return walDeletesSimple
+}
+
+// walReader decodes and emits write ahead log entries.
+type walReader struct {
+	logger log.Logger
+
+	files []*segmentFile
+	cur   int
+	buf   []byte
+	crc32 hash.Hash32
+
+	curType    WALEntryType
+	curFlag    byte
+	curBuf     []byte
+	lastOffset int64 // offset after last successfully read entry
+
+	err error
+}
+
+func newWALReader(files []*segmentFile, l log.Logger) *walReader {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	return &walReader{
+		logger: l,
+		files:  files,
+		buf:    make([]byte, 0, 128*4096),
+		crc32:  newCRC32(),
+	}
+}
+
+// Err returns the last error the reader encountered.
+func (r *walReader) Err() error {
+	return r.err
+}
+
+func (r *walReader) Read(
+	seriesf func([]record.RefSeries),
+	samplesf func([]record.RefSample),
+	deletesf func([]tombstones.Stone),
+) error {
+	// Concurrency for replaying the WAL is very limited. We at least split out decoding and
+	// processing into separate threads.
+	// Historically, the processing is the bottleneck with reading and decoding using only
+	// 15% of the CPU.
+	var (
+		seriesPool sync.Pool
+		samplePool sync.Pool
+		deletePool sync.Pool
+	)
+	donec := make(chan struct{})
+	datac := make(chan interface{}, 100)
+
+	go func() {
+		defer close(donec)
+
+		for x := range datac {
+			switch v := x.(type) {
+			case []record.RefSeries:
+				if seriesf != nil {
+					seriesf(v)
+				}
+				//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+				seriesPool.Put(v[:0])
+			case []record.RefSample:
+				if samplesf != nil {
+					samplesf(v)
+				}
+				//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+				samplePool.Put(v[:0])
+			case []tombstones.Stone:
+				if deletesf != nil {
+					deletesf(v)
+				}
+				//lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+				deletePool.Put(v[:0])
+			default:
+				level.Error(r.logger).Log("msg", "unexpected data type")
+			}
+		}
+	}()
+
+	var err error
+
+	for r.next() {
+		et, flag, b := r.at()
+
+		// In decoding below we never return a walCorruptionErr for now.
+		// Those should generally be caught by entry decoding before.
+		switch et {
+		case WALEntrySeries:
+			var series []record.RefSeries
+			if v := seriesPool.Get(); v == nil {
+				series = make([]record.RefSeries, 0, 512)
+			} else {
+				series = v.([]record.RefSeries)
+			}
+
+			err = r.decodeSeries(flag, b, &series)
+			if err != nil {
+				err = errors.Wrap(err, "decode series entry")
+				break
+			}
+			datac <- series
+
+			cf := r.current()
+			for _, s := range series {
+				if cf.minSeries > s.Ref {
+					cf.minSeries = s.Ref
+				}
+			}
+		case WALEntrySamples:
+			var samples []record.RefSample
+			if v := samplePool.Get(); v == nil {
+				samples = make([]record.RefSample, 0, 512)
+			} else {
+				samples = v.([]record.RefSample)
+			}
+
+			err = r.decodeSamples(flag, b, &samples)
+			if err != nil {
+				err = errors.Wrap(err, "decode samples entry")
+				break
+			}
+			datac <- samples
+
+			// Update the times for the WAL segment file.
+			cf := r.current()
+			for _, s := range samples {
+				if cf.maxTime < s.T {
+					cf.maxTime = s.T
+				}
+			}
+		case WALEntryDeletes:
+			var deletes []tombstones.Stone
+			if v := deletePool.Get(); v == nil {
+				deletes = make([]tombstones.Stone, 0, 512)
+			} else {
+				deletes = v.([]tombstones.Stone)
+			}
+
+			err = r.decodeDeletes(flag, b, &deletes)
+			if err != nil {
+				err = errors.Wrap(err, "decode delete entry")
+				break
+			}
+			datac <- deletes
+
+			// Update the times for the WAL segment file.
+			cf := r.current()
+			for _, s := range deletes {
+				for _, iv := range s.Intervals {
+					if cf.maxTime < iv.Maxt {
+						cf.maxTime = iv.Maxt
+					}
+				}
+			}
+		}
+	}
+	close(datac)
+	<-donec
+
+	if err != nil {
+		return err
+	}
+	if r.Err() != nil {
+		return errors.Wrap(r.Err(), "read entry")
+	}
+	return nil
+}
+
+func (r *walReader) at() (WALEntryType, byte, []byte) {
+	return r.curType, r.curFlag, r.curBuf
+}
+
+// next decodes the next entry pair and returns true
+// if it was successful.
+func (r *walReader) next() bool {
+	if r.cur >= len(r.files) {
+		return false
+	}
+	cf := r.files[r.cur]
+
+	// Remember the offset after the last correctly read entry. If the next one
+	// is corrupted, this is where we can safely truncate.
+	r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
+	if r.err != nil {
+		return false
+	}
+
+	et, flag, b, err := r.entry(cf)
+	// If we reached the end of the reader, advance to the next one
+	// and close.
+	// Do not close on the last one as it will still be appended to.
+	if err == io.EOF {
+		if r.cur == len(r.files)-1 {
+			return false
+		}
+		// Current reader completed, close and move to the next one.
+		if err := cf.Close(); err != nil {
+			r.err = err
+			return false
+		}
+		r.cur++
+		return r.next()
+	}
+	if err != nil {
+		r.err = err
+		return false
+	}
+
+	r.curType = et
+	r.curFlag = flag
+	r.curBuf = b
+	return r.err == nil
+}
+
+func (r *walReader) current() *segmentFile {
+	return r.files[r.cur]
+}
+
+// walCorruptionErr is a type wrapper for errors that indicate WAL corruption
+// and trigger a truncation.
+type walCorruptionErr struct {
+	err        error
+	file       int
+	lastOffset int64
+}
+
+func (e walCorruptionErr) Error() string {
+	return fmt.Sprintf("%s ", e.err, e.file, e.lastOffset)
+}
+
+func (r *walReader) corruptionErr(s string, args ...interface{}) error {
+	return walCorruptionErr{
+		err:        errors.Errorf(s, args...),
+		file:       r.cur,
+		lastOffset: r.lastOffset,
+	}
+}
+
+func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
+	r.crc32.Reset()
+	tr := io.TeeReader(cr, r.crc32)
+
+	b := make([]byte, 6)
+	if n, err := tr.Read(b); err != nil {
+		return 0, 0, nil, err
+	} else if n != 6 {
+		return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n)
+	}
+
+	var (
+		etype  = WALEntryType(b[0])
+		flag   = b[1]
+		length = int(binary.BigEndian.Uint32(b[2:]))
+	)
+	// Exit if we reached pre-allocated space.
+	if etype == 0 {
+		return 0, 0, nil, io.EOF
+	}
+	if etype != WALEntrySeries && etype != WALEntrySamples && etype != WALEntryDeletes {
+		return 0, 0, nil, r.corruptionErr("invalid entry type %d", etype)
+	}
+
+	if length > len(r.buf) {
+		r.buf = make([]byte, length)
+	}
+	buf := r.buf[:length]
+
+	if n, err := tr.Read(buf); err != nil {
+		return 0, 0, nil, err
+	} else if n != length {
+		return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n)
+	}
+
+	if n, err := cr.Read(b[:4]); err != nil {
+		return 0, 0, nil, err
+	} else if n != 4 {
+		return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n)
+	}
+	if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp {
+		return 0, 0, nil, r.corruptionErr("unexpected CRC32 checksum %x, want %x", has, exp)
+	}
+
+	return etype, flag, buf, nil
+}
+
+func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) error {
+	dec := encoding.Decbuf{B: b}
+
+	for len(dec.B) > 0 && dec.Err() == nil {
+		ref := dec.Be64()
+
+		lset := make(labels.Labels, dec.Uvarint())
+
+		for i := range lset {
+			lset[i].Name = dec.UvarintStr()
+			lset[i].Value = dec.UvarintStr()
+		}
+		sort.Sort(lset)
+
+		*res = append(*res, record.RefSeries{
+			Ref:    ref,
+			Labels: lset,
+		})
+	}
+	if dec.Err() != nil {
+		return dec.Err()
+	}
+	if len(dec.B) > 0 {
+		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return nil
+}
+
+func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) error {
+	if len(b) == 0 {
+		return nil
+	}
+	dec := encoding.Decbuf{B: b}
+
+	var (
+		baseRef  = dec.Be64()
+		baseTime = dec.Be64int64()
+	)
+
+	for len(dec.B) > 0 && dec.Err() == nil {
+		dref := dec.Varint64()
+		dtime := dec.Varint64()
+		val := dec.Be64()
+
+		*res = append(*res, record.RefSample{
+			Ref: uint64(int64(baseRef) + dref),
+			T:   baseTime + dtime,
+			V:   math.Float64frombits(val),
+		})
+	}
+
+	if dec.Err() != nil {
+		return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res))
+	}
+	if len(dec.B) > 0 {
+		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return nil
+}
+
+func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]tombstones.Stone) error {
+	dec := &encoding.Decbuf{B: b}
+
+	for dec.Len() > 0 && dec.Err() == nil {
+		*res = append(*res, tombstones.Stone{
+			Ref: dec.Be64(),
+			Intervals: tombstones.Intervals{
+				{Mint: dec.Varint64(), Maxt: dec.Varint64()},
+			},
+		})
+	}
+	if dec.Err() != nil {
+		return dec.Err()
+	}
+	if len(dec.B) > 0 {
+		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return nil
+}
+
+func deprecatedWALExists(logger log.Logger, dir string) (bool, error) {
+	// Detect whether we still have the old WAL.
+	fns, err := sequenceFiles(dir)
+	if err != nil && !os.IsNotExist(err) {
+		return false, errors.Wrap(err, "list sequence files")
+	}
+	if len(fns) == 0 {
+		return false, nil // No WAL at all yet.
+	}
+	// Check header of first segment to see whether we are still dealing with an
+	// old WAL.
+	f, err := os.Open(fns[0])
+	if err != nil {
+		return false, errors.Wrap(err, "check first existing segment")
+	}
+	defer f.Close()
+
+	var hdr [4]byte
+	if _, err := f.Read(hdr[:]); err != nil && err != io.EOF {
+		return false, errors.Wrap(err, "read header from first segment")
+	}
+	// If we cannot read the magic header for segments of the old WAL, abort.
+	// Either it's migrated already or there's a corruption issue with which
+	// we cannot deal here anyway. Subsequent attempts to open the WAL will error in that case.
+	if binary.BigEndian.Uint32(hdr[:]) != WALMagic {
+		return false, nil
+	}
+	return true, nil
+}
+
+// MigrateWAL rewrites the deprecated write ahead log into the new format.
+func MigrateWAL(logger log.Logger, dir string) (err error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists {
+		return err
+	}
+	level.Info(logger).Log("msg", "Migrating WAL format")
+
+	tmpdir := dir + ".tmp"
+	if err := os.RemoveAll(tmpdir); err != nil {
+		return errors.Wrap(err, "cleanup replacement dir")
+	}
+	repl, err := wal.New(logger, nil, tmpdir, false)
+	if err != nil {
+		return errors.Wrap(err, "open new WAL")
+	}
+
+	// It should've already been closed as part of the previous finalization.
+	// Do it once again in case of prior errors.
+	defer func() {
+		if err != nil {
+			repl.Close()
+		}
+	}()
+
+	w, err := OpenSegmentWAL(dir, logger, time.Minute, nil)
+	if err != nil {
+		return errors.Wrap(err, "open old WAL")
+	}
+	defer w.Close()
+
+	rdr := w.Reader()
+
+	var (
+		enc record.Encoder
+		b   []byte
+	)
+	decErr := rdr.Read(
+		func(s []record.RefSeries) {
+			if err != nil {
+				return
+			}
+			err = repl.Log(enc.Series(s, b[:0]))
+		},
+		func(s []record.RefSample) {
+			if err != nil {
+				return
+			}
+			err = repl.Log(enc.Samples(s, b[:0]))
+		},
+		func(s []tombstones.Stone) {
+			if err != nil {
+				return
+			}
+			err = repl.Log(enc.Tombstones(s, b[:0]))
+		},
+	)
+	if decErr != nil {
+		return errors.Wrap(err, "decode old entries")
+	}
+	if err != nil {
+		return errors.Wrap(err, "write new entries")
+	}
+	// We explicitly close even when there is a defer for Windows to be
+	// able to delete it. The defer is in place to close it in-case there
+	// are errors above.
+	if err := w.Close(); err != nil {
+		return errors.Wrap(err, "close old WAL")
+	}
+	if err := repl.Close(); err != nil {
+		return errors.Wrap(err, "close new WAL")
+	}
+	if err := fileutil.Replace(tmpdir, dir); err != nil {
+		return errors.Wrap(err, "replace old WAL")
+	}
+	return nil
+}
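`MigrateWAL` first probes for the old magic header and returns nil when no deprecated-format WAL exists, so callers can invoke it unconditionally at startup. A minimal usage sketch with a hypothetical data directory:

```go
package main

import (
	"log"

	kitlog "github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	logger := kitlog.NewNopLogger()
	// "/var/lib/prometheus/wal" is an example path; MigrateWAL is a
	// no-op when the directory holds no deprecated-format segments.
	if err := tsdb.MigrateWAL(logger, "/var/lib/prometheus/wal"); err != nil {
		log.Fatalf("migrate WAL: %v", err)
	}
}
```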
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go
new file mode 100644
index 0000000000000000000000000000000000000000..d205c61212dc011443da20d7f9d5644fa6709bb1
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go
@@ -0,0 +1,291 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/record"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
+)
+
+// CheckpointStats returns stats about a created checkpoint.
+type CheckpointStats struct {
+	DroppedSeries     int
+	DroppedSamples    int
+	DroppedTombstones int
+	TotalSeries       int // Processed series including dropped ones.
+	TotalSamples      int // Processed samples including dropped ones.
+	TotalTombstones   int // Processed tombstones including dropped ones.
+}
+
+// LastCheckpoint returns the directory name and index of the most recent checkpoint.
+// If dir does not contain any checkpoints, ErrNotFound is returned.
+func LastCheckpoint(dir string) (string, int, error) {
+	checkpoints, err := listCheckpoints(dir)
+	if err != nil {
+		return "", 0, err
+	}
+
+	if len(checkpoints) == 0 {
+		return "", 0, record.ErrNotFound
+	}
+
+	checkpoint := checkpoints[len(checkpoints)-1]
+	return filepath.Join(dir, checkpoint.name), checkpoint.index, nil
+}
+
+// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
+func DeleteCheckpoints(dir string, maxIndex int) error {
+	checkpoints, err := listCheckpoints(dir)
+	if err != nil {
+		return err
+	}
+
+	var errs tsdb_errors.MultiError
+	for _, checkpoint := range checkpoints {
+		if checkpoint.index >= maxIndex {
+			break
+		}
+		if err := os.RemoveAll(filepath.Join(dir, checkpoint.name)); err != nil {
+			errs.Add(err)
+		}
+	}
+	return errs.Err()
+}
+
+const checkpointPrefix = "checkpoint."
+
+// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
+// It includes the most recent checkpoint if it exists.
+// All series not satisfying keep and samples below mint are dropped.
+//
+// The checkpoint is stored in a directory named checkpoint.N in the same
+// segmented format as the original WAL itself.
+// This makes it easy to read it through the WAL package and concatenate
+// it with the original WAL.
+func Checkpoint(w *WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) {
+	stats := &CheckpointStats{}
+	var sgmReader io.ReadCloser
+
+	{
+		var sgmRange []SegmentRange
+		dir, idx, err := LastCheckpoint(w.Dir())
+		if err != nil && err != record.ErrNotFound {
+			return nil, errors.Wrap(err, "find last checkpoint")
+		}
+		last := idx + 1
+		if err == nil {
+			if from > last {
+				return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from)
+			}
+			// Ignore WAL files below the checkpoint. They shouldn't exist to begin with.
+			from = last
+
+			sgmRange = append(sgmRange, SegmentRange{Dir: dir, Last: math.MaxInt32})
+		}
+
+		sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to})
+		sgmReader, err = NewSegmentsRangeReader(sgmRange...)
+		if err != nil {
+			return nil, errors.Wrap(err, "create segment reader")
+		}
+		defer sgmReader.Close()
+	}
+
+	cpdir := checkpointDir(w.Dir(), to)
+	cpdirtmp := cpdir + ".tmp"
+
+	if err := os.RemoveAll(cpdirtmp); err != nil {
+		return nil, errors.Wrap(err, "remove previous temporary checkpoint dir")
+	}
+
+	if err := os.MkdirAll(cpdirtmp, 0777); err != nil {
+		return nil, errors.Wrap(err, "create checkpoint dir")
+	}
+	cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled())
+	if err != nil {
+		return nil, errors.Wrap(err, "open checkpoint")
+	}
+
+	// Ensures that an early return caused by an error doesn't leave any tmp files.
+	defer func() {
+		cp.Close()
+		os.RemoveAll(cpdirtmp)
+	}()
+
+	r := NewReader(sgmReader)
+
+	var (
+		series  []record.RefSeries
+		samples []record.RefSample
+		tstones []tombstones.Stone
+		dec     record.Decoder
+		enc     record.Encoder
+		buf     []byte
+		recs    [][]byte
+	)
+	for r.Next() {
+		series, samples, tstones = series[:0], samples[:0], tstones[:0]
+
+		// We don't reset the buffer since we batch up multiple records
+		// before writing them to the checkpoint.
+		// Remember where the record for this iteration starts.
+		start := len(buf)
+		rec := r.Record()
+
+		switch dec.Type(rec) {
+		case record.Series:
+			series, err = dec.Series(rec, series)
+			if err != nil {
+				return nil, errors.Wrap(err, "decode series")
+			}
+			// Drop irrelevant series in place.
+			repl := series[:0]
+			for _, s := range series {
+				if keep(s.Ref) {
+					repl = append(repl, s)
+				}
+			}
+			if len(repl) > 0 {
+				buf = enc.Series(repl, buf)
+			}
+			stats.TotalSeries += len(series)
+			stats.DroppedSeries += len(series) - len(repl)
+
+		case record.Samples:
+			samples, err = dec.Samples(rec, samples)
+			if err != nil {
+				return nil, errors.Wrap(err, "decode samples")
+			}
+			// Drop irrelevant samples in place.
+			repl := samples[:0]
+			for _, s := range samples {
+				if s.T >= mint {
+					repl = append(repl, s)
+				}
+			}
+			if len(repl) > 0 {
+				buf = enc.Samples(repl, buf)
+			}
+			stats.TotalSamples += len(samples)
+			stats.DroppedSamples += len(samples) - len(repl)
+
+		case record.Tombstones:
+			tstones, err = dec.Tombstones(rec, tstones)
+			if err != nil {
+				return nil, errors.Wrap(err, "decode deletes")
+			}
+			// Drop irrelevant tombstones in place.
+			repl := tstones[:0]
+			for _, s := range tstones {
+				for _, iv := range s.Intervals {
+					if iv.Maxt >= mint {
+						repl = append(repl, s)
+						break
+					}
+				}
+			}
+			if len(repl) > 0 {
+				buf = enc.Tombstones(repl, buf)
+			}
+			stats.TotalTombstones += len(tstones)
+			stats.DroppedTombstones += len(tstones) - len(repl)
+
+		default:
+			return nil, errors.New("invalid record type")
+		}
+		if len(buf[start:]) == 0 {
+			continue // All contents discarded.
+		}
+		recs = append(recs, buf[start:])
+
+		// Flush records in 1 MB increments.
+		if len(buf) > 1*1024*1024 {
+			if err := cp.Log(recs...); err != nil {
+				return nil, errors.Wrap(err, "flush records")
+			}
+			buf, recs = buf[:0], recs[:0]
+		}
+	}
+	// If we hit any corruption during checkpointing, repairing is not an option.
+	// The head won't know which series records are lost.
+	if r.Err() != nil {
+		return nil, errors.Wrap(r.Err(), "read segments")
+	}
+
+	// Flush remaining records.
+	if err := cp.Log(recs...); err != nil {
+		return nil, errors.Wrap(err, "flush records")
+	}
+	if err := cp.Close(); err != nil {
+		return nil, errors.Wrap(err, "close checkpoint")
+	}
+	if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
+		return nil, errors.Wrap(err, "rename checkpoint directory")
+	}
+
+	return stats, nil
+}
+
+func checkpointDir(dir string, i int) string {
+	return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
+}
+
+type checkpointRef struct {
+	name  string
+	index int
+}
+
+func listCheckpoints(dir string) (refs []checkpointRef, err error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := 0; i < len(files); i++ {
+		fi := files[i]
+		if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
+			continue
+		}
+		if !fi.IsDir() {
+			return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name())
+		}
+		idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
+		if err != nil {
+			continue
+		}
+
+		refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
+	}
+
+	sort.Slice(refs, func(i, j int) bool {
+		return refs[i].index < refs[j].index
+	})
+
+	return refs, nil
+}
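+
+// Editor's note, for illustration: assuming checkpointPrefix is "checkpoint."
+// (as the naming format used elsewhere in this package suggests),
+// checkpointDir("data/wal", 42) yields "data/wal/checkpoint.00000042", which
+// listCheckpoints parses back into checkpointRef{name: "checkpoint.00000042", index: 42}.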
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..54c4a584e22008a8d2d2350c5392003e64720b05
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go
@@ -0,0 +1,322 @@
+// Copyright 2019 The Prometheus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/golang/snappy"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// LiveReaderMetrics holds all metrics exposed by the LiveReader.
+type LiveReaderMetrics struct {
+	readerCorruptionErrors *prometheus.CounterVec
+}
+
+// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected
+// at LiveReader instantiation.
+func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics {
+	m := &LiveReaderMetrics{
+		readerCorruptionErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Name: "prometheus_tsdb_wal_reader_corruption_errors_total",
+			Help: "Errors encountered when reading the WAL.",
+		}, []string{"error"}),
+	}
+
+	if reg != nil {
+		reg.MustRegister(m.readerCorruptionErrors)
+	}
+
+	return m
+}
+
+// NewLiveReader returns a new live reader.
+func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader {
+	lr := &LiveReader{
+		logger:  logger,
+		rdr:     r,
+		metrics: metrics,
+
+		// Until we understand how they come about, make readers permissive
+		// to records spanning pages.
+		permissive: true,
+	}
+
+	return lr
+}
+
+// LiveReader reads WAL records from an io.Reader. It allows reading of WALs
+// that are still in the process of being written, and returns records as soon
+// as they can be read.
+type LiveReader struct {
+	logger     log.Logger
+	rdr        io.Reader
+	err        error
+	rec        []byte
+	snappyBuf  []byte
+	hdr        [recordHeaderSize]byte
+	buf        [pageSize]byte
+	readIndex  int   // Index in buf to start at for next read.
+	writeIndex int   // Index in buf to start at for next write.
+	total      int64 // Total bytes processed during reading in calls to Next().
+	index      int   // Used to track partial records, should be 0 at the start of every new record.
+
+	// For testing, we can treat EOF as a non-error.
+	eofNonErr bool
+
+	// We sometimes see records span page boundaries.  Should never happen, but it
+	// does.  Until we track down why, set permissive to true to tolerate it.
+	// NB the non-live Reader implementation allows for this.
+	permissive bool
+
+	metrics *LiveReaderMetrics
+}
+
+// Err returns any errors encountered reading the WAL.  io.EOFs are not terminal
+// and Next can be tried again.  Non-EOFs are terminal, and the reader should
+// not be used again.  It is up to the user to decide when to stop trying should
+// io.EOF be returned.
+func (r *LiveReader) Err() error {
+	if r.eofNonErr && r.err == io.EOF {
+		return nil
+	}
+	return r.err
+}
+
+// Offset returns the number of bytes consumed from this segment.
+func (r *LiveReader) Offset() int64 {
+	return r.total
+}
+
+func (r *LiveReader) fillBuffer() (int, error) {
+	n, err := r.rdr.Read(r.buf[r.writeIndex:len(r.buf)])
+	r.writeIndex += n
+	return n, err
+}
+
+// Next returns true if Record() will contain a full record.
+// If Next returns false, you should always check the contents of Err().
+// A false return guarantees there are no more records if the segment is closed
+// and not corrupt; otherwise, if Err() == io.EOF, you should try again when more
+// data has been written.
+func (r *LiveReader) Next() bool {
+	for {
+		// If buildRecord returns a non-EOF error, it's game over - the segment is
+		// corrupt. If buildRecord returns an EOF, we try to read more in
+		// fillBuffer later on. If that fails to read anything (n=0 && err=EOF),
+		// we return EOF and the user can try again later. If we have a full
+		// page, buildRecord is guaranteed to return a record or a non-EOF; it
+		// checks that records fit in pages.
+		if ok, err := r.buildRecord(); ok {
+			return true
+		} else if err != nil && err != io.EOF {
+			r.err = err
+			return false
+		}
+
+		// If we've filled the page and not found a record, this
+		// means records have started to span pages.  Shouldn't happen,
+		// but it does; until we find out why, we need to deal with this.
+		if r.permissive && r.writeIndex == pageSize && r.readIndex > 0 {
+			copy(r.buf[:], r.buf[r.readIndex:])
+			r.writeIndex -= r.readIndex
+			r.readIndex = 0
+			continue
+		}
+
+		if r.readIndex == pageSize {
+			r.writeIndex = 0
+			r.readIndex = 0
+		}
+
+		if r.writeIndex != pageSize {
+			n, err := r.fillBuffer()
+			if n == 0 || (err != nil && err != io.EOF) {
+				r.err = err
+				return false
+			}
+		}
+	}
+}
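+
+// Editor's note - a hedged tailing sketch, not part of the upstream file.
+// io.EOF means "wait for more data"; any other error is terminal corruption.
+// process and the sleep period are hypothetical:
+//
+//	r := NewLiveReader(logger, metrics, segmentFile)
+//	for {
+//		for r.Next() {
+//			process(r.Record()) // only valid until the next call to Next
+//		}
+//		if err := r.Err(); err != nil && err != io.EOF {
+//			return err // corruption; stop reading
+//		}
+//		time.Sleep(10 * time.Millisecond) // io.EOF: retry once more data arrives
+//	}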
+
+// Record returns the current record.
+// The returned byte slice is only valid until the next call to Next.
+func (r *LiveReader) Record() []byte {
+	return r.rec
+}
+
+// buildRecord rebuilds a full record from potentially partial records. It
+// returns false if there was an error or if we weren't able to read a record
+// for any reason. It returns true if we read a full record. Any record data
+// is appended to LiveReader.rec.
+func (r *LiveReader) buildRecord() (bool, error) {
+	for {
+		// Check that we have data in the internal buffer to read.
+		if r.writeIndex <= r.readIndex {
+			return false, nil
+		}
+
+		// Attempt to read a record, partial or otherwise.
+		temp, n, err := r.readRecord()
+		if err != nil {
+			return false, err
+		}
+
+		r.readIndex += n
+		r.total += int64(n)
+		if temp == nil {
+			return false, nil
+		}
+
+		rt := recTypeFromHeader(r.hdr[0])
+		if rt == recFirst || rt == recFull {
+			r.rec = r.rec[:0]
+			r.snappyBuf = r.snappyBuf[:0]
+		}
+
+		compressed := r.hdr[0]&snappyMask != 0
+		if compressed {
+			r.snappyBuf = append(r.snappyBuf, temp...)
+		} else {
+			r.rec = append(r.rec, temp...)
+		}
+
+		if err := validateRecord(rt, r.index); err != nil {
+			r.index = 0
+			return false, err
+		}
+		if rt == recLast || rt == recFull {
+			r.index = 0
+			if compressed && len(r.snappyBuf) > 0 {
+				// The snappy library uses `len` to calculate if we need a new buffer.
+				// In order to allocate as few buffers as possible make the length
+				// equal to the capacity.
+				r.rec = r.rec[:cap(r.rec)]
+				r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
+				if err != nil {
+					return false, err
+				}
+			}
+			return true, nil
+		}
+		// Only increment r.index for non-zero records, since we use it
+		// to determine valid content record sequences.
+		r.index++
+	}
+}
+
+// Returns an error if the recType and i indicate an invalid record sequence.
+// As an example, if i is > 0 because we've read some amount of a partial record
+// (recFirst, recMiddle, etc. but not recLast) and then we get another recFirst or recFull
+// instead of a recLast or recMiddle we would have an invalid record.
+func validateRecord(typ recType, i int) error {
+	switch typ {
+	case recFull:
+		if i != 0 {
+			return errors.New("unexpected full record")
+		}
+		return nil
+	case recFirst:
+		if i != 0 {
+			return errors.New("unexpected first record, dropping buffer")
+		}
+		return nil
+	case recMiddle:
+		if i == 0 {
+			return errors.New("unexpected middle record, dropping buffer")
+		}
+		return nil
+	case recLast:
+		if i == 0 {
+			return errors.New("unexpected last record, dropping buffer")
+		}
+		return nil
+	default:
+		return errors.Errorf("unexpected record type %d", typ)
+	}
+}
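+
+// Editor's note, for illustration: valid fragment sequences are a lone
+// recFull, or recFirst, recMiddle..., recLast. So validateRecord(recFirst, 0)
+// and validateRecord(recLast, 2) pass, while validateRecord(recFull, 1) fails
+// because a full record cannot appear mid-sequence.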
+
+// readRecord reads a sub-record (see recType) from the buffer. It could
+// potentially be a full record (recFull) if the record fits within the bounds
+// of a single page. It returns a byte slice of the record data read, the
+// number of bytes read, and an error if there's a non-zero byte in a page
+// term record or the record checksum fails. Aside from filling r.hdr it does
+// not mutate the reader; the caller advances the read index by the returned
+// byte count.
+func (r *LiveReader) readRecord() ([]byte, int, error) {
+	// Special case: for recPageTerm, check that all bytes are zero to the end
+	// of the page; consume them but don't return them.
+	if r.buf[r.readIndex] == byte(recPageTerm) {
+		// End of page won't necessarily be end of buffer, as we may have
+		// got misaligned by records spanning page boundaries.
+		// r.total % pageSize is the offset into the current page
+		// that r.readIndex points to in buf.  Therefore
+		// pageSize - (r.total % pageSize) is the amount left to read of
+		// the current page.
+		remaining := int(pageSize - (r.total % pageSize))
+		if r.readIndex+remaining > r.writeIndex {
+			return nil, 0, io.EOF
+		}
+
+		for i := r.readIndex; i < r.readIndex+remaining; i++ {
+			if r.buf[i] != 0 {
+				return nil, 0, errors.New("unexpected non-zero byte in page term bytes")
+			}
+		}
+
+		return nil, remaining, nil
+	}
+
+	// Not a recPageTerm; read the record and check the checksum.
+	if r.writeIndex-r.readIndex < recordHeaderSize {
+		return nil, 0, io.EOF
+	}
+
+	copy(r.hdr[:], r.buf[r.readIndex:r.readIndex+recordHeaderSize])
+	length := int(binary.BigEndian.Uint16(r.hdr[1:]))
+	crc := binary.BigEndian.Uint32(r.hdr[3:])
+	if r.readIndex+recordHeaderSize+length > pageSize {
+		if !r.permissive {
+			return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize)
+		}
+		r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc()
+		level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize)
+	}
+	if recordHeaderSize+length > pageSize {
+		return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize)
+	}
+	if r.readIndex+recordHeaderSize+length > r.writeIndex {
+		return nil, 0, io.EOF
+	}
+
+	rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length]
+	if c := crc32.Checksum(rec, castagnoliTable); c != crc {
+		return nil, 0, errors.Errorf("unexpected checksum %x, expected %x", c, crc)
+	}
+
+	return rec, length + recordHeaderSize, nil
+}
+
+func min(i, j int) int {
+	if i < j {
+		return i
+	}
+	return j
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..7612f8775fa4124e379a1ad614406ede63423d21
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go
@@ -0,0 +1,200 @@
+// Copyright 2019 The Prometheus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"encoding/binary"
+	"hash/crc32"
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/pkg/errors"
+)
+
+// Reader reads WAL records from an io.Reader.
+type Reader struct {
+	rdr       io.Reader
+	err       error
+	rec       []byte
+	snappyBuf []byte
+	buf       [pageSize]byte
+	total     int64   // Total bytes processed.
+	curRecTyp recType // Used for checking that the last record is not torn.
+}
+
+// NewReader returns a new reader.
+func NewReader(r io.Reader) *Reader {
+	return &Reader{rdr: r}
+}
+
+// Next advances the reader to the next record and returns true if one exists.
+// It must not be called again after it has returned false.
+func (r *Reader) Next() bool {
+	err := r.next()
+	if errors.Cause(err) == io.EOF {
+		// The last WAL segment record shouldn't be torn (it should be full or last).
+		// The last record can be torn after a crash, just before the final
+		// record fragment could be persisted to disk.
+		if r.curRecTyp == recFirst || r.curRecTyp == recMiddle {
+			r.err = errors.New("last record is torn")
+		}
+		return false
+	}
+	r.err = err
+	return r.err == nil
+}
+
+func (r *Reader) next() (err error) {
+	// We have to use r.buf since allocating byte arrays here fails escape
+	// analysis and ends up on the heap, even though it seemingly should not.
+	hdr := r.buf[:recordHeaderSize]
+	buf := r.buf[recordHeaderSize:]
+
+	r.rec = r.rec[:0]
+	r.snappyBuf = r.snappyBuf[:0]
+
+	i := 0
+	for {
+		if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil {
+			return errors.Wrap(err, "read first header byte")
+		}
+		r.total++
+		r.curRecTyp = recTypeFromHeader(hdr[0])
+		compressed := hdr[0]&snappyMask != 0
+
+		// Gobble up zero bytes.
+		if r.curRecTyp == recPageTerm {
+			// recPageTerm is a single byte that indicates the rest of the page is padded.
+			// If it's the first byte in a page, buf is too small and
+			// needs to be resized to fit pageSize-1 bytes.
+			buf = r.buf[1:]
+
+			// We are pedantic and check whether the zeros are actually up
+			// to a page boundary.
+			// It's not strictly necessary but may catch sketchy state early.
+			k := pageSize - (r.total % pageSize)
+			if k == pageSize {
+				continue // Initial 0 byte was last page byte.
+			}
+			n, err := io.ReadFull(r.rdr, buf[:k])
+			if err != nil {
+				return errors.Wrap(err, "read remaining zeros")
+			}
+			r.total += int64(n)
+
+			for _, c := range buf[:k] {
+				if c != 0 {
+					return errors.New("unexpected non-zero byte in padded page")
+				}
+			}
+			continue
+		}
+		n, err := io.ReadFull(r.rdr, hdr[1:])
+		if err != nil {
+			return errors.Wrap(err, "read remaining header")
+		}
+		r.total += int64(n)
+
+		var (
+			length = binary.BigEndian.Uint16(hdr[1:])
+			crc    = binary.BigEndian.Uint32(hdr[3:])
+		)
+
+		if length > pageSize-recordHeaderSize {
+			return errors.Errorf("invalid record size %d", length)
+		}
+		n, err = io.ReadFull(r.rdr, buf[:length])
+		if err != nil {
+			return err
+		}
+		r.total += int64(n)
+
+		if n != int(length) {
+			return errors.Errorf("invalid size: expected %d, got %d", length, n)
+		}
+		if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc {
+			return errors.Errorf("unexpected checksum %x, expected %x", c, crc)
+		}
+
+		if compressed {
+			r.snappyBuf = append(r.snappyBuf, buf[:length]...)
+		} else {
+			r.rec = append(r.rec, buf[:length]...)
+		}
+
+		if err := validateRecord(r.curRecTyp, i); err != nil {
+			return err
+		}
+		if r.curRecTyp == recLast || r.curRecTyp == recFull {
+			if compressed && len(r.snappyBuf) > 0 {
+				// The snappy library uses `len` to calculate if we need a new buffer.
+				// In order to allocate as few buffers as possible make the length
+				// equal to the capacity.
+				r.rec = r.rec[:cap(r.rec)]
+				r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
+				return err
+			}
+			return nil
+		}
+
+		// Only increment i for non-zero records since we use it
+		// to determine valid content record sequences.
+		i++
+	}
+}
+
+// Err returns the last encountered error wrapped in a corruption error.
+// If the reader does not allow inferring a segment index and offset, the
+// total offset in the reader stream is provided instead.
+func (r *Reader) Err() error {
+	if r.err == nil {
+		return nil
+	}
+	if b, ok := r.rdr.(*segmentBufReader); ok {
+		return &CorruptionErr{
+			Err:     r.err,
+			Dir:     b.segs[b.cur].Dir(),
+			Segment: b.segs[b.cur].Index(),
+			Offset:  int64(b.off),
+		}
+	}
+	return &CorruptionErr{
+		Err:     r.err,
+		Segment: -1,
+		Offset:  r.total,
+	}
+}
+
+// Record returns the current record. The returned byte slice is only
+// valid until the next call to Next.
+func (r *Reader) Record() []byte {
+	return r.rec
+}
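+
+// Editor's note - a hedged sketch, not part of the upstream file: because the
+// returned slice is reused, callers that retain records must copy them.
+//
+//	var records [][]byte
+//	for r.Next() {
+//		rec := append([]byte(nil), r.Record()...) // own the bytes
+//		records = append(records, rec)
+//	}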
+
+// Segment returns the current segment being read.
+func (r *Reader) Segment() int {
+	if b, ok := r.rdr.(*segmentBufReader); ok {
+		return b.segs[b.cur].Index()
+	}
+	return -1
+}
+
+// Offset returns the current position of the segment being read.
+func (r *Reader) Offset() int64 {
+	if b, ok := r.rdr.(*segmentBufReader); ok {
+		return int64(b.off)
+	}
+	return r.total
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
new file mode 100644
index 0000000000000000000000000000000000000000..83cbb61b8d4cae2704ba2f6c795f3f63ffd1cfd8
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
@@ -0,0 +1,899 @@
+// Copyright 2017 The Prometheus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/golang/snappy"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+)
+
+const (
+	DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
+	pageSize           = 32 * 1024         // 32KB
+	recordHeaderSize   = 7
+)
+
+// The table gets initialized with sync.Once but may still cause a race
+// with any other use of the crc32 package anywhere. Thus we initialize it
+// up front.
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// page is an in-memory buffer used to batch disk writes.
+// Records bigger than the page size are split and flushed separately.
+// A flush is triggered when a single record doesn't fit the page size or
+// when the next record can't fit in the remaining free page space.
+type page struct {
+	alloc   int
+	flushed int
+	buf     [pageSize]byte
+}
+
+func (p *page) remaining() int {
+	return pageSize - p.alloc
+}
+
+func (p *page) full() bool {
+	return pageSize-p.alloc < recordHeaderSize
+}
+
+func (p *page) reset() {
+	for i := range p.buf {
+		p.buf[i] = 0
+	}
+	p.alloc = 0
+	p.flushed = 0
+}
+
+// Segment represents a segment file.
+type Segment struct {
+	*os.File
+	dir string
+	i   int
+}
+
+// Index returns the index of the segment.
+func (s *Segment) Index() int {
+	return s.i
+}
+
+// Dir returns the directory of the segment.
+func (s *Segment) Dir() string {
+	return s.dir
+}
+
+// CorruptionErr is an error that's returned when corruption is encountered.
+type CorruptionErr struct {
+	Dir     string
+	Segment int
+	Offset  int64
+	Err     error
+}
+
+func (e *CorruptionErr) Error() string {
+	if e.Segment < 0 {
+		return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err)
+	}
+	return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err)
+}
+
+// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
+func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
+	segName := SegmentName(dir, k)
+	f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
+	if err != nil {
+		return nil, err
+	}
+	stat, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	// If the last page is torn, fill it with zeros.
+	// In case it was torn after all records were written successfully, this
+	// will just pad the page and everything will be fine.
+	// If it was torn mid-record, a full read (which the caller should do anyway
+	// to ensure integrity) will detect it as a corruption by the end.
+	if d := stat.Size() % pageSize; d != 0 {
+		level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName)
+		if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
+			f.Close()
+			return nil, errors.Wrap(err, "zero-pad torn page")
+		}
+	}
+	return &Segment{File: f, i: k, dir: dir}, nil
+}
+
+// CreateSegment creates a new segment k in dir.
+func CreateSegment(dir string, k int) (*Segment, error) {
+	f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+	if err != nil {
+		return nil, err
+	}
+	return &Segment{File: f, i: k, dir: dir}, nil
+}
+
+// OpenReadSegment opens the segment with the given filename.
+func OpenReadSegment(fn string) (*Segment, error) {
+	k, err := strconv.Atoi(filepath.Base(fn))
+	if err != nil {
+		return nil, errors.New("not a valid filename")
+	}
+	f, err := os.Open(fn)
+	if err != nil {
+		return nil, err
+	}
+	return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
+}
+
+// WAL is a write ahead log that stores records in segment files.
+// It must be read from start to end once before logging new data.
+// If an error occurs during read, the repair procedure must be called
+// before it's safe to do further writes.
+//
+// Segments are written to in pages of 32KB, with records possibly split
+// across page boundaries.
+// Records are never split across segments to allow full segments to be
+// safely truncated. It also ensures that torn writes never corrupt records
+// beyond the most recent segment.
+type WAL struct {
+	dir         string
+	logger      log.Logger
+	segmentSize int
+	mtx         sync.RWMutex
+	segment     *Segment // Active segment.
+	donePages   int      // Pages written to the segment.
+	page        *page    // Active page.
+	stopc       chan chan struct{}
+	actorc      chan func()
+	closed      bool // To allow calling Close() more than once without blocking.
+	compress    bool
+	snappyBuf   []byte
+
+	metrics *walMetrics
+}
+
+type walMetrics struct {
+	fsyncDuration   prometheus.Summary
+	pageFlushes     prometheus.Counter
+	pageCompletions prometheus.Counter
+	truncateFail    prometheus.Counter
+	truncateTotal   prometheus.Counter
+	currentSegment  prometheus.Gauge
+	writesFailed    prometheus.Counter
+}
+
+func newWALMetrics(r prometheus.Registerer) *walMetrics {
+	m := &walMetrics{}
+
+	m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:       "prometheus_tsdb_wal_fsync_duration_seconds",
+		Help:       "Duration of WAL fsync.",
+		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+	})
+	m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_page_flushes_total",
+		Help: "Total number of page flushes.",
+	})
+	m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_completed_pages_total",
+		Help: "Total number of completed pages.",
+	})
+	m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_truncations_failed_total",
+		Help: "Total number of WAL truncations that failed.",
+	})
+	m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_truncations_total",
+		Help: "Total number of WAL truncations attempted.",
+	})
+	m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "prometheus_tsdb_wal_segment_current",
+		Help: "WAL segment index that TSDB is currently writing to.",
+	})
+	m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "prometheus_tsdb_wal_writes_failed_total",
+		Help: "Total number of WAL writes that failed.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.fsyncDuration,
+			m.pageFlushes,
+			m.pageCompletions,
+			m.truncateFail,
+			m.truncateTotal,
+			m.currentSegment,
+			m.writesFailed,
+		)
+	}
+
+	return m
+}
+
+// New returns a new WAL over the given directory.
+func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) {
+	return NewSize(logger, reg, dir, DefaultSegmentSize, compress)
+}
+
+// NewSize returns a new WAL over the given directory.
+// New segments are created with the specified size.
+func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) {
+	if segmentSize%pageSize != 0 {
+		return nil, errors.New("invalid segment size")
+	}
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return nil, errors.Wrap(err, "create dir")
+	}
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	w := &WAL{
+		dir:         dir,
+		logger:      logger,
+		segmentSize: segmentSize,
+		page:        &page{},
+		actorc:      make(chan func(), 100),
+		stopc:       make(chan chan struct{}),
+		compress:    compress,
+	}
+	w.metrics = newWALMetrics(reg)
+
+	_, last, err := w.Segments()
+	if err != nil {
+		return nil, errors.Wrap(err, "get segment range")
+	}
+
+	// Index of the Segment we want to open and write to.
+	writeSegmentIndex := 0
+	// If some segments already exist, create one with a higher index than the last segment.
+	if last != -1 {
+		writeSegmentIndex = last + 1
+	}
+
+	segment, err := CreateSegment(w.dir, writeSegmentIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := w.setSegment(segment); err != nil {
+		return nil, err
+	}
+
+	go w.run()
+
+	return w, nil
+}
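+
+// Editor's note - a minimal usage sketch, not part of the upstream file;
+// dir, logger and the records are hypothetical:
+//
+//	w, err := New(logger, prometheus.DefaultRegisterer, dir, true) // compression on
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Close()
+//	return w.Log(rec1, rec2) // batching reduces page flushes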
+
+// Open an existing WAL.
+func Open(logger log.Logger, dir string) (*WAL, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	w := &WAL{
+		dir:    dir,
+		logger: logger,
+	}
+
+	return w, nil
+}
+
+// CompressionEnabled reports whether compression is enabled on this WAL.
+func (w *WAL) CompressionEnabled() bool {
+	return w.compress
+}
+
+// Dir returns the directory of the WAL.
+func (w *WAL) Dir() string {
+	return w.dir
+}
+
+func (w *WAL) run() {
+Loop:
+	for {
+		select {
+		case f := <-w.actorc:
+			f()
+		case donec := <-w.stopc:
+			close(w.actorc)
+			defer close(donec)
+			break Loop
+		}
+	}
+	// Drain and process any remaining functions.
+	for f := range w.actorc {
+		f()
+	}
+}
+
+// Repair attempts to repair the WAL based on the error.
+// It discards all data after the corruption.
+func (w *WAL) Repair(origErr error) error {
+	// We could probably have a mode that only discards torn records right around
+	// the corruption to preserve as much data as possible.
+	// But that's not generally applicable if the records have any kind of causality.
+	// Maybe as an extra mode in the future if mid-WAL corruptions become
+	// a frequent concern.
+	err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.
+
+	cerr, ok := err.(*CorruptionErr)
+	if !ok {
+		return errors.Wrap(origErr, "cannot handle error")
+	}
+	if cerr.Segment < 0 {
+		return errors.New("corruption error does not specify position")
+	}
+	level.Warn(w.logger).Log("msg", "Starting corruption repair",
+		"segment", cerr.Segment, "offset", cerr.Offset)
+
+	// All segments behind the corruption can no longer be used.
+	segs, err := listSegments(w.dir)
+	if err != nil {
+		return errors.Wrap(err, "list segments")
+	}
+	level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment)
+
+	for _, s := range segs {
+		if w.segment.i == s.index {
+			// The active segment needs to be removed, so
+			// close it first (Windows!). It can be closed safely
+			// as we set the current segment to the repaired
+			// file below.
+			if err := w.segment.Close(); err != nil {
+				return errors.Wrap(err, "close active segment")
+			}
+		}
+		if s.index <= cerr.Segment {
+			continue
+		}
+		if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
+			return errors.Wrapf(err, "delete segment:%v", s.index)
+		}
+	}
+	// Regardless of the corruption offset, no record reaches into the previous segment.
+	// So we can safely repair the WAL by removing the segment and re-inserting all
+	// its records up to the corruption.
+	level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment)
+
+	fn := SegmentName(w.dir, cerr.Segment)
+	tmpfn := fn + ".repair"
+
+	if err := fileutil.Rename(fn, tmpfn); err != nil {
+		return err
+	}
+	// Create a clean segment and make it the active one.
+	s, err := CreateSegment(w.dir, cerr.Segment)
+	if err != nil {
+		return err
+	}
+	if err := w.setSegment(s); err != nil {
+		return err
+	}
+
+	f, err := os.Open(tmpfn)
+	if err != nil {
+		return errors.Wrap(err, "open segment")
+	}
+	defer f.Close()
+
+	r := NewReader(bufio.NewReader(f))
+
+	for r.Next() {
+		// Add records only up to where the error was.
+		if r.Offset() >= cerr.Offset {
+			break
+		}
+		if err := w.Log(r.Record()); err != nil {
+			return errors.Wrap(err, "insert record")
+		}
+	}
+	// We expect an error here from r.Err(), so nothing to handle.
+
+	// We need to pad to the end of the last page in the repaired segment.
+	if err := w.flushPage(true); err != nil {
+		return errors.Wrap(err, "flush page in repair")
+	}
+
+	// We explicitly close even though there is a defer, so that Windows is
+	// able to delete the file. The defer stays in place to close it in case
+	// there are errors above.
+	if err := f.Close(); err != nil {
+		return errors.Wrap(err, "close corrupted file")
+	}
+	if err := os.Remove(tmpfn); err != nil {
+		return errors.Wrap(err, "delete corrupted segment")
+	}
+
+	// Explicitly close the segment we just repaired to avoid issues with Windows.
+	s.Close()
+
+	// We always want to start writing to a new Segment rather than an existing
+	// Segment, which is handled by NewSize, but earlier in Repair we're deleting
+	// all segments that come after the corrupted Segment. Recreate a new Segment here.
+	s, err = CreateSegment(w.dir, cerr.Segment+1)
+	if err != nil {
+		return err
+	}
+	if err := w.setSegment(s); err != nil {
+		return err
+	}
+	return nil
+}
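+
+// Editor's note - a hedged sketch of the read/repair loop, not part of the
+// upstream file:
+//
+//	for r.Next() {
+//		// consume r.Record() ...
+//	}
+//	if err := r.Err(); err != nil {
+//		if _, ok := errors.Cause(err).(*CorruptionErr); ok {
+//			if rerr := w.Repair(err); rerr != nil {
+//				return rerr
+//			}
+//		}
+//	}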
+
+// SegmentName builds a segment name for the directory.
+func SegmentName(dir string, i int) string {
+	return filepath.Join(dir, fmt.Sprintf("%08d", i))
+}
+
+// NextSegment creates the next segment and closes the previous one.
+func (w *WAL) NextSegment() error {
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+	return w.nextSegment()
+}
+
+// nextSegment creates the next segment and closes the previous one.
+func (w *WAL) nextSegment() error {
+	// Only flush the current page if it actually holds data.
+	if w.page.alloc > 0 {
+		if err := w.flushPage(true); err != nil {
+			return err
+		}
+	}
+	next, err := CreateSegment(w.dir, w.segment.Index()+1)
+	if err != nil {
+		return errors.Wrap(err, "create new segment file")
+	}
+	prev := w.segment
+	if err := w.setSegment(next); err != nil {
+		return err
+	}
+
+	// Don't block further writes by fsyncing the last segment.
+	w.actorc <- func() {
+		if err := w.fsync(prev); err != nil {
+			level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
+		}
+		if err := prev.Close(); err != nil {
+			level.Error(w.logger).Log("msg", "close previous segment", "err", err)
+		}
+	}
+	return nil
+}
+
+func (w *WAL) setSegment(segment *Segment) error {
+	w.segment = segment
+
+	// Correctly initialize donePages.
+	stat, err := segment.Stat()
+	if err != nil {
+		return err
+	}
+	w.donePages = int(stat.Size() / pageSize)
+	w.metrics.currentSegment.Set(float64(segment.Index()))
+	return nil
+}
+
+// flushPage writes the new contents of the page to disk. If no more records will fit into
+// the page, the remaining bytes will be set to zero and a new page will be started.
+// If clear is true, this is enforced regardless of how many bytes are left in the page.
+func (w *WAL) flushPage(clear bool) error {
+	w.metrics.pageFlushes.Inc()
+
+	p := w.page
+	clear = clear || p.full()
+
+	// Either no more data will fit into the page, or an explicit clear was
+	// requested. Pad the page out and flush it in full.
+	if clear {
+		p.alloc = pageSize // Write till end of page.
+	}
+	n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
+	if err != nil {
+		return err
+	}
+	p.flushed += n
+
+	// We flushed an entire page, prepare a new one.
+	if clear {
+		p.reset()
+		w.donePages++
+		w.metrics.pageCompletions.Inc()
+	}
+	return nil
+}
+
+// First byte of header format:
+// [4 bits unallocated] [1 bit snappy compression flag] [3 bit record type]
+const (
+	snappyMask  = 1 << 3
+	recTypeMask = snappyMask - 1
+)
+
+type recType uint8
+
+const (
+	recPageTerm recType = 0 // Rest of page is empty.
+	recFull     recType = 1 // Full record.
+	recFirst    recType = 2 // First fragment of a record.
+	recMiddle   recType = 3 // Middle fragments of a record.
+	recLast     recType = 4 // Final fragment of a record.
+)
+
+func recTypeFromHeader(header byte) recType {
+	return recType(header & recTypeMask)
+}
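+
+// Editor's note, for illustration: a snappy-compressed first fragment has the
+// header byte byte(recFirst)|snappyMask == 0x0A. recTypeFromHeader masks off
+// the compression flag, so recTypeFromHeader(0x0A) == recFirst, while
+// 0x0A&snappyMask != 0 marks the payload as compressed.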
+
+func (t recType) String() string {
+	switch t {
+	case recPageTerm:
+		return "zero"
+	case recFull:
+		return "full"
+	case recFirst:
+		return "first"
+	case recMiddle:
+		return "middle"
+	case recLast:
+		return "last"
+	default:
+		return ""
+	}
+}
+
+func (w *WAL) pagesPerSegment() int {
+	return w.segmentSize / pageSize
+}
+
+// Log writes the records into the log.
+// Multiple records can be passed at once to reduce writes and increase throughput.
+func (w *WAL) Log(recs ...[]byte) error {
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+	// Callers could just implement their own list record format but adding
+	// a bit of extra logic here frees them from that overhead.
+	for i, r := range recs {
+		if err := w.log(r, i == len(recs)-1); err != nil {
+			w.metrics.writesFailed.Inc()
+			return err
+		}
+	}
+	return nil
+}
+
+// log writes rec to the log and forces a flush of the current page if:
+// - it is the final record of a batch
+// - the record is bigger than the page size
+// - the current page is full.
+func (w *WAL) log(rec []byte, final bool) error {
+	// If the last page flush failed, the page will remain full.
+	// When the page is full, we need to flush it before trying to add more records to it.
+	if w.page.full() {
+		if err := w.flushPage(true); err != nil {
+			return err
+		}
+	}
+	// If the record is too big to fit within the active page in the current
+	// segment, terminate the active segment and advance to the next one.
+	// This ensures that records do not cross segment boundaries.
+	left := w.page.remaining() - recordHeaderSize                                   // Free space in the active page.
+	left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.
+
+	if len(rec) > left {
+		if err := w.nextSegment(); err != nil {
+			return err
+		}
+	}
+
+	compressed := false
+	if w.compress && len(rec) > 0 {
+		// The snappy library uses `len` to calculate if we need a new buffer.
+		// In order to allocate as few buffers as possible make the length
+		// equal to the capacity.
+		w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)]
+		w.snappyBuf = snappy.Encode(w.snappyBuf, rec)
+		if len(w.snappyBuf) < len(rec) {
+			rec = w.snappyBuf
+			compressed = true
+		}
+	}
+
+	// Populate as many pages as necessary to fit the record.
+	// Be careful to always do one pass to ensure we write zero-length records.
+	for i := 0; i == 0 || len(rec) > 0; i++ {
+		p := w.page
+
+		// Find how much of the record we can fit into the page.
+		var (
+			l    = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
+			part = rec[:l]
+			buf  = p.buf[p.alloc:]
+			typ  recType
+		)
+
+		switch {
+		case i == 0 && len(part) == len(rec):
+			typ = recFull
+		case len(part) == len(rec):
+			typ = recLast
+		case i == 0:
+			typ = recFirst
+		default:
+			typ = recMiddle
+		}
+		if compressed {
+			typ |= snappyMask
+		}
+
+		buf[0] = byte(typ)
+		crc := crc32.Checksum(part, castagnoliTable)
+		binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
+		binary.BigEndian.PutUint32(buf[3:], crc)
+
+		copy(buf[recordHeaderSize:], part)
+		p.alloc += len(part) + recordHeaderSize
+
+		if w.page.full() {
+			if err := w.flushPage(true); err != nil {
+				return err
+			}
+		}
+		rec = rec[l:]
+	}
+
+	// If it's the final record of the batch and the page is not empty, flush it.
+	if final && w.page.alloc > 0 {
+		if err := w.flushPage(false); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Segments returns the range [first, last] of currently existing segments.
+// If no segments are found, first and last are -1.
+func (w *WAL) Segments() (first, last int, err error) {
+	refs, err := listSegments(w.dir)
+	if err != nil {
+		return 0, 0, err
+	}
+	if len(refs) == 0 {
+		return -1, -1, nil
+	}
+	return refs[0].index, refs[len(refs)-1].index, nil
+}
+
+// Truncate drops all segments before i.
+func (w *WAL) Truncate(i int) (err error) {
+	w.metrics.truncateTotal.Inc()
+	defer func() {
+		if err != nil {
+			w.metrics.truncateFail.Inc()
+		}
+	}()
+	refs, err := listSegments(w.dir)
+	if err != nil {
+		return err
+	}
+	for _, r := range refs {
+		if r.index >= i {
+			break
+		}
+		if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (w *WAL) fsync(f *Segment) error {
+	start := time.Now()
+	err := f.File.Sync()
+	w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
+	return err
+}
+
+// Close flushes all writes and closes the active segment.
+func (w *WAL) Close() (err error) {
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
+	if w.closed {
+		return errors.New("wal already closed")
+	}
+
+	if w.segment == nil {
+		w.closed = true
+		return nil
+	}
+
+	// Flush the last page and zero out all its remaining size.
+	// We must not flush an empty page as it would falsely signal
+	// the segment is done if we start writing to it again after opening.
+	if w.page.alloc > 0 {
+		if err := w.flushPage(true); err != nil {
+			return err
+		}
+	}
+
+	donec := make(chan struct{})
+	w.stopc <- donec
+	<-donec
+
+	if err = w.fsync(w.segment); err != nil {
+		level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
+	}
+	if err := w.segment.Close(); err != nil {
+		level.Error(w.logger).Log("msg", "close previous segment", "err", err)
+	}
+	w.closed = true
+	return nil
+}
+
+type segmentRef struct {
+	name  string
+	index int
+}
+
+func listSegments(dir string) (refs []segmentRef, err error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	for _, f := range files {
+		fn := f.Name()
+		k, err := strconv.Atoi(fn)
+		if err != nil {
+			continue
+		}
+		refs = append(refs, segmentRef{name: fn, index: k})
+	}
+	sort.Slice(refs, func(i, j int) bool {
+		return refs[i].index < refs[j].index
+	})
+	for i := 0; i < len(refs)-1; i++ {
+		if refs[i].index+1 != refs[i+1].index {
+			return nil, errors.New("segments are not sequential")
+		}
+	}
+	return refs, nil
+}
+
+// SegmentRange groups segments by the directory and the first and last index it includes.
+type SegmentRange struct {
+	Dir         string
+	First, Last int
+}
+
+// NewSegmentsReader returns a new reader over all segments in the directory.
+func NewSegmentsReader(dir string) (io.ReadCloser, error) {
+	return NewSegmentsRangeReader(SegmentRange{dir, -1, -1})
+}
+
+// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
+// If first or last are -1, the range is open on the respective end.
+func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
+	var segs []*Segment
+
+	for _, sgmRange := range sr {
+		refs, err := listSegments(sgmRange.Dir)
+		if err != nil {
+			return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
+		}
+
+		for _, r := range refs {
+			if sgmRange.First >= 0 && r.index < sgmRange.First {
+				continue
+			}
+			if sgmRange.Last >= 0 && r.index > sgmRange.Last {
+				break
+			}
+			s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
+			if err != nil {
+				return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
+			}
+			segs = append(segs, s)
+		}
+	}
+	return NewSegmentBufReader(segs...), nil
+}
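+
+// Editor's note - a hedged sketch, not part of the upstream file: replaying
+// segments 3 through 5 of a WAL directory in full. handle is hypothetical:
+//
+//	rc, err := NewSegmentsRangeReader(SegmentRange{Dir: dir, First: 3, Last: 5})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	r := NewReader(rc)
+//	for r.Next() {
+//		handle(r.Record())
+//	}
+//	return r.Err() // a *CorruptionErr carrying segment and offset, if any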
+
+// segmentBufReader is a buffered reader that reads in multiples of pages.
+// The main purpose is that we are able to track segment and offset for
+// corruption reporting.  We have to be careful not to increment cur too
+// early, as it is used by Reader.Err() to tell Repair which segment is corrupt.
+// As such we pad the end of non-page-aligned segments with zeros.
+type segmentBufReader struct {
+	buf  *bufio.Reader
+	segs []*Segment
+	cur  int // Index into segs.
+	off  int // Offset of read data into current segment.
+}
+
+// nolint:golint // TODO: Consider exporting segmentBufReader
+func NewSegmentBufReader(segs ...*Segment) *segmentBufReader {
+	return &segmentBufReader{
+		buf:  bufio.NewReaderSize(segs[0], 16*pageSize),
+		segs: segs,
+	}
+}
+
+func (r *segmentBufReader) Close() (err error) {
+	for _, s := range r.segs {
+		if e := s.Close(); e != nil {
+			err = e
+		}
+	}
+	return err
+}
+
+// Read implements io.Reader.
+func (r *segmentBufReader) Read(b []byte) (n int, err error) {
+	n, err = r.buf.Read(b)
+	r.off += n
+
+	// If we succeeded, or hit a non-EOF, we can stop.
+	if err == nil || err != io.EOF {
+		return n, err
+	}
+
+	// We hit EOF; fake out zero padding at the end of short segments, so we
+	// don't increment cur too early and report the wrong segment as corrupt.
+	if r.off%pageSize != 0 {
+		i := 0
+		for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
+			b[n+i] = 0
+		}
+
+		// Return early, even if we didn't fill b.
+		r.off += i
+		return n + i, nil
+	}
+
+	// There is no more data left in the current segment and there are no more
+	// segments left.  Return EOF.
+	if r.cur+1 >= len(r.segs) {
+		return n, io.EOF
+	}
+
+	// Move to next segment.
+	r.cur++
+	r.off = 0
+	r.buf.Reset(r.segs[r.cur])
+	return n, nil
+}
+
+// Size computes the size of the WAL by summing the sizes of all the files
+// under the WAL dir.
+func (w *WAL) Size() (int64, error) {
+	return fileutil.DirSize(w.Dir())
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..abd7bdc77e7d2a955bba717ecf0a94c16edc6daa
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
@@ -0,0 +1,614 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/tsdb/record"
+)
+
+const (
+	readPeriod         = 10 * time.Millisecond
+	checkpointPeriod   = 5 * time.Second
+	segmentCheckPeriod = 100 * time.Millisecond
+	consumer           = "consumer"
+)
+
+// WriteTo is an interface used by the Watcher to send the samples it's read
+// from the WAL on to somewhere else. Functions will be called concurrently
+// and it is left to the implementer to make sure they are safe.
+type WriteTo interface {
+	Append([]record.RefSample) bool
+	StoreSeries([]record.RefSeries, int)
+	// SeriesReset is called after reading a checkpoint to allow the deletion
+	// of all series created in a segment lower than the argument.
+	SeriesReset(int)
+}
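+
+// Editor's note - a minimal WriteTo sketch, not part of the upstream file;
+// discardWriter is hypothetical:
+//
+//	type discardWriter struct{}
+//
+//	func (discardWriter) Append(s []record.RefSample) bool        { return true }
+//	func (discardWriter) StoreSeries(s []record.RefSeries, i int) {}
+//	func (discardWriter) SeriesReset(i int)                       {}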
+
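+// WatcherMetrics holds the metrics exposed by the Watcher.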
+type WatcherMetrics struct {
+	recordsRead           *prometheus.CounterVec
+	recordDecodeFails     *prometheus.CounterVec
+	samplesSentPreTailing *prometheus.CounterVec
+	currentSegment        *prometheus.GaugeVec
+}
+
+// Watcher watches the TSDB WAL for a given WriteTo.
+type Watcher struct {
+	name           string
+	writer         WriteTo
+	logger         log.Logger
+	walDir         string
+	lastCheckpoint string
+	metrics        *WatcherMetrics
+	readerMetrics  *LiveReaderMetrics
+
+	startTime      time.Time
+	startTimestamp int64 // the start time as a Prometheus timestamp
+	sendSamples    bool
+
+	recordsReadMetric       *prometheus.CounterVec
+	recordDecodeFailsMetric prometheus.Counter
+	samplesSentPreTailing   prometheus.Counter
+	currentSegmentMetric    prometheus.Gauge
+
+	quit chan struct{}
+	done chan struct{}
+
+	// For testing, stop when we hit this segment.
+	MaxSegment int
+}
+
+func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
+	m := &WatcherMetrics{
+		recordsRead: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "records_read_total",
+				Help:      "Number of records read by the WAL watcher from the WAL.",
+			},
+			[]string{consumer, "type"},
+		),
+		recordDecodeFails: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "record_decode_failures_total",
+				Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
+			},
+			[]string{consumer},
+		),
+		samplesSentPreTailing: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "samples_sent_pre_tailing_total",
+				Help:      "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
+			},
+			[]string{consumer},
+		),
+		currentSegment: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "current_segment",
+				Help:      "Current segment the WAL watcher is reading records from.",
+			},
+			[]string{consumer},
+		),
+	}
+
+	if reg != nil {
+		reg.MustRegister(m.recordsRead)
+		reg.MustRegister(m.recordDecodeFails)
+		reg.MustRegister(m.samplesSentPreTailing)
+		reg.MustRegister(m.currentSegment)
+	}
+
+	return m
+}
+
+// NewWatcher creates a new WAL watcher for a given WriteTo.
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string) *Watcher {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	return &Watcher{
+		logger:        logger,
+		writer:        writer,
+		metrics:       metrics,
+		readerMetrics: readerMetrics,
+		walDir:        path.Join(walDir, "wal"),
+		name:          name,
+		quit:          make(chan struct{}),
+		done:          make(chan struct{}),
+
+		MaxSegment: -1,
+	}
+}
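+
+// Editor's note - a hedged wiring sketch, not part of the upstream file.
+// NewWatcher joins walDir with "wal", so pass the TSDB data directory;
+// reg, logger, writer and the consumer name are hypothetical:
+//
+//	wm := NewWatcherMetrics(reg)
+//	rm := NewLiveReaderMetrics(reg)
+//	w := NewWatcher(wm, rm, logger, "my_consumer", writer, dataDir)
+//	w.Start()
+//	defer w.Stop()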
+
+func (w *Watcher) setMetrics() {
+	// Set up the WAL Watcher's metrics. We do this here rather than in the
+	// constructor because of the ordering of creating Queue Managers,
+	// stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig.
+	if w.metrics != nil {
+		w.recordsReadMetric = w.metrics.recordsRead.MustCurryWith(prometheus.Labels{consumer: w.name})
+		w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
+		w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
+		w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
+	}
+}
+
+// Start the Watcher.
+func (w *Watcher) Start() {
+	w.setMetrics()
+	level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name)
+
+	go w.loop()
+}
+
+// Stop the Watcher.
+func (w *Watcher) Stop() {
+	close(w.quit)
+	<-w.done
+
+	// Records read metric has series and samples.
+	if w.metrics != nil {
+		w.metrics.recordsRead.DeleteLabelValues(w.name, "series")
+		w.metrics.recordsRead.DeleteLabelValues(w.name, "samples")
+		w.metrics.recordDecodeFails.DeleteLabelValues(w.name)
+		w.metrics.samplesSentPreTailing.DeleteLabelValues(w.name)
+		w.metrics.currentSegment.DeleteLabelValues(w.name)
+	}
+
+	level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name)
+}
+
+func (w *Watcher) loop() {
+	defer close(w.done)
+
+	// We may encounter failures processing the WAL; we should wait and retry.
+	for !isClosed(w.quit) {
+		w.SetStartTime(time.Now())
+		if err := w.Run(); err != nil {
+			level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
+		}
+
+		select {
+		case <-w.quit:
+			return
+		case <-time.After(5 * time.Second):
+		}
+	}
+}
+
+// Run the watcher, which will tail the WAL until the quit channel is closed
+// or an error case is hit.
+func (w *Watcher) Run() error {
+	_, lastSegment, err := w.firstAndLast()
+	if err != nil {
+		return errors.Wrap(err, "wal.Segments")
+	}
+
+	// We want to ensure this is false across iterations since
+	// Run will be called again if there was a failure to read the WAL.
+	w.sendSamples = false
+
+	level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name)
+
+	// Backfill from the checkpoint first if it exists.
+	lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir)
+	if err != nil && err != record.ErrNotFound {
+		return errors.Wrap(err, "tsdb.LastCheckpoint")
+	}
+
+	if err == nil {
+		if err = w.readCheckpoint(lastCheckpoint); err != nil {
+			return errors.Wrap(err, "readCheckpoint")
+		}
+	}
+	w.lastCheckpoint = lastCheckpoint
+
+	currentSegment, err := w.findSegmentForIndex(checkpointIndex)
+	if err != nil {
+		return err
+	}
+
+	level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
+	for !isClosed(w.quit) {
+		w.currentSegmentMetric.Set(float64(currentSegment))
+		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
+
+		// On start, after reading the existing WAL for series records, we have a pointer to the latest segment.
+		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
+		if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
+			return err
+		}
+
+		// For testing: stop when you hit a specific segment.
+		if currentSegment == w.MaxSegment {
+			return nil
+		}
+
+		currentSegment++
+	}
+
+	return nil
+}
+
+// findSegmentForIndex finds the first segment greater than or equal to index.
+func (w *Watcher) findSegmentForIndex(index int) (int, error) {
+	refs, err := w.segments(w.walDir)
+	if err != nil {
+		return -1, err
+	}
+
+	for _, r := range refs {
+		if r >= index {
+			return r, nil
+		}
+	}
+
+	return -1, errors.New("failed to find segment for index")
+}
+
+func (w *Watcher) firstAndLast() (int, int, error) {
+	refs, err := w.segments(w.walDir)
+	if err != nil {
+		return -1, -1, err
+	}
+
+	if len(refs) == 0 {
+		return -1, -1, nil
+	}
+	return refs[0], refs[len(refs)-1], nil
+}
+
+// Copied from tsdb/wal/wal.go so we do not have to open a WAL.
+// Plan is to move WAL watcher to TSDB and dedupe these implementations.
+func (w *Watcher) segments(dir string) ([]int, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	var refs []int
+	var last int
+	for _, f := range files {
+		k, err := strconv.Atoi(f.Name())
+		if err != nil {
+			continue
+		}
+		if len(refs) > 0 && k > last+1 {
+			return nil, errors.New("segments are not sequential")
+		}
+		refs = append(refs, k)
+		last = k
+	}
+	sort.Ints(refs)
+
+	return refs, nil
+}
+
+// Set tail to true to indicate that the reader is currently on a segment that is
+// actively being written to. If false, assume it's a full segment and we're
+// replaying it on start to cache the series records.
+func (w *Watcher) watch(segmentNum int, tail bool) error {
+	segment, err := OpenReadSegment(SegmentName(w.walDir, segmentNum))
+	if err != nil {
+		return err
+	}
+	defer segment.Close()
+
+	reader := NewLiveReader(w.logger, w.readerMetrics, segment)
+
+	readTicker := time.NewTicker(readPeriod)
+	defer readTicker.Stop()
+
+	checkpointTicker := time.NewTicker(checkpointPeriod)
+	defer checkpointTicker.Stop()
+
+	segmentTicker := time.NewTicker(segmentCheckPeriod)
+	defer segmentTicker.Stop()
+
+	// If we're replaying the segment we need to know the size of the file to know
+	// when to return from watch and move on to the next segment.
+	size := int64(math.MaxInt64)
+	if !tail {
+		segmentTicker.Stop()
+		checkpointTicker.Stop()
+		var err error
+		size, err = getSegmentSize(w.walDir, segmentNum)
+		if err != nil {
+			return errors.Wrap(err, "getSegmentSize")
+		}
+	}
+
+	gcSem := make(chan struct{}, 1)
+	for {
+		select {
+		case <-w.quit:
+			return nil
+
+		case <-checkpointTicker.C:
+			// Periodically check if there is a new checkpoint so we can garbage
+			// collect labels. As this is considered an optimisation, we ignore
+			// errors during checkpoint processing. Doing the process asynchronously
+			// allows the current WAL segment to be processed while reading the
+			// checkpoint.
+			select {
+			case gcSem <- struct{}{}:
+				go func() {
+					defer func() {
+						<-gcSem
+					}()
+					if err := w.garbageCollectSeries(segmentNum); err != nil {
+						level.Warn(w.logger).Log("msg", "Error processing checkpoint", "err", err)
+					}
+				}()
+			default:
+				// Currently doing a garbage collect, try again later.
+			}
+
+		case <-segmentTicker.C:
+			_, last, err := w.firstAndLast()
+			if err != nil {
+				return errors.Wrap(err, "segments")
+			}
+
+			// Check if new segments exist.
+			if last <= segmentNum {
+				continue
+			}
+
+			err = w.readSegment(reader, segmentNum, tail)
+
+			// Ignore errors reading to end of segment whilst replaying the WAL.
+			if !tail {
+				if err != nil && err != io.EOF {
+					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
+				} else if reader.Offset() != size {
+					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+				}
+				return nil
+			}
+
+			// Otherwise, when we are tailing, non-EOFs are fatal.
+			if err != io.EOF {
+				return err
+			}
+
+			return nil
+
+		case <-readTicker.C:
+			err = w.readSegment(reader, segmentNum, tail)
+
+			// Ignore all errors reading to end of segment whilst replaying the WAL.
+			if !tail {
+				if err != nil && err != io.EOF {
+					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
+				} else if reader.Offset() != size {
+					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+				}
+				return nil
+			}
+
+			// Otherwise, when we are tailing, non-EOFs are fatal.
+			if err != io.EOF {
+				return err
+			}
+		}
+	}
+}
+
+func (w *Watcher) garbageCollectSeries(segmentNum int) error {
+	dir, _, err := LastCheckpoint(w.walDir)
+	if err != nil && err != record.ErrNotFound {
+		return errors.Wrap(err, "tsdb.LastCheckpoint")
+	}
+
+	if dir == "" || dir == w.lastCheckpoint {
+		return nil
+	}
+	w.lastCheckpoint = dir
+
+	index, err := checkpointNum(dir)
+	if err != nil {
+		return errors.Wrap(err, "error parsing checkpoint filename")
+	}
+
+	if index >= segmentNum {
+		level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
+		return nil
+	}
+
+	level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum)
+
+	if err = w.readCheckpoint(dir); err != nil {
+		return errors.Wrap(err, "readCheckpoint")
+	}
+
+	// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
+	w.writer.SeriesReset(index)
+	return nil
+}
+
+func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
+	var (
+		dec     record.Decoder
+		series  []record.RefSeries
+		samples []record.RefSample
+		send    []record.RefSample
+	)
+	for r.Next() && !isClosed(w.quit) {
+		rec := r.Record()
+		w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc()
+
+		switch dec.Type(rec) {
+		case record.Series:
+			series, err := dec.Series(rec, series[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			w.writer.StoreSeries(series, segmentNum)
+
+		case record.Samples:
+			// If we're not tailing a segment we can ignore any samples records we see.
+			// This speeds up replay of the WAL by > 10x.
+			if !tail {
+				break
+			}
+			samples, err := dec.Samples(rec, samples[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			for _, s := range samples {
+				if s.T > w.startTimestamp {
+					if !w.sendSamples {
+						w.sendSamples = true
+						duration := time.Since(w.startTime)
+						level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
+					}
+					send = append(send, s)
+				}
+			}
+			if len(send) > 0 {
+				// Blocks until the samples are sent to all remote write endpoints or the queue is closed (because enqueue blocks).
+				w.writer.Append(send)
+				send = send[:0]
+			}
+
+		case record.Tombstones:
+			// noop
+		case record.Invalid:
+			return errors.New("invalid record")
+
+		default:
+			w.recordDecodeFailsMetric.Inc()
+			return errors.New("unknown TSDB record type")
+		}
+	}
+	return r.Err()
+}
+
+func (w *Watcher) SetStartTime(t time.Time) {
+	w.startTime = t
+	w.startTimestamp = timestamp.FromTime(t)
+}
+
+func recordType(rt record.Type) string {
+	switch rt {
+	case record.Invalid:
+		return "invalid"
+	case record.Series:
+		return "series"
+	case record.Samples:
+		return "samples"
+	case record.Tombstones:
+		return "tombstones"
+	default:
+		return "unknown"
+	}
+}
+
+// readCheckpoint reads all the series records from a checkpoint directory.
+func (w *Watcher) readCheckpoint(checkpointDir string) error {
+	level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir)
+	index, err := checkpointNum(checkpointDir)
+	if err != nil {
+		return errors.Wrap(err, "checkpointNum")
+	}
+
+	// Ensure we read the whole contents of every segment in the checkpoint dir.
+	segs, err := w.segments(checkpointDir)
+	if err != nil {
+		return errors.Wrap(err, "Unable to get segments checkpoint dir")
+	}
+	for _, seg := range segs {
+		size, err := getSegmentSize(checkpointDir, seg)
+		if err != nil {
+			return errors.Wrap(err, "getSegmentSize")
+		}
+
+		sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
+		if err != nil {
+			return errors.Wrap(err, "unable to open segment")
+		}
+		defer sr.Close()
+
+		r := NewLiveReader(w.logger, w.readerMetrics, sr)
+		if err := w.readSegment(r, index, false); err != io.EOF && err != nil {
+			return errors.Wrap(err, "readSegment")
+		}
+
+		if r.Offset() != size {
+			return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
+		}
+	}
+
+	level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir)
+	return nil
+}
+
+func checkpointNum(dir string) (int, error) {
+	// Checkpoint dir names are in the format checkpoint.000001.
+	// dir may contain a hidden directory, so only check the base directory.
+	chunks := strings.Split(path.Base(dir), ".")
+	if len(chunks) != 2 {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	result, err := strconv.Atoi(chunks[1])
+	if err != nil {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	return result, nil
+}
+
+// Get size of segment.
+func getSegmentSize(dir string, index int) (int64, error) {
+	i := int64(-1)
+	fi, err := os.Stat(SegmentName(dir, index))
+	if err == nil {
+		i = fi.Size()
+	}
+	return i, err
+}
+
+func isClosed(c chan struct{}) bool {
+	select {
+	case <-c:
+		return true
+	default:
+		return false
+	}
+}
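The `gcSem` channel in `watch` above is the classic buffered-channel semaphore: a non-blocking send claims the single slot, and the `default` branch skips the work when a collection is already in flight. A minimal, self-contained sketch of the same idiom (a hypothetical standalone `main` package, stdlib only):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// A 1-slot buffered channel is a semaphore: a non-blocking send
	// claims the slot, and the default branch skips the work when a
	// previous task is still running (the gcSem pattern above).
	sem := make(chan struct{}, 1)
	var wg sync.WaitGroup

	for tick := 0; tick < 3; tick++ {
		select {
		case sem <- struct{}{}: // slot acquired
			wg.Add(1)
			go func(n int) {
				defer wg.Done()
				defer func() { <-sem }() // release the slot
				fmt.Println("collecting on tick", n)
				time.Sleep(10 * time.Millisecond)
			}(tick)
		default:
			fmt.Println("collection in progress, skipping tick", tick)
		}
		time.Sleep(5 * time.Millisecond)
	}
	wg.Wait()
}
```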
diff --git a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
new file mode 100644
index 0000000000000000000000000000000000000000..3fea3921fa661569201f54f582ad7549e6daa649
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
@@ -0,0 +1,157 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+	"context"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// QueryTiming identifies the code area or functionality in which time is spent
+// during a query.
+type QueryTiming int
+
+// Query timings.
+const (
+	EvalTotalTime QueryTiming = iota
+	ResultSortTime
+	QueryPreparationTime
+	InnerEvalTime
+	ExecQueueTime
+	ExecTotalTime
+)
+
+// Return a string representation of a QueryTiming identifier.
+func (s QueryTiming) String() string {
+	switch s {
+	case EvalTotalTime:
+		return "Eval total time"
+	case ResultSortTime:
+		return "Result sorting time"
+	case QueryPreparationTime:
+		return "Query preparation time"
+	case InnerEvalTime:
+		return "Inner eval time"
+	case ExecQueueTime:
+		return "Exec queue wait time"
+	case ExecTotalTime:
+		return "Exec total time"
+	default:
+		return "Unknown query timing"
+	}
+}
+
+// SpanOperation returns a string representation of a QueryTiming span operation.
+func (s QueryTiming) SpanOperation() string {
+	switch s {
+	case EvalTotalTime:
+		return "promqlEval"
+	case ResultSortTime:
+		return "promqlSort"
+	case QueryPreparationTime:
+		return "promqlPrepare"
+	case InnerEvalTime:
+		return "promqlInnerEval"
+	case ExecQueueTime:
+		return "promqlExecQueue"
+	case ExecTotalTime:
+		return "promqlExec"
+	default:
+		return "Unknown query timing"
+	}
+}
+
+// queryTimings holds all query timers mapped to durations.
+type queryTimings struct {
+	EvalTotalTime        float64 `json:"evalTotalTime"`
+	ResultSortTime       float64 `json:"resultSortTime"`
+	QueryPreparationTime float64 `json:"queryPreparationTime"`
+	InnerEvalTime        float64 `json:"innerEvalTime"`
+	ExecQueueTime        float64 `json:"execQueueTime"`
+	ExecTotalTime        float64 `json:"execTotalTime"`
+}
+
+// QueryStats currently only holds query timings.
+type QueryStats struct {
+	Timings queryTimings `json:"timings,omitempty"`
+}
+
+// NewQueryStats makes a QueryStats struct with all QueryTimings found in the
+// given TimerGroup.
+func NewQueryStats(tg *QueryTimers) *QueryStats {
+	var qt queryTimings
+
+	for s, timer := range tg.TimerGroup.timers {
+		switch s {
+		case EvalTotalTime:
+			qt.EvalTotalTime = timer.Duration()
+		case ResultSortTime:
+			qt.ResultSortTime = timer.Duration()
+		case QueryPreparationTime:
+			qt.QueryPreparationTime = timer.Duration()
+		case InnerEvalTime:
+			qt.InnerEvalTime = timer.Duration()
+		case ExecQueueTime:
+			qt.ExecQueueTime = timer.Duration()
+		case ExecTotalTime:
+			qt.ExecTotalTime = timer.Duration()
+		}
+	}
+
+	qs := QueryStats{Timings: qt}
+	return &qs
+}
+
+// SpanTimer unifies tracing and timing, to reduce repetition.
+type SpanTimer struct {
+	timer     *Timer
+	observers []prometheus.Observer
+
+	span opentracing.Span
+}
+
+func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
+	span, ctx := opentracing.StartSpanFromContext(ctx, operation)
+	timer.Start()
+
+	return &SpanTimer{
+		timer:     timer,
+		observers: observers,
+
+		span: span,
+	}, ctx
+}
+
+func (s *SpanTimer) Finish() {
+	s.timer.Stop()
+	s.span.Finish()
+
+	for _, obs := range s.observers {
+		obs.Observe(s.timer.ElapsedTime().Seconds())
+	}
+}
+
+type QueryTimers struct {
+	*TimerGroup
+}
+
+func NewQueryTimers() *QueryTimers {
+	return &QueryTimers{NewTimerGroup()}
+}
+
+func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
+	return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...)
+}
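A sketch of how these pieces compose, assuming a hypothetical standalone `main` package; with no OpenTracing tracer registered, `StartSpanFromContext` falls back to a no-op span, so this runs without any tracing backend:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/prometheus/prometheus/util/stats"
)

func main() {
	qt := stats.NewQueryTimers()

	// Time one phase of a (mock) query. With no OpenTracing tracer
	// registered, the span returned here is a no-op.
	spanTimer, _ := qt.GetSpanTimer(context.Background(), stats.ExecTotalTime)
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	spanTimer.Finish()

	// Snapshot the accumulated timings as the JSON-serialisable struct.
	out, _ := json.Marshal(stats.NewQueryStats(qt))
	fmt.Println(string(out))
}
```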
diff --git a/vendor/github.com/prometheus/prometheus/util/stats/timer.go b/vendor/github.com/prometheus/prometheus/util/stats/timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e47162680e59aa41b96c17e40ac8d54812ac331d
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/stats/timer.go
@@ -0,0 +1,113 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"time"
+)
+
+// A Timer that can be started and stopped and accumulates the total time it
+// was running (the time between Start() and Stop()).
+type Timer struct {
+	name     fmt.Stringer
+	created  int
+	start    time.Time
+	duration time.Duration
+}
+
+// Start the timer.
+func (t *Timer) Start() *Timer {
+	t.start = time.Now()
+	return t
+}
+
+// Stop the timer.
+func (t *Timer) Stop() {
+	t.duration += time.Since(t.start)
+}
+
+// ElapsedTime returns the time that passed since starting the timer.
+func (t *Timer) ElapsedTime() time.Duration {
+	return time.Since(t.start)
+}
+
+// Duration returns the duration value of the timer in seconds.
+func (t *Timer) Duration() float64 {
+	return t.duration.Seconds()
+}
+
+// Return a string representation of the Timer.
+func (t *Timer) String() string {
+	return fmt.Sprintf("%s: %s", t.name, t.duration)
+}
+
+// A TimerGroup represents a group of timers relevant to a single query.
+type TimerGroup struct {
+	timers map[fmt.Stringer]*Timer
+}
+
+// NewTimerGroup constructs a new TimerGroup.
+func NewTimerGroup() *TimerGroup {
+	return &TimerGroup{timers: map[fmt.Stringer]*Timer{}}
+}
+
+// GetTimer gets (and creates, if necessary) the Timer for a given code section.
+func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer {
+	if timer, exists := t.timers[name]; exists {
+		return timer
+	}
+	timer := &Timer{
+		name:    name,
+		created: len(t.timers),
+	}
+	t.timers[name] = timer
+	return timer
+}
+
+// Timers is a slice of Timer pointers that implements Len and Swap from
+// sort.Interface.
+type Timers []*Timer
+
+type byCreationTimeSorter struct{ Timers }
+
+// Len implements sort.Interface.
+func (t Timers) Len() int {
+	return len(t)
+}
+
+// Swap implements sort.Interface.
+func (t Timers) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+
+func (s byCreationTimeSorter) Less(i, j int) bool {
+	return s.Timers[i].created < s.Timers[j].created
+}
+
+// Return a string representation of a TimerGroup.
+func (t *TimerGroup) String() string {
+	timers := byCreationTimeSorter{}
+	for _, timer := range t.timers {
+		timers.Timers = append(timers.Timers, timer)
+	}
+	sort.Sort(timers)
+	result := &bytes.Buffer{}
+	for _, timer := range timers.Timers {
+		fmt.Fprintf(result, "%s\n", timer)
+	}
+	return result.String()
+}
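For illustration, a hypothetical sketch of `TimerGroup` usage: the `QueryTiming` constants from `query_stats.go` serve as the `fmt.Stringer` keys, and repeated Start/Stop cycles accumulate into a single duration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/util/stats"
)

func main() {
	tg := stats.NewTimerGroup()

	// QueryTiming implements fmt.Stringer, so its constants can key timers.
	t := tg.GetTimer(stats.InnerEvalTime).Start()
	time.Sleep(5 * time.Millisecond) // stand-in for evaluation work
	t.Stop()

	// A second Start/Stop cycle adds to the same accumulated duration.
	t.Start()
	time.Sleep(5 * time.Millisecond)
	t.Stop()

	fmt.Print(tg) // timers print in creation order
}
```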
diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..6fa90781e1b1652a2b49da8bee3c22c64989c402
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
@@ -0,0 +1,55 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package teststorage
+
+import (
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+// New returns a new TestStorage for testing purposes
+// that removes all associated files on closing.
+func New(t testutil.T) *TestStorage {
+	dir, err := ioutil.TempDir("", "test_storage")
+	if err != nil {
+		t.Fatalf("Opening test dir failed: %s", err)
+	}
+
+	// Tests just load data for a series sequentially. Thus we
+	// need a long appendable window.
+	opts := tsdb.DefaultOptions()
+	opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
+	opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
+	db, err := tsdb.Open(dir, nil, nil, opts)
+	if err != nil {
+		t.Fatalf("Opening test storage failed: %s", err)
+	}
+	return &TestStorage{DB: db, dir: dir}
+}
+
+type TestStorage struct {
+	*tsdb.DB
+	dir string
+}
+
+func (s TestStorage) Close() error {
+	if err := s.DB.Close(); err != nil {
+		return err
+	}
+	return os.RemoveAll(s.dir)
+}
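A usage sketch (a hypothetical test in an arbitrary `mypkg` package): `*testing.T` satisfies the `testutil.T` interface, and `Close` removes both the TSDB and its temporary directory:

```go
package mypkg

import (
	"testing"

	"github.com/prometheus/prometheus/util/teststorage"
)

func TestWithTempTSDB(t *testing.T) {
	// New opens a TSDB in a fresh temp dir; Close tears both down.
	st := teststorage.New(t)
	defer func() {
		if err := st.Close(); err != nil {
			t.Fatal(err)
		}
	}()

	// TestStorage embeds *tsdb.DB, so the full storage API is
	// available directly on st for the duration of the test.
	_ = st.DB
}
```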
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf730421b421a5378041a01b931e6e9d64e417d9
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go
@@ -0,0 +1,42 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import "time"
+
+// A MockContext provides a simple stub implementation of a Context.
+type MockContext struct {
+	Error  error
+	DoneCh chan struct{}
+}
+
+// Deadline always reports that no deadline is set.
+func (c *MockContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+// Done returns a read channel for listening to the Done event.
+func (c *MockContext) Done() <-chan struct{} {
+	return c.DoneCh
+}
+
+// Err returns the error; it is nil if not set.
+func (c *MockContext) Err() error {
+	return c.Error
+}
+
+// Value ignores the key and always returns nil.
+func (c *MockContext) Value(key interface{}) interface{} {
+	return nil
+}
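Because `*MockContext` implements all four `context.Context` methods, it can stand in for a real context under test. A hypothetical sketch (`mypkg` and `waitDone` are illustrative names, not part of this patch):

```go
package mypkg

import (
	"context"
	"testing"

	"github.com/prometheus/prometheus/util/testutil"
)

// waitDone blocks until ctx is done and reports its error.
func waitDone(ctx context.Context) error {
	<-ctx.Done()
	return ctx.Err()
}

func TestWaitDone(t *testing.T) {
	mock := &testutil.MockContext{
		Error:  context.Canceled,
		DoneCh: make(chan struct{}),
	}
	close(mock.DoneCh) // simulate a cancellation that already happened

	if err := waitDone(mock); err != context.Canceled {
		t.Fatalf("got %v, want context.Canceled", err)
	}
}
```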
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f0f08fe8f34611ecf75c225547da139ab96687a
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go
@@ -0,0 +1,168 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"crypto/sha256"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"testing"
+)
+
+const (
+	// The base directory used for test emissions. An empty value instructs
+	// the operating system to use its default temporary directory (or the
+	// TMPDIR environment variable) as the base.
+	defaultDirectory = ""
+
+	// NilCloser is a no-op Closer.
+	NilCloser = nilCloser(true)
+
+	// The number of times that a TemporaryDirectory will retry its removal.
+	temporaryDirectoryRemoveRetries = 2
+)
+
+type (
+	// Closer is the interface that wraps the Close method.
+	Closer interface {
+		// Close reaps the underlying directory and its children. The directory
+		// may already have been deleted by its users.
+		Close()
+	}
+
+	nilCloser bool
+
+	// TemporaryDirectory models a closeable path for transient POSIX disk
+	// activities.
+	TemporaryDirectory interface {
+		Closer
+
+		// Path returns the underlying path for access.
+		Path() string
+	}
+
+	// temporaryDirectory is kept as a private type due to private fields and
+	// their interactions.
+	temporaryDirectory struct {
+		path   string
+		tester T
+	}
+
+	callbackCloser struct {
+		fn func()
+	}
+
+	// T implements the needed methods of testing.TB so that we do not need
+	// to actually import testing (which has the side effect of adding all
+	// the test flags, which we do not want in non-test binaries even if
+	// they make use of these utilities for some reason).
+	T interface {
+		Fatal(args ...interface{})
+		Fatalf(format string, args ...interface{})
+	}
+)
+
+func (c nilCloser) Close() {
+}
+
+func (c callbackCloser) Close() {
+	c.fn()
+}
+
+// NewCallbackCloser returns a Closer that calls the provided function upon
+// closing.
+func NewCallbackCloser(fn func()) Closer {
+	return &callbackCloser{
+		fn: fn,
+	}
+}
+
+func (t temporaryDirectory) Close() {
+	retries := temporaryDirectoryRemoveRetries
+	err := os.RemoveAll(t.path)
+	for err != nil && retries > 0 {
+		switch {
+		case os.IsNotExist(err):
+			err = nil
+		default:
+			retries--
+			err = os.RemoveAll(t.path)
+		}
+	}
+	if err != nil {
+		t.tester.Fatal(err)
+	}
+}
+
+func (t temporaryDirectory) Path() string {
+	return t.path
+}
+
+// NewTemporaryDirectory creates a new temporary directory for transient POSIX
+// activities.
+func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
+	var (
+		directory string
+		err       error
+	)
+
+	directory, err = ioutil.TempDir(defaultDirectory, name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handler = temporaryDirectory{
+		path:   directory,
+		tester: t,
+	}
+
+	return
+}
+
+// DirHash returns a hash of all file attributes and their content within a directory.
+func DirHash(t *testing.T, path string) []byte {
+	hash := sha256.New()
+	err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
+		Ok(t, err)
+
+		if info.IsDir() {
+			return nil
+		}
+		f, err := os.Open(path)
+		Ok(t, err)
+		defer f.Close()
+
+		_, err = io.Copy(hash, f)
+		Ok(t, err)
+
+		_, err = io.WriteString(hash, strconv.Itoa(int(info.Size())))
+		Ok(t, err)
+
+		_, err = io.WriteString(hash, info.Name())
+		Ok(t, err)
+
+		modTime, err := info.ModTime().GobEncode()
+		Ok(t, err)
+
+		_, err = io.WriteString(hash, string(modTime))
+		Ok(t, err)
+		return nil
+	})
+	Ok(t, err)
+
+	return hash.Sum(nil)
+}
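A usage sketch in a hypothetical test package: `*testing.T` provides the `Fatal`/`Fatalf` methods the `T` interface requires, and `Close` retries removal as shown above:

```go
package mypkg

import (
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestScratchSpace(t *testing.T) {
	dir := testutil.NewTemporaryDirectory("scratch", t)
	defer dir.Close() // reaps the directory, retrying on failure

	path := filepath.Join(dir.Path(), "data.txt")
	if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}
}
```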
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..839b86690be76cafe6e272dc1a1387fb80c8169f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"testing"
+
+	"github.com/go-kit/kit/log"
+)
+
+type logger struct {
+	t *testing.T
+}
+
+// NewLogger returns a go-kit compatible Logger which calls t.Log.
+func NewLogger(t *testing.T) log.Logger {
+	return logger{t: t}
+}
+
+// Log implements log.Logger.
+func (t logger) Log(keyvals ...interface{}) error {
+	t.t.Log(keyvals...)
+	return nil
+}
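A brief, hypothetical sketch: since the returned value is an ordinary go-kit `log.Logger`, it composes with helpers such as `level`, and output lands in `go test -v`, attributed to the running test:

```go
package mypkg

import (
	"testing"

	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestComponentLogging(t *testing.T) {
	logger := testutil.NewLogger(t)

	// Each Log call becomes a t.Log line in `go test -v` output.
	level.Info(logger).Log("msg", "component started", "shards", 4)
}
```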
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go b/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go
new file mode 100644
index 0000000000000000000000000000000000000000..996d11f3680b3eef207564b0cb85e0eef12bbe31
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go
@@ -0,0 +1,47 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+	"net/http"
+)
+
+type roundTrip struct {
+	theResponse *http.Response
+	theError    error
+}
+
+func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt.theResponse, rt.theError
+}
+
+type roundTripCheckRequest struct {
+	checkRequest func(*http.Request)
+	roundTrip
+}
+
+func (rt *roundTripCheckRequest) RoundTrip(r *http.Request) (*http.Response, error) {
+	rt.checkRequest(r)
+	return rt.theResponse, rt.theError
+}
+
+// NewRoundTripCheckRequest creates a new instance of a type that implements http.RoundTripper,
+// which, before returning theResponse and theError, executes checkRequest against the incoming http.Request.
+func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *http.Response, theError error) http.RoundTripper {
+	return &roundTripCheckRequest{
+		checkRequest: checkRequest,
+		roundTrip: roundTrip{
+			theResponse: theResponse,
+			theError:    theError}}
+}
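A hypothetical sketch of `NewRoundTripCheckRequest` in a test: the callback inspects the outgoing request before the canned response is returned, so no network is involved:

```go
package mypkg

import (
	"net/http"
	"testing"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestClientSetsAuthHeader(t *testing.T) {
	rt := testutil.NewRoundTripCheckRequest(
		func(r *http.Request) { // runs before the canned response is returned
			if r.Header.Get("Authorization") == "" {
				t.Fatalf("missing Authorization header")
			}
		},
		&http.Response{StatusCode: http.StatusOK, Body: http.NoBody},
		nil, // no transport error
	)

	client := &http.Client{Transport: rt}
	req, _ := http.NewRequest("GET", "http://example.invalid/", nil)
	req.Header.Set("Authorization", "Bearer test-token")
	if _, err := client.Do(req); err != nil {
		t.Fatal(err)
	}
}
```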
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go
new file mode 100644
index 0000000000000000000000000000000000000000..f2335034d4777e12647e77ae00a230b647fb12d6
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go
@@ -0,0 +1,156 @@
+// The MIT License (MIT)
+
+// Copyright (c) 2014 Ben Johnson
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package testutil
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+)
+
+// This package is imported by non-test code and therefore cannot import the
+// testing package, which has side effects such as adding flags. Hence we use an
+// interface to testing.{T,B}.
+type TB interface {
+	Helper()
+	Fatalf(string, ...interface{})
+}
+
+// Assert fails the test if the condition is false.
+func Assert(tb TB, condition bool, format string, a ...interface{}) {
+	tb.Helper()
+	if !condition {
+		tb.Fatalf("\033[31m"+format+"\033[39m\n", a...)
+	}
+}
+
+// Ok fails the test if err is not nil.
+func Ok(tb TB, err error) {
+	tb.Helper()
+	if err != nil {
+		tb.Fatalf("\033[31munexpected error: %v\033[39m\n", err)
+	}
+}
+
+// NotOk fails the test if err is nil.
+func NotOk(tb TB, err error, a ...interface{}) {
+	tb.Helper()
+	if err == nil {
+		if len(a) != 0 {
+			format := a[0].(string)
+			tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a[1:]...)
+		}
+		tb.Fatalf("\033[31mexpected error, got none\033[39m")
+	}
+}
+
+// Equals fails the test if exp is not equal to act.
+func Equals(tb TB, exp, act interface{}, msgAndArgs ...interface{}) {
+	tb.Helper()
+	if !reflect.DeepEqual(exp, act) {
+		tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v%s\033[39m\n", formatMessage(msgAndArgs), exp, act, diff(exp, act))
+	}
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+	t := reflect.TypeOf(v)
+	k := t.Kind()
+
+	if k == reflect.Ptr {
+		t = t.Elem()
+		k = t.Kind()
+	}
+	return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+	if expected == nil || actual == nil {
+		return ""
+	}
+
+	et, ek := typeAndKind(expected)
+	at, _ := typeAndKind(actual)
+	if et != at {
+		return ""
+	}
+
+	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+		return ""
+	}
+
+	var e, a string
+	c := spew.ConfigState{
+		Indent:                  " ",
+		DisablePointerAddresses: true,
+		DisableCapacities:       true,
+		SortKeys:                true,
+	}
+	if et != reflect.TypeOf("") {
+		e = c.Sdump(expected)
+		a = c.Sdump(actual)
+	} else {
+		e = reflect.ValueOf(expected).String()
+		a = reflect.ValueOf(actual).String()
+	}
+
+	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+		A:        difflib.SplitLines(e),
+		B:        difflib.SplitLines(a),
+		FromFile: "Expected",
+		FromDate: "",
+		ToFile:   "Actual",
+		ToDate:   "",
+		Context:  1,
+	})
+	return "\n\nDiff:\n" + diff
+}
+
+// ErrorEqual compares Go errors for equality.
+func ErrorEqual(tb TB, left, right error, msgAndArgs ...interface{}) {
+	tb.Helper()
+	if left == right {
+		return
+	}
+
+	if left != nil && right != nil {
+		Equals(tb, left.Error(), right.Error(), msgAndArgs...)
+		return
+	}
+
+	tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v\033[39m\n", formatMessage(msgAndArgs), left, right)
+}
+
+func formatMessage(msgAndArgs []interface{}) string {
+	if len(msgAndArgs) == 0 {
+		return ""
+	}
+
+	if msg, ok := msgAndArgs[0].(string); ok {
+		return fmt.Sprintf("\n\nmsg: "+msg, msgAndArgs[1:]...)
+	}
+	return ""
+}
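A minimal, hypothetical sketch of the assertion helpers in an ordinary test (`mypkg` is an illustrative package name; `*testing.T` satisfies the `TB` interface via its `Helper` and `Fatalf` methods):

```go
package mypkg

import (
	"strconv"
	"testing"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestParsePort(t *testing.T) {
	n, err := strconv.Atoi("9090")
	testutil.Ok(t, err)         // fails on a non-nil error
	testutil.Equals(t, 9090, n) // deep equality with a unified diff on failure
	testutil.Assert(t, n > 0, "port %d should be positive", n)
}
```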
diff --git a/vendor/github.com/sony/sonyflake/go.mod b/vendor/github.com/sony/sonyflake/go.mod
deleted file mode 100644
index 99ab181580258f1d2f0169cea3dbc764e708f780..0000000000000000000000000000000000000000
--- a/vendor/github.com/sony/sonyflake/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/sony/sonyflake
-
-go 1.12
-
-require github.com/deckarep/golang-set v1.7.1
diff --git a/vendor/github.com/sony/sonyflake/go.sum b/vendor/github.com/sony/sonyflake/go.sum
deleted file mode 100644
index 898e5f9b8d99be19b743644a6fd84ff589dd6019..0000000000000000000000000000000000000000
--- a/vendor/github.com/sony/sonyflake/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
-github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
index c7b459e4dd0f206724b1e4793d7191ab8a1acdaa..3b053c59ec2914cd4089f8b499b3bab015610b5c 100644
--- a/vendor/github.com/spf13/cobra/.gitignore
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -32,8 +32,7 @@ Session.vim
 tags
 
 *.exe
+
 cobra.test
-bin
 
-.idea/
-*.iml
+.idea/*
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
index a9bd4e54785c41aa6d758c02ae44e53fae7d549f..38b85f499ca881bfd23bf02051eb9e61fbc7596c 100644
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -3,27 +3,29 @@ language: go
 stages:
   - diff
   - test
-  - build
 
 go:
+  - 1.10.x
+  - 1.11.x
   - 1.12.x
-  - 1.13.x
   - tip
 
-before_install:
-  - go get -u github.com/kyoh86/richgo
-  - go get -u github.com/mitchellh/gox
-
 matrix:
   allow_failures:
     - go: tip
   include:
     - stage: diff
-      go: 1.13.x
-      script: make fmt
-    - stage: build
-      go: 1.13.x
-      script: make cobra_generator
+      go: 1.12.x
+      script: diff -u <(echo -n) <(gofmt -d -s .)
 
-script: 
- - make test
+before_install:
+  - mkdir -p bin
+  - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
+  - chmod +x bin/shellcheck
+  - go get -u github.com/kyoh86/richgo
+script:
+  - PATH=$PATH:$PWD/bin richgo test -v ./...
+  - go build
+  - if [ -z $NOVET ]; then
+      diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+    fi
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
deleted file mode 100644
index e9740d1e1752d40f018855f3effb0be815d56092..0000000000000000000000000000000000000000
--- a/vendor/github.com/spf13/cobra/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
-BIN="./bin"
-SRC=$(shell find . -name "*.go")
-
-ifeq (, $(shell which richgo))
-$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
-endif
-
-.PHONY: fmt vet test cobra_generator install_deps clean
-
-default: all
-
-all: fmt vet test cobra_generator	
-
-fmt:
-	$(info ******************** checking formatting ********************)
-	@test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
-
-test: install_deps vet
-	$(info ******************** running tests ********************)
-	richgo test -v ./...
-
-cobra_generator: install_deps
-	$(info ******************** building generator ********************)
-	mkdir -p $(BIN)
-	make -C cobra all
-
-install_deps:
-	$(info ******************** downloading dependencies ********************)
-	go get -v ./...
-
-vet:
-	$(info ******************** vetting ********************)
-	go vet ./...
-
-clean:
-	rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 9d79934260fac1220a01d9397e7f17951d4c530e..60c5a425bc99515ca77e6fc8be88d04b430566de 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -24,13 +24,11 @@ Many of the most widely used Go projects are built using Cobra, such as:
 [Prototool](https://github.com/uber/prototool),
 [mattermost-server](https://github.com/mattermost/mattermost-server),
 [Gardener](https://github.com/gardener/gardenctl),
-[Linkerd](https://linkerd.io/),
-[Github CLI](https://github.com/cli/cli)
 etc.
 
 [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
 [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
-[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
 
 # Table of Contents
 
@@ -210,78 +208,51 @@ You will additionally define flags and handle configuration in your init() funct
 For example cmd/root.go:
 
 ```go
-package cmd
-
 import (
-	"fmt"
-	"os"
-
-	homedir "github.com/mitchellh/go-homedir"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
+  "fmt"
+  "os"
 
-var (
-	// Used for flags.
-	cfgFile     string
-	userLicense string
-
-	rootCmd = &cobra.Command{
-		Use:   "cobra",
-		Short: "A generator for Cobra based Applications",
-		Long: `Cobra is a CLI library for Go that empowers applications.
-This application is a tool to generate the needed files
-to quickly create a Cobra application.`,
-	}
+  homedir "github.com/mitchellh/go-homedir"
+  "github.com/spf13/cobra"
+  "github.com/spf13/viper"
 )
 
-// Execute executes the root command.
-func Execute() error {
-	return rootCmd.Execute()
-}
-
 func init() {
-	cobra.OnInitialize(initConfig)
-
-	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
-	rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
-	rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
-	rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
-	viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
-	viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
-	viper.SetDefault("author", "NAME HERE ")
-	viper.SetDefault("license", "apache")
-
-	rootCmd.AddCommand(addCmd)
-	rootCmd.AddCommand(initCmd)
-}
-
-func er(msg interface{}) {
-	fmt.Println("Error:", msg)
-	os.Exit(1)
+  cobra.OnInitialize(initConfig)
+  rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+  rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+  rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+  rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+  rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+  viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+  viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+  viper.SetDefault("author", "NAME HERE ")
+  viper.SetDefault("license", "apache")
 }
 
 func initConfig() {
-	if cfgFile != "" {
-		// Use config file from the flag.
-		viper.SetConfigFile(cfgFile)
-	} else {
-		// Find home directory.
-		home, err := homedir.Dir()
-		if err != nil {
-			er(err)
-		}
-
-		// Search config in home directory with name ".cobra" (without extension).
-		viper.AddConfigPath(home)
-		viper.SetConfigName(".cobra")
-	}
-
-	viper.AutomaticEnv()
-
-	if err := viper.ReadInConfig(); err == nil {
-		fmt.Println("Using config file:", viper.ConfigFileUsed())
-	}
+  // Don't forget to read config either from cfgFile or from home directory!
+  if cfgFile != "" {
+    // Use config file from the flag.
+    viper.SetConfigFile(cfgFile)
+  } else {
+    // Find home directory.
+    home, err := homedir.Dir()
+    if err != nil {
+      fmt.Println(err)
+      os.Exit(1)
+    }
+
+    // Search config in home directory with name ".cobra" (without extension).
+    viper.AddConfigPath(home)
+    viper.SetConfigName(".cobra")
+  }
+
+  if err := viper.ReadInConfig(); err != nil {
+    fmt.Println("Can't read config:", err)
+    os.Exit(1)
+  }
 }
 ```
 
@@ -488,7 +459,7 @@ For many years people have printed back to the screen.`,
 Echo works a lot like print, except it has a child command.`,
     Args: cobra.MinimumNArgs(1),
     Run: func(cmd *cobra.Command, args []string) {
-      fmt.Println("Echo: " + strings.Join(args, " "))
+      fmt.Println("Print: " + strings.Join(args, " "))
     },
   }
 
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index 70e9b262912e48685b57f19af57796b69174b5b7..c4d820b8531317cea0cf41970552e2bc98bf2811 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -2,7 +2,6 @@ package cobra
 
 import (
 	"fmt"
-	"strings"
 )
 
 type PositionalArgs func(cmd *Command, args []string) error
@@ -35,15 +34,8 @@ func NoArgs(cmd *Command, args []string) error {
 // OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
 func OnlyValidArgs(cmd *Command, args []string) error {
 	if len(cmd.ValidArgs) > 0 {
-		// Remove any description that may be included in ValidArgs.
-		// A description is following a tab character.
-		var validArgs []string
-		for _, v := range cmd.ValidArgs {
-			validArgs = append(validArgs, strings.Split(v, "\t")[0])
-		}
-
 		for _, v := range args {
-			if !stringInSlice(v, validArgs) {
+			if !stringInSlice(v, cmd.ValidArgs) {
 				return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
 			}
 		}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 1e27188c3d2732706bfd065ee7a640220864f231..57bb8e1b3fd5be8110aede97a40699a99dd1da81 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -58,71 +58,9 @@ __%[1]s_contains_word()
     return 1
 }
 
-__%[1]s_handle_go_custom_completion()
-{
-    __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
-
-    local out requestComp lastParam lastChar comp directive args
-
-    # Prepare the command to request completions for the program.
-    # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
-    args=("${words[@]:1}")
-    requestComp="${words[0]} %[2]s ${args[*]}"
-
-    lastParam=${words[$((${#words[@]}-1))]}
-    lastChar=${lastParam:$((${#lastParam}-1)):1}
-    __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
-
-    if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
-        # If the last parameter is complete (there is a space following it)
-        # We add an extra empty parameter so we can indicate this to the go method.
-        __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
-        requestComp="${requestComp} \"\""
-    fi
-
-    __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
-    # Use eval to handle any environment variables and such
-    out=$(eval "${requestComp}" 2>/dev/null)
-
-    # Extract the directive integer at the very end of the output following a colon (:)
-    directive=${out##*:}
-    # Remove the directive
-    out=${out%%:*}
-    if [ "${directive}" = "${out}" ]; then
-        # There is not directive specified
-        directive=0
-    fi
-    __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
-    __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
-
-    if [ $((directive & %[3]d)) -ne 0 ]; then
-        # Error code.  No completion.
-        __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
-        return
-    else
-        if [ $((directive & %[4]d)) -ne 0 ]; then
-            if [[ $(type -t compopt) = "builtin" ]]; then
-                __%[1]s_debug "${FUNCNAME[0]}: activating no space"
-                compopt -o nospace
-            fi
-        fi
-        if [ $((directive & %[5]d)) -ne 0 ]; then
-            if [[ $(type -t compopt) = "builtin" ]]; then
-                __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
-                compopt +o default
-            fi
-        fi
-
-        while IFS='' read -r comp; do
-            COMPREPLY+=("$comp")
-        done < <(compgen -W "${out[*]}" -- "$cur")
-    fi
-}
-
 __%[1]s_handle_reply()
 {
     __%[1]s_debug "${FUNCNAME[0]}"
-    local comp
     case $cur in
         -*)
             if [[ $(type -t compopt) = "builtin" ]]; then
@@ -134,9 +72,7 @@ __%[1]s_handle_reply()
             else
                 allflags=("${flags[*]} ${two_word_flags[*]}")
             fi
-            while IFS='' read -r comp; do
-                COMPREPLY+=("$comp")
-            done < <(compgen -W "${allflags[*]}" -- "$cur")
+            COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
             if [[ $(type -t compopt) = "builtin" ]]; then
                 [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
             fi
@@ -182,22 +118,14 @@ __%[1]s_handle_reply()
     completions=("${commands[@]}")
     if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
         completions=("${must_have_one_noun[@]}")
-    elif [[ -n "${has_completion_function}" ]]; then
-        # if a go completion function is provided, defer to that function
-        completions=()
-        __%[1]s_handle_go_custom_completion
     fi
     if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
         completions+=("${must_have_one_flag[@]}")
     fi
-    while IFS='' read -r comp; do
-        COMPREPLY+=("$comp")
-    done < <(compgen -W "${completions[*]}" -- "$cur")
+    COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
 
     if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
-        while IFS='' read -r comp; do
-            COMPREPLY+=("$comp")
-        done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+        COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
     fi
 
     if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
@@ -232,7 +160,7 @@ __%[1]s_handle_filename_extension_flag()
 __%[1]s_handle_subdirs_in_dir_flag()
 {
     local dir="$1"
-    pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+    pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
 }
 
 __%[1]s_handle_flag()
@@ -344,7 +272,7 @@ __%[1]s_handle_word()
     __%[1]s_handle_word
 }
 
-`, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
+`, name))
 }
 
 func writePostscript(buf *bytes.Buffer, name string) {
@@ -369,7 +297,6 @@ func writePostscript(buf *bytes.Buffer, name string) {
     local commands=("%[1]s")
     local must_have_one_flag=()
     local must_have_one_noun=()
-    local has_completion_function
     local last_command
     local nouns=()
 
@@ -470,22 +397,7 @@ func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
 	buf.WriteString(fmt.Sprintf(format, name))
 }
 
-// Setup annotations for go completions for registered flags
-func prepareCustomAnnotationsForFlags(cmd *Command) {
-	for flag := range flagCompletionFunctions {
-		// Make sure the completion script calls the __*_go_custom_completion function for
-		// every registered flag.  We need to do this here (and not when the flag was registered
-		// for completion) so that we can know the root command name for the prefix
-		// of ___go_custom_completion
-		if flag.Annotations == nil {
-			flag.Annotations = map[string][]string{}
-		}
-		flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
-	}
-}
-
 func writeFlags(buf *bytes.Buffer, cmd *Command) {
-	prepareCustomAnnotationsForFlags(cmd)
 	buf.WriteString(`    flags=()
     two_word_flags=()
     local_nonpersistent_flags=()
@@ -548,14 +460,8 @@ func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
 	buf.WriteString("    must_have_one_noun=()\n")
 	sort.Sort(sort.StringSlice(cmd.ValidArgs))
 	for _, value := range cmd.ValidArgs {
-		// Remove any description that may be included following a tab character.
-		// Descriptions are not supported by bash completion.
-		value = strings.Split(value, "\t")[0]
 		buf.WriteString(fmt.Sprintf("    must_have_one_noun+=(%q)\n", value))
 	}
-	if cmd.ValidArgsFunction != nil {
-		buf.WriteString("    has_completion_function=1\n")
-	}
 }
 
 func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index e61a3a6546b2e38ac0cc71728dcea090d565992b..4ac61ee132a48bebf069a27385e289e28f7c09ac 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -56,149 +56,7 @@ func main() {
 
 `out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
 
-## Have the completions code complete your 'nouns'
-
-### Static completion of nouns
-
-This method allows you to provide a pre-defined list of completion choices for your nouns using the `validArgs` field.
-For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
-
-```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
-
-cmd := &cobra.Command{
-	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
-	Short:   "Display one or many resources",
-	Long:    get_long,
-	Example: get_example,
-	Run: func(cmd *cobra.Command, args []string) {
-		err := RunGet(f, out, cmd, args)
-		util.CheckErr(err)
-	},
-	ValidArgs: validArgs,
-}
-```
-
-Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
-
-```bash
-# kubectl get [tab][tab]
-node                 pod                    replicationcontroller  service
-```
-
-### Plural form and shortcuts for nouns
-
-If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
-
-```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
-
-cmd := &cobra.Command{
-    ...
-	ValidArgs:  validArgs,
-	ArgAliases: argAliases
-}
-```
-
-The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
-the completion algorithm if entered manually, e.g. in:
-
-```bash
-# kubectl get rc [tab][tab]
-backend        frontend       database 
-```
-
-Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
-in this example again instead of the replication controllers.
-
-### Dynamic completion of nouns
-
-In some cases it is not possible to provide a list of possible completions in advance.  Instead, the list of completions must be determined at execution-time.  Cobra provides two ways of defining such dynamic completion of nouns. Note that both these methods can be used along-side each other as long as they are not both used for the same command.
-
-**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion.  It is therefore recommended to use *Custom Completions written in Go*.
-
-#### 1. Custom completions of nouns written in Go
-
-In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command.  Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
-Simplified code from `helm status` looks like:
-
-```go
-cmd := &cobra.Command{
-	Use:   "status RELEASE_NAME",
-	Short: "Display the status of the named release",
-	Long:  status_long,
-	RunE: func(cmd *cobra.Command, args []string) {
-		RunGet(args[0])
-	},
-	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-		if len(args) != 0 {
-			return nil, cobra.ShellCompDirectiveNoFileComp
-		}
-		return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
-	},
-}
-```
-Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
-Notice we put the `ValidArgsFunction` on the `status` subcommand. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like
-
-```bash
-# helm status [tab][tab]
-harbor notary rook thanos
-```
-You may have noticed the use of `cobra.ShellCompDirective`.  These directives are bit fields allowing to control some shell completion behaviors for your particular completion.  You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`
-```go
-// Indicates an error occurred and completions should be ignored.
-ShellCompDirectiveError
-// Indicates that the shell should not add a space after the completion,
-// even if there is a single completion provided.
-ShellCompDirectiveNoSpace
-// Indicates that the shell should not provide file completion even when
-// no completion is provided.
-// This currently does not work for zsh or bash < 4
-ShellCompDirectiveNoFileComp
-// Indicates that the shell will perform its default behavior after completions
-// have been provided (this implies !ShellCompDirectiveNoSpace && !ShellCompDirectiveNoFileComp).
-ShellCompDirectiveDefault
-```
-
-When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line.  You therefore don't need to do this parsing yourself.  For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
-
-##### Debugging
-
-Cobra achieves dynamic completions written in Go through the use of a hidden command called by the completion script.  To debug your Go completion code, you can call this hidden command directly:
-```bash
-# helm __complete status har
-harbor
-:4
-Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
-```
-***Important:*** If the noun to complete is empty, you must pass an empty parameter to the `__complete` command:
-```bash
-# helm __complete status ""
-harbor
-notary
-rook
-thanos
-:4
-Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
-```
-Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code.  You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
-```go
-// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
-// is set to a file path) and optionally prints to stderr.
-cobra.CompDebug(msg string, printToStdErr bool) {
-cobra.CompDebugln(msg string, printToStdErr bool)
-
-// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
-// is set to a file path) and to stderr.
-cobra.CompError(msg string)
-cobra.CompErrorln(msg string)
-```
-***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script.  Instead, use the cobra-provided debugging traces functions mentioned above.
-
-#### 2. Custom completions of nouns written in Bash
-
-This method allows you to inject bash functions into the completion script.  Those bash functions are responsible for providing the completion choices for your own completions.
+## Creating your own custom functions
 
 Some more actual code that works in kubernetes:
 
@@ -253,6 +111,58 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
 
 The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`.  `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`.  So it will call `__kubectl_parse_get pod`.  `__kubectl_parse_get` will actually call out to kubernetes and get any pods.  It will then set `COMPREPLY` to valid pods!
 
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		err := RunGet(f, out, cmd, args)
+		util.CheckErr(err)
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node                 pod                    replicationcontroller  service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+    ...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend        frontend       database 
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
 ## Mark flags as required
 
 Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab].  Marking a flag as 'Required' is incredibly easy.
@@ -301,45 +211,8 @@ So while there are many other files in the CWD it only shows me subdirs and thos
 
 # Specify custom flag completion
 
-As for nouns, Cobra provides two ways of defining dynamic completion of flags.  Note that both these methods can be used along-side each other as long as they are not both used for the same flag.
-
-**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion.  It is therefore recommended to use *Custom Completions written in Go*.
-
-## 1. Custom completions of flags written in Go
-
-To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function in the following manner:
-
-```go
-flagName := "output"
-cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-	return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
-})
-```
-Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated.  In our example this dynamic completion will give results like so:
-
-```bash
-# helm status --output [tab][tab]
-json table yaml
-```
-
-### Debugging
-
-You can also easily debug your Go completion code for flags:
-```bash
-# helm __complete status --output ""
-json
-table
-yaml
-:4
-Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
-```
-***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script.  Instead, use the cobra-provided debugging traces functions mentioned in the above section.
-
-## 2. Custom completions of flags written in Bash
-
-Alternatively, you can use bash code for flag custom completion. Similar to the filename
-completion and filtering using `cobra.BashCompFilenameExt`, you can specify
-a custom flag completion bash function with `cobra.BashCompCustom`:
+Similar to the filename completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion function with `cobra.BashCompCustom`:
 
 ```go
 	annotation := make(map[string][]string)
@@ -353,7 +226,7 @@ a custom flag completion bash function with `cobra.BashCompCustom`:
 	cmd.Flags().AddFlag(flag)
 ```
 
-In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+In addition, add the `__handle_namespace_flag` implementation in the `BashCompletionFunction`
 value, e.g.:
 
 ```bash
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index d01becc8fa6c687cd11f6b555d00ab6d87a76a31..6505c070b4037d7e780fa2f3d2594e8b12c66e5f 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -52,7 +52,7 @@ var EnableCommandSorting = true
 // if the CLI is started from explorer.exe.
 // To disable the mousetrap, just set this variable to blank string ("").
 // Works only on Microsoft Windows.
-var MousetrapHelpText = `This is a command line tool.
+var MousetrapHelpText string = `This is a command line tool.
 
 You need to open cmd.exe and run it from there.
 `
@@ -61,7 +61,7 @@ You need to open cmd.exe and run it from there.
 // if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
 // To disable the mousetrap, just set MousetrapHelpText to blank string ("").
 // Works only on Microsoft Windows.
-var MousetrapDisplayDuration = 5 * time.Second
+var MousetrapDisplayDuration time.Duration = 5 * time.Second
 
 // AddTemplateFunc adds a template function that's available to Usage and Help
 // template generation.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 88e6ed77d0e3a118aeb134f17578d86b9570b417..c7e8983034329ff4bf8e208dc7829a5d366a2f5f 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -17,7 +17,6 @@ package cobra
 
 import (
 	"bytes"
-	"context"
 	"fmt"
 	"io"
 	"os"
@@ -57,10 +56,6 @@ type Command struct {
 
 	// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
 	ValidArgs []string
-	// ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
-	// It is a dynamic version of using ValidArgs.
-	// Only one of ValidArgs and ValidArgsFunction can be used for a command.
-	ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
 
 	// Expected arguments
 	Args PositionalArgs
@@ -85,8 +80,7 @@ type Command struct {
 
 	// Version defines the version for this command. If this value is non-empty and the command does not
 	// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
-	// will print content of the "Version" variable. A shorthand "v" flag will also be added if the
-	// command does not define one.
+	// will print content of the "Version" variable.
 	Version string
 
 	// The *Run functions are executed in the following order:
@@ -146,11 +140,9 @@ type Command struct {
 	// TraverseChildren parses flags on all parents before executing child command.
 	TraverseChildren bool
 
-	// FParseErrWhitelist flag parse errors to be ignored
+	//FParseErrWhitelist flag parse errors to be ignored
 	FParseErrWhitelist FParseErrWhitelist
 
-	ctx context.Context
-
 	// commands is the list of commands supported by this program.
 	commands []*Command
 	// parent is a parent command for this command.
@@ -210,12 +202,6 @@ type Command struct {
 	errWriter io.Writer
 }
 
-// Context returns underlying command context. If command wasn't
-// executed with ExecuteContext Context returns Background context.
-func (c *Command) Context() context.Context {
-	return c.ctx
-}
-
 // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
 // particularly useful when testing.
 func (c *Command) SetArgs(a []string) {
@@ -242,7 +228,7 @@ func (c *Command) SetErr(newErr io.Writer) {
 	c.errWriter = newErr
 }
 
-// SetIn sets the source for input data
+// SetOut sets the source for input data
 // If newIn is nil, os.Stdin is used.
 func (c *Command) SetIn(newIn io.Reader) {
 	c.inReader = newIn
@@ -311,7 +297,7 @@ func (c *Command) ErrOrStderr() io.Writer {
 	return c.getErr(os.Stderr)
 }
 
-// InOrStdin returns input to stdin
+// ErrOrStderr returns output to stderr
 func (c *Command) InOrStdin() io.Reader {
 	return c.getIn(os.Stdin)
 }
@@ -383,8 +369,6 @@ func (c *Command) HelpFunc() func(*Command, []string) {
 	}
 	return func(c *Command, a []string) {
 		c.mergePersistentFlags()
-		// The help should be sent to stdout
-		// See https://github.com/spf13/cobra/issues/1002
 		err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
 		if err != nil {
 			c.Println(err)
@@ -873,13 +857,6 @@ func (c *Command) preRun() {
 	}
 }
 
-// ExecuteContext is the same as Execute(), but sets the ctx on the command.
-// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
-func (c *Command) ExecuteContext(ctx context.Context) error {
-	c.ctx = ctx
-	return c.Execute()
-}
-
 // Execute uses the args (os.Args[1:] by default)
 // and run through the command tree finding appropriate matches
 // for commands and then corresponding flags.
@@ -890,10 +867,6 @@ func (c *Command) Execute() error {
 
 // ExecuteC executes the command.
 func (c *Command) ExecuteC() (cmd *Command, err error) {
-	if c.ctx == nil {
-		c.ctx = context.Background()
-	}
-
 	// Regardless of what command execute is called on, run on Root only
 	if c.HasParent() {
 		return c.Root().ExecuteC()
@@ -915,9 +888,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
 		args = os.Args[1:]
 	}
 
-	// initialize the hidden command to be used for bash completion
-	c.initCompleteCmd(args)
-
 	var flags []string
 	if c.TraverseChildren {
 		cmd, flags, err = c.Traverse(args)
@@ -941,12 +911,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
 		cmd.commandCalledAs.name = cmd.Name()
 	}
 
-	// We have to pass global context to children command
-	// if context is present on the parent command.
-	if cmd.ctx == nil {
-		cmd.ctx = c.ctx
-	}
-
 	err = cmd.execute(flags)
 	if err != nil {
 		// Always show help if requested, even if SilenceErrors is in
@@ -1030,11 +994,7 @@ func (c *Command) InitDefaultVersionFlag() {
 		} else {
 			usage += c.Name()
 		}
-		if c.Flags().ShorthandLookup("v") == nil {
-			c.Flags().BoolP("version", "v", false, usage)
-		} else {
-			c.Flags().Bool("version", false, usage)
-		}
+		c.Flags().Bool("version", false, usage)
 	}
 }
 
@@ -1587,7 +1547,7 @@ func (c *Command) ParseFlags(args []string) error {
 	beforeErrorBufLen := c.flagErrorBuf.Len()
 	c.mergePersistentFlags()
 
-	// do it here after merging all flags and just before parse
+	//do it here after merging all flags and just before parse
 	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
 
 	err := c.Flags().Parse(args)
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
deleted file mode 100644
index ba57327c153fb17a7f34f88ee13eadf1030d4833..0000000000000000000000000000000000000000
--- a/vendor/github.com/spf13/cobra/custom_completions.go
+++ /dev/null
@@ -1,384 +0,0 @@
-package cobra
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/spf13/pflag"
-)
-
-const (
-	// ShellCompRequestCmd is the name of the hidden command that is used to request
-	// completion results from the program.  It is used by the shell completion scripts.
-	ShellCompRequestCmd = "__complete"
-	// ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
-	// completion results without their description.  It is used by the shell completion scripts.
-	ShellCompNoDescRequestCmd = "__completeNoDesc"
-)
-
-// Global map of flag completion functions.
-var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
-
-// ShellCompDirective is a bit map representing the different behaviors the shell
-// can be instructed to have once completions have been provided.
-type ShellCompDirective int
-
-const (
-	// ShellCompDirectiveError indicates an error occurred and completions should be ignored.
-	ShellCompDirectiveError ShellCompDirective = 1 << iota
-
-	// ShellCompDirectiveNoSpace indicates that the shell should not add a space
-	// after the completion even if there is a single completion provided.
-	ShellCompDirectiveNoSpace
-
-	// ShellCompDirectiveNoFileComp indicates that the shell should not provide
-	// file completion even when no completion is provided.
-	// This currently does not work for zsh or bash < 4
-	ShellCompDirectiveNoFileComp
-
-	// ShellCompDirectiveDefault indicates to let the shell perform its default
-	// behavior after completions have been provided.
-	ShellCompDirectiveDefault ShellCompDirective = 0
-)
-
-// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
-func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
-	flag := c.Flag(flagName)
-	if flag == nil {
-		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
-	}
-	if _, exists := flagCompletionFunctions[flag]; exists {
-		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
-	}
-	flagCompletionFunctions[flag] = f
-	return nil
-}
-
-// Returns a string listing the different directive enabled in the specified parameter
-func (d ShellCompDirective) string() string {
-	var directives []string
-	if d&ShellCompDirectiveError != 0 {
-		directives = append(directives, "ShellCompDirectiveError")
-	}
-	if d&ShellCompDirectiveNoSpace != 0 {
-		directives = append(directives, "ShellCompDirectiveNoSpace")
-	}
-	if d&ShellCompDirectiveNoFileComp != 0 {
-		directives = append(directives, "ShellCompDirectiveNoFileComp")
-	}
-	if len(directives) == 0 {
-		directives = append(directives, "ShellCompDirectiveDefault")
-	}
-
-	if d > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
-		return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
-	}
-	return strings.Join(directives, ", ")
-}
-
-// Adds a special hidden command that can be used to request custom completions.
-func (c *Command) initCompleteCmd(args []string) {
-	completeCmd := &Command{
-		Use:                   fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
-		Aliases:               []string{ShellCompNoDescRequestCmd},
-		DisableFlagsInUseLine: true,
-		Hidden:                true,
-		DisableFlagParsing:    true,
-		Args:                  MinimumNArgs(1),
-		Short:                 "Request shell completion choices for the specified command-line",
-		Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
-			"to request completion choices for the specified command-line.", ShellCompRequestCmd),
-		Run: func(cmd *Command, args []string) {
-			finalCmd, completions, directive, err := cmd.getCompletions(args)
-			if err != nil {
-				CompErrorln(err.Error())
-				// Keep going for multiple reasons:
-				// 1- There could be some valid completions even though there was an error
-				// 2- Even without completions, we need to print the directive
-			}
-
-			noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
-			for _, comp := range completions {
-				if noDescriptions {
-					// Remove any description that may be included following a tab character.
-					comp = strings.Split(comp, "\t")[0]
-				}
-				// Print each possible completion to stdout for the completion script to consume.
-				fmt.Fprintln(finalCmd.OutOrStdout(), comp)
-			}
-
-			if directive > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
-				directive = ShellCompDirectiveDefault
-			}
-
-			// As the last printout, print the completion directive for the completion script to parse.
-			// The directive integer must be that last character following a single colon (:).
-	// The completion script expects :<directive>
-			fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
-
-			// Print some helpful info to stderr for the user to understand.
-			// Output from stderr must be ignored by the completion script.
-			fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
-		},
-	}
-	c.AddCommand(completeCmd)
-	subCmd, _, err := c.Find(args)
-	if err != nil || subCmd.Name() != ShellCompRequestCmd {
-		// Only create this special command if it is actually being called.
-		// This reduces possible side-effects of creating such a command;
-		// for example, having this command would cause problems to a
-		// cobra program that only consists of the root command, since this
-		// command would cause the root command to suddenly have a subcommand.
-		c.RemoveCommand(completeCmd)
-	}
-}
-
-func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
-	var completions []string
-
-	// The last argument, which is not completely typed by the user,
-	// should not be part of the list of arguments
-	toComplete := args[len(args)-1]
-	trimmedArgs := args[:len(args)-1]
-
-	// Find the real command for which completion must be performed
-	finalCmd, finalArgs, err := c.Root().Find(trimmedArgs)
-	if err != nil {
-		// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
-		return c, completions, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
-	}
-
-	// When doing completion of a flag name, as soon as an argument starts with
-	// a '-' we know it is a flag.  We cannot use isFlagArg() here as it requires
-	// the flag to be complete
-	if len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
-		// We are completing a flag name
-		finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
-			completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
-		})
-		finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
-			completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
-		})
-
-		directive := ShellCompDirectiveDefault
-		if len(completions) > 0 {
-			if strings.HasSuffix(completions[0], "=") {
-				directive = ShellCompDirectiveNoSpace
-			}
-		}
-		return finalCmd, completions, directive, nil
-	}
-
-	var flag *pflag.Flag
-	if !finalCmd.DisableFlagParsing {
-		// We only do flag completion if we are allowed to parse flags
-		// This is important for commands which have requested to do their own flag completion.
-		flag, finalArgs, toComplete, err = checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
-		if err != nil {
-			// Error while attempting to parse flags
-			return finalCmd, completions, ShellCompDirectiveDefault, err
-		}
-	}
-
-	if flag == nil {
-		// Complete subcommand names
-		for _, subCmd := range finalCmd.Commands() {
-			if subCmd.IsAvailableCommand() && strings.HasPrefix(subCmd.Name(), toComplete) {
-				completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
-			}
-		}
-
-		if len(finalCmd.ValidArgs) > 0 {
-			// Always complete ValidArgs, even if we are completing a subcommand name.
-			// This is for commands that have both subcommands and ValidArgs.
-			for _, validArg := range finalCmd.ValidArgs {
-				if strings.HasPrefix(validArg, toComplete) {
-					completions = append(completions, validArg)
-				}
-			}
-
-			// If there are ValidArgs specified (even if they don't match), we stop completion.
-			// Only one of ValidArgs or ValidArgsFunction can be used for a single command.
-			return finalCmd, completions, ShellCompDirectiveNoFileComp, nil
-		}
-
-		// Always let the logic continue so as to add any ValidArgsFunction completions,
-		// even if we already found sub-commands.
-		// This is for commands that have subcommands but also specify a ValidArgsFunction.
-	}
-
-	// Parse the flags and extract the arguments to prepare for calling the completion function
-	if err = finalCmd.ParseFlags(finalArgs); err != nil {
-		return finalCmd, completions, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
-	}
-
-	// We only remove the flags from the arguments if DisableFlagParsing is not set.
-	// This is important for commands which have requested to do their own flag completion.
-	if !finalCmd.DisableFlagParsing {
-		finalArgs = finalCmd.Flags().Args()
-	}
-
-	// Find the completion function for the flag or command
-	var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
-	if flag != nil {
-		completionFn = flagCompletionFunctions[flag]
-	} else {
-		completionFn = finalCmd.ValidArgsFunction
-	}
-	if completionFn == nil {
-		// Go custom completion not supported/needed for this flag or command
-		return finalCmd, completions, ShellCompDirectiveDefault, nil
-	}
-
-	// Call the registered completion function to get the completions
-	comps, directive := completionFn(finalCmd, finalArgs, toComplete)
-	completions = append(completions, comps...)
-	return finalCmd, completions, directive, nil
-}
-
-func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
-	if nonCompletableFlag(flag) {
-		return []string{}
-	}
-
-	var completions []string
-	flagName := "--" + flag.Name
-	if strings.HasPrefix(flagName, toComplete) {
-		// Flag without the =
-		completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
-
-		if len(flag.NoOptDefVal) == 0 {
-			// Flag requires a value, so it can be suffixed with =
-			flagName += "="
-			completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
-		}
-	}
-
-	flagName = "-" + flag.Shorthand
-	if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
-		completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
-	}
-
-	return completions
-}
-
-func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
-	var flagName string
-	trimmedArgs := args
-	flagWithEqual := false
-	if isFlagArg(lastArg) {
-		if index := strings.Index(lastArg, "="); index >= 0 {
-			flagName = strings.TrimLeft(lastArg[:index], "-")
-			lastArg = lastArg[index+1:]
-			flagWithEqual = true
-		} else {
-			return nil, nil, "", errors.New("Unexpected completion request for flag")
-		}
-	}
-
-	if len(flagName) == 0 {
-		if len(args) > 0 {
-			prevArg := args[len(args)-1]
-			if isFlagArg(prevArg) {
-				// Only consider the case where the flag does not contain an =.
-				// If the flag contains an = it means it has already been fully processed,
-				// so we don't need to deal with it here.
-				if index := strings.Index(prevArg, "="); index < 0 {
-					flagName = strings.TrimLeft(prevArg, "-")
-
-					// Remove the uncompleted flag or else there could be an error created
-					// for an invalid value for that flag
-					trimmedArgs = args[:len(args)-1]
-				}
-			}
-		}
-	}
-
-	if len(flagName) == 0 {
-		// Not doing flag completion
-		return nil, trimmedArgs, lastArg, nil
-	}
-
-	flag := findFlag(finalCmd, flagName)
-	if flag == nil {
-		// Flag not supported by this command, nothing to complete
-		err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
-		return nil, nil, "", err
-	}
-
-	if !flagWithEqual {
-		if len(flag.NoOptDefVal) != 0 {
-			// We had assumed dealing with a two-word flag but the flag is a boolean flag.
-			// In that case, there is no value following it, so we are not really doing flag completion.
-			// Reset everything to do noun completion.
-			trimmedArgs = args
-			flag = nil
-		}
-	}
-
-	return flag, trimmedArgs, lastArg, nil
-}
-
-func findFlag(cmd *Command, name string) *pflag.Flag {
-	flagSet := cmd.Flags()
-	if len(name) == 1 {
-		// First convert the short flag into a long flag
-		// as the cmd.Flag() search only accepts long flags
-		if short := flagSet.ShorthandLookup(name); short != nil {
-			name = short.Name
-		} else {
-			set := cmd.InheritedFlags()
-			if short = set.ShorthandLookup(name); short != nil {
-				name = short.Name
-			} else {
-				return nil
-			}
-		}
-	}
-	return cmd.Flag(name)
-}
-
-// CompDebug prints the specified string to the same file as where the
-// completion script prints its logs.
-// Note that completion printouts should never be on stdout as they would
-// be wrongly interpreted as actual completion choices by the completion script.
-func CompDebug(msg string, printToStdErr bool) {
-	msg = fmt.Sprintf("[Debug] %s", msg)
-
-	// Such logs are only printed when the user has set the environment
-	// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
-	if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
-		f, err := os.OpenFile(path,
-			os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-		if err == nil {
-			defer f.Close()
-			f.WriteString(msg)
-		}
-	}
-
-	if printToStdErr {
-		// Must print to stderr for this not to be read by the completion script.
-		fmt.Fprintf(os.Stderr, msg)
-	}
-}
-
-// CompDebugln prints the specified string with a newline at the end
-// to the same file as where the completion script prints its logs.
-// Such logs are only printed when the user has set the environment
-// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
-func CompDebugln(msg string, printToStdErr bool) {
-	CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
-}
-
-// CompError prints the specified completion message to stderr.
-func CompError(msg string) {
-	msg = fmt.Sprintf("[Error] %s", msg)
-	CompDebug(msg, true)
-}
-
-// CompErrorln prints the specified completion message to stderr with a newline at the end.
-func CompErrorln(msg string) {
-	CompError(fmt.Sprintf("%s\n", msg))
-}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
deleted file mode 100644
index c83609c83b616504c3f812ebc3789a96260870bd..0000000000000000000000000000000000000000
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package cobra
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-)
-
-func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
-	compCmd := ShellCompRequestCmd
-	if !includeDesc {
-		compCmd = ShellCompNoDescRequestCmd
-	}
-	buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
-	buf.WriteString(fmt.Sprintf(`
-function __%[1]s_debug
-    set file "$BASH_COMP_DEBUG_FILE"
-    if test -n "$file"
-        echo "$argv" >> $file
-    end
-end
-
-function __%[1]s_perform_completion
-    __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
-
-    set args (string split -- " " "$argv")
-    set lastArg "$args[-1]"
-
-    __%[1]s_debug "args: $args"
-    __%[1]s_debug "last arg: $lastArg"
-
-    set emptyArg ""
-    if test -z "$lastArg"
-        __%[1]s_debug "Setting emptyArg"
-        set emptyArg \"\"
-    end
-    __%[1]s_debug "emptyArg: $emptyArg"
-
-    set requestComp "$args[1] %[2]s $args[2..-1] $emptyArg"
-    __%[1]s_debug "Calling $requestComp"
-
-    set results (eval $requestComp 2> /dev/null)
-    set comps $results[1..-2]
-    set directiveLine $results[-1]
-
-    # For Fish, when completing a flag with an = (e.g., <program> -n=<TAB>)
-    # completions must be prefixed with the flag
-    set flagPrefix (string match -r -- '-.*=' "$lastArg")
-
-    __%[1]s_debug "Comps: $comps"
-    __%[1]s_debug "DirectiveLine: $directiveLine"
-    __%[1]s_debug "flagPrefix: $flagPrefix"
-
-    for comp in $comps
-        printf "%%s%%s\n" "$flagPrefix" "$comp"
-    end
-
-    printf "%%s\n" "$directiveLine"
-end
-
-# This function does three things:
-# 1- Obtain the completions and store them in the global __%[1]s_comp_results
-# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
-#    and unset it otherwise
-# 3- Return true if the completion results are not empty
-function __%[1]s_prepare_completions
-    # Start fresh
-    set --erase __%[1]s_comp_do_file_comp
-    set --erase __%[1]s_comp_results
-
-    # Check if the command-line is already provided.  This is useful for testing.
-    if not set --query __%[1]s_comp_commandLine
-        set __%[1]s_comp_commandLine (commandline)
-    end
-    __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
-
-    set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
-    set --erase __%[1]s_comp_commandLine
-    __%[1]s_debug "Completion results: $results"
-
-    if test -z "$results"
-        __%[1]s_debug "No completion, probably due to a failure"
-        # Might as well do file completion, in case it helps
-        set --global __%[1]s_comp_do_file_comp 1
-        return 0
-    end
-
-    set directive (string sub --start 2 $results[-1])
-    set --global __%[1]s_comp_results $results[1..-2]
-
-    __%[1]s_debug "Completions are: $__%[1]s_comp_results"
-    __%[1]s_debug "Directive is: $directive"
-
-    if test -z "$directive"
-        set directive 0
-    end
-
-    set compErr (math (math --scale 0 $directive / %[3]d) %% 2)
-    if test $compErr -eq 1
-        __%[1]s_debug "Received error directive: aborting."
-        # Might as well do file completion, in case it helps
-        set --global __%[1]s_comp_do_file_comp 1
-        return 0
-    end
-
-    set nospace (math (math --scale 0 $directive / %[4]d) %% 2)
-    set nofiles (math (math --scale 0 $directive / %[5]d) %% 2)
-
-    __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
-
-    # Important not to quote the variable for count to work
-    set numComps (count $__%[1]s_comp_results)
-    __%[1]s_debug "numComps: $numComps"
-
-    if test $numComps -eq 1; and test $nospace -ne 0
-        # To support the "nospace" directive we trick the shell
-        # by outputting an extra, longer completion.
-        __%[1]s_debug "Adding second completion to perform nospace directive"
-        set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
-    end
-
-    if test $numComps -eq 0; and test $nofiles -eq 0
-        __%[1]s_debug "Requesting file completion"
-        set --global __%[1]s_comp_do_file_comp 1
-    end
-
-    # If we don't want file completion, we must return true even if there
-    # are no completions found.  This is because fish will perform the last
-    # completion command, even if its condition is false, if no other
-    # completion command was triggered
-    return (not set --query __%[1]s_comp_do_file_comp)
-end
-
-# Remove any pre-existing completions for the program since we will be handling all of them
-# TODO this cleanup is not sufficient.  Fish completions are only loaded once the user triggers
-# them, so the below deletion will not work as it is run too early.  What else can we do?
-complete -c %[1]s -e
-
-# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
-# is called first.  It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
-#
-# This completion will be run second as complete commands are added FILO.
-# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
-complete -c %[1]s -n 'set --query __%[1]s_comp_do_file_comp'
-
-# This completion will be run first as complete commands are added FILO.
-# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
-# It provides the program's completion choices.
-complete -c %[1]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
-
-`, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
-}
-
-// GenFishCompletion generates fish completion file and writes to the passed writer.
-func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
-	buf := new(bytes.Buffer)
-	genFishComp(buf, c.Name(), includeDesc)
-	_, err := buf.WriteTo(w)
-	return err
-}
-
-// GenFishCompletionFile generates fish completion file.
-func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
-	outFile, err := os.Create(filename)
-	if err != nil {
-		return err
-	}
-	defer outFile.Close()
-
-	return c.GenFishCompletion(outFile, includeDesc)
-}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
deleted file mode 100644
index 6bfe5f88ef272805c9ba657f27cfb36d0e4ab53e..0000000000000000000000000000000000000000
--- a/vendor/github.com/spf13/cobra/fish_completions.md
+++ /dev/null
@@ -1,7 +0,0 @@
-## Generating Fish Completions for your own cobra.Command
-
-Cobra supports native Fish completions generated from the root `cobra.Command`.  You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information.  You can choose to make this option configurable by your users.
-
-### Limitations
-
-* Custom completions implemented using the `ValidArgsFunction` and `RegisterFlagCompletionFunc()` are supported automatically but the ones implemented in Bash scripting are not.
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
index dea1030ba43110ec0e8784597cc2cda577ec7ece..9a9eb65a3703e4ad1f522884d35e2de17a680924 100644
--- a/vendor/github.com/spf13/cobra/go.mod
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -3,10 +3,11 @@ module github.com/spf13/cobra
 go 1.12
 
 require (
-	github.com/cpuguy83/go-md2man/v2 v2.0.0
+	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/cpuguy83/go-md2man v1.0.10
 	github.com/inconshreveable/mousetrap v1.0.0
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/spf13/pflag v1.0.3
-	github.com/spf13/viper v1.4.0
+	github.com/spf13/viper v1.3.2
 	gopkg.in/yaml.v2 v2.2.2
 )
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
index 3aaa2ac0fd62841e4b5d57bef9e0ea901ba47fd9..9761f4d03fe77f399ec413ea96285353bfece54b 100644
--- a/vendor/github.com/spf13/cobra/go.sum
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -1,91 +1,31 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
@@ -94,56 +34,18 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
index 4b0421cf9ee47908beae4b4648babb75b09ee028..f38ec5956b6400841b45ca7c44940d9312e37e9e 100644
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 49370eb167420ee5bf1802a92c713a31fddece60..e0364e9e7f6412dab01ad916a85a192e0476799a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -32,8 +32,7 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
 	return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
 }
 
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
 func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -93,7 +92,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
 // EqualValuesf asserts that two objects are equal or convertable to the same types
 // and equal.
 //
-//    assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//    assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
 func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -127,7 +126,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
 
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//    assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
 func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -161,8 +160,7 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
 	return False(t, value, append([]interface{}{msg}, args...)...)
 }
 
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
 func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -173,7 +171,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
 // Greaterf asserts that the first element is greater than the second
 //
 //    assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-//    assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+//    assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
 //    assert.Greaterf(t, "b", "a", "error message %s", "formatted")
 func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -225,7 +223,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
 //
 //  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
-// Returns whether the assertion was successful (true) or not (false).
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
 func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -237,7 +235,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
 //
 //  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
-// Returns whether the assertion was successful (true) or not (false).
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
 func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -245,18 +243,6 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
 	return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
 }
 
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-//  assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
-}
-
 // HTTPSuccessf asserts that a specified handler returns a success status code.
 //
 //  assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
@@ -271,7 +257,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
 
 // Implementsf asserts that an object is implemented by the specified interface.
 //
-//    assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//    assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
 func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -281,7 +267,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// 	 assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
 func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -339,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
 	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
 // Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() not accept.
 //
@@ -353,7 +347,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
 // Lessf asserts that the first element is less than the second
 //
 //    assert.Lessf(t, 1, 2, "error message %s", "formatted")
-//    assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+//    assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
 //    assert.Lessf(t, "a", "b", "error message %s", "formatted")
 func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -375,17 +369,6 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
 	return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
 }
 
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-//    assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
-}
-
 // Nilf asserts that the specified object is nil.
 //
 //    assert.Nilf(t, err, "error message %s", "formatted")
@@ -396,15 +379,6 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool
 	return Nil(t, object, append([]interface{}{msg}, args...)...)
 }
 
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
 // NoErrorf asserts that a function returned no error (i.e. `nil`).
 //
 //   actualObj, err := SomeFunction()
@@ -418,15 +392,6 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
 	return NoError(t, err, append([]interface{}{msg}, args...)...)
 }
 
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
 // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
@@ -466,16 +431,6 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
 	return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-//    assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
-func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
 // NotNilf asserts that the specified object is not nil.
 //
 //    assert.NotNilf(t, err, "error message %s", "formatted")
@@ -498,7 +453,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
 //  assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
 func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -507,19 +462,6 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
 	return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
 }
 
-// NotSamef asserts that two pointers do not reference the same object.
-//
-//    assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
 // NotSubsetf asserts that the specified list(array, slice...) contains not all
 // elements given in the specified subset(array, slice...).
 //
@@ -549,18 +491,6 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool
 	return Panics(t, f, append([]interface{}{msg}, args...)...)
 }
 
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-//   assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
-}
-
 // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
@@ -574,7 +504,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
 //  assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
 func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -627,14 +557,6 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
 	return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
 }
 
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
 // Zerof asserts that i is the zero value for its type.
 func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
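As a quick aside on the pattern above (a sketch, not part of the vendored sources): every ...f wrapper in assertion_format.go simply prepends its format string to the variadic arguments via append([]interface{}{msg}, args...) and delegates to the base assertion, so the two calls below behave identically. Package and test names here are placeholders.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwarding(t *testing.T) {
	x := 41

	// Formatted variant: msg and args are dedicated parameters.
	assert.Equalf(t, 42, x, "unexpected value: %d", x)

	// Base variant: the same msg and args ride in the variadic tail,
	// which is exactly what Equalf builds internally.
	assert.Equal(t, 42, x, "unexpected value: %d", x)
}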
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 9db889427a72000b50adf767245ca1bdaf2d3bc7..26830403a9b356e2206e4c0c7e4e22ff00d14aa4 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -53,8 +53,7 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
 	return Containsf(a.t, s, contains, msg, args...)
 }
 
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
 func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -62,8 +61,7 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
 	return DirExists(a.t, path, msgAndArgs...)
 }
 
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
 func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -169,7 +167,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
 // EqualValuesf asserts that two objects are equal or convertible to the same types
 // and equal.
 //
-//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
 func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -251,7 +249,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
 
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
 func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -311,8 +309,7 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
 	return Falsef(a.t, value, msg, args...)
 }
 
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
 func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -320,8 +317,7 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
 	return FileExists(a.t, path, msgAndArgs...)
 }
 
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
 func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -370,7 +366,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
 // Greaterf asserts that the first element is greater than the second
 //
 //    a.Greaterf(2, 1, "error message %s", "formatted")
-//    a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+//    a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
 //    a.Greaterf("b", "a", "error message %s", "formatted")
 func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -447,7 +443,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
 //
 //  a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
-// Returns whether the assertion was successful (true) or not (false).
+// Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -471,7 +467,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
 //
 //  a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
-// Returns whether the assertion was successful (true) or not (false).
+// Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -479,30 +475,6 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
 	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
 }
 
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-//  a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
-}
-
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-//  a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
-}
-
 // HTTPSuccess asserts that a specified handler returns a success status code.
 //
 //  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -539,7 +511,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
 
 // Implementsf asserts that an object is implemented by the specified interface.
 //
-//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
 func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -549,7 +521,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDelta(math.Pi, 22/7.0, 0.01)
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
 func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -591,7 +563,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// 	 a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
 func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -667,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
 	return JSONEqf(a.t, expected, actual, msg, args...)
 }
 
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
 // Len asserts that the specified object has specific length.
 // Len also fails if the object has a type that len() does not accept.
 //
@@ -730,7 +718,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
 // Lessf asserts that the first element is less than the second
 //
 //    a.Lessf(1, 2, "error message %s", "formatted")
-//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
 //    a.Lessf("a", "b", "error message %s", "formatted")
 func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -739,28 +727,6 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
 	return Lessf(a.t, e1, e2, msg, args...)
 }
 
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-//    a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
-func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Never(a.t, condition, waitFor, tick, msgAndArgs...)
-}
-
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-//    a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Neverf(a.t, condition, waitFor, tick, msg, args...)
-}
-
 // Nil asserts that the specified object is nil.
 //
 //    a.Nil(err)
@@ -781,24 +747,6 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b
 	return Nilf(a.t, object, msg, args...)
 }
 
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoDirExists(a.t, path, msgAndArgs...)
-}
-
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoDirExistsf(a.t, path, msg, args...)
-}
-
 // NoError asserts that a function returned no error (i.e. `nil`).
 //
 //   actualObj, err := SomeFunction()
@@ -825,24 +773,6 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
 	return NoErrorf(a.t, err, msg, args...)
 }
 
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoFileExists(a.t, path, msgAndArgs...)
-}
-
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoFileExistsf(a.t, path, msg, args...)
-}
-
 // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
@@ -908,26 +838,6 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
 	return NotEqual(a.t, expected, actual, msgAndArgs...)
 }
 
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-//    a.NotEqualValues(obj1, obj2)
-func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotEqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-//    a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
-func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotEqualValuesf(a.t, expected, actual, msg, args...)
-}
-
 // NotEqualf asserts that the specified values are NOT equal.
 //
 //    a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -994,7 +904,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
 //  a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1003,32 +913,6 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
 	return NotRegexpf(a.t, rx, str, msg, args...)
 }
 
-// NotSame asserts that two pointers do not reference the same object.
-//
-//    a.NotSame(ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotSame(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotSamef asserts that two pointers do not reference the same object.
-//
-//    a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotSamef(a.t, expected, actual, msg, args...)
-}
-
 // NotSubset asserts that the specified list(array, slice...) contains not all
 // elements given in the specified subset(array, slice...).
 //
@@ -1077,30 +961,6 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	return Panics(a.t, f, msgAndArgs...)
 }
 
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-//   a.PanicsWithError("crazy error", func(){ GoCrazy() })
-func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return PanicsWithError(a.t, errString, f, msgAndArgs...)
-}
-
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-//   a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return PanicsWithErrorf(a.t, errString, f, msg, args...)
-}
-
 // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
@@ -1146,7 +1006,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
 //  a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1243,22 +1103,6 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
 	return WithinDurationf(a.t, expected, actual, delta, msg, args...)
 }
 
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
 // Zero asserts that i is the zero value for its type.
 func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
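A usage sketch for the YAMLEq/YAMLEqf pair relocated above (illustrative only; names are placeholders): equivalence is judged on the parsed documents, so key order and flow vs. block style do not matter.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestYAMLEq(t *testing.T) {
	expected := "a: 1\nb:\n- x\n- y\n"
	actual := "b: [x, y]\na: 1\n" // same document, different order and style

	assert.YAMLEq(t, expected, actual)

	// The *Assertions method form forwards to the same package-level function.
	a := assert.New(t)
	a.YAMLEqf(expected, actual, "mismatch in %s", "config")
}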
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
similarity index 62%
rename from vendor/github.com/stretchr/testify/assert/assertion_compare.go
rename to vendor/github.com/stretchr/testify/assert/assertion_order.go
index dc200395ceb700c8d5c0858abfce35223c31013e..15a486ca6e2466011a3b72d3ad8e93ca409ac4b8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -5,28 +5,20 @@ import (
 	"reflect"
 )
 
-type CompareType int
-
-const (
-	compareLess CompareType = iota - 1
-	compareEqual
-	compareGreater
-)
-
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
 	switch kind {
 	case reflect.Int:
 		{
 			intobj1 := obj1.(int)
 			intobj2 := obj2.(int)
 			if intobj1 > intobj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if intobj1 == intobj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if intobj1 < intobj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Int8:
@@ -34,13 +26,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			int8obj1 := obj1.(int8)
 			int8obj2 := obj2.(int8)
 			if int8obj1 > int8obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if int8obj1 == int8obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if int8obj1 < int8obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Int16:
@@ -48,13 +40,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			int16obj1 := obj1.(int16)
 			int16obj2 := obj2.(int16)
 			if int16obj1 > int16obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if int16obj1 == int16obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if int16obj1 < int16obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Int32:
@@ -62,13 +54,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			int32obj1 := obj1.(int32)
 			int32obj2 := obj2.(int32)
 			if int32obj1 > int32obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if int32obj1 == int32obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if int32obj1 < int32obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Int64:
@@ -76,13 +68,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			int64obj1 := obj1.(int64)
 			int64obj2 := obj2.(int64)
 			if int64obj1 > int64obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if int64obj1 == int64obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if int64obj1 < int64obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Uint:
@@ -90,13 +82,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			uintobj1 := obj1.(uint)
 			uintobj2 := obj2.(uint)
 			if uintobj1 > uintobj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if uintobj1 == uintobj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if uintobj1 < uintobj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Uint8:
@@ -104,13 +96,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			uint8obj1 := obj1.(uint8)
 			uint8obj2 := obj2.(uint8)
 			if uint8obj1 > uint8obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if uint8obj1 == uint8obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if uint8obj1 < uint8obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Uint16:
@@ -118,13 +110,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			uint16obj1 := obj1.(uint16)
 			uint16obj2 := obj2.(uint16)
 			if uint16obj1 > uint16obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if uint16obj1 == uint16obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if uint16obj1 < uint16obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Uint32:
@@ -132,13 +124,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			uint32obj1 := obj1.(uint32)
 			uint32obj2 := obj2.(uint32)
 			if uint32obj1 > uint32obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if uint32obj1 == uint32obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if uint32obj1 < uint32obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Uint64:
@@ -146,13 +138,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			uint64obj1 := obj1.(uint64)
 			uint64obj2 := obj2.(uint64)
 			if uint64obj1 > uint64obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if uint64obj1 == uint64obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if uint64obj1 < uint64obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Float32:
@@ -160,13 +152,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			float32obj1 := obj1.(float32)
 			float32obj2 := obj2.(float32)
 			if float32obj1 > float32obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if float32obj1 == float32obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if float32obj1 < float32obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.Float64:
@@ -174,13 +166,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			float64obj1 := obj1.(float64)
 			float64obj2 := obj2.(float64)
 			if float64obj1 > float64obj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if float64obj1 == float64obj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if float64obj1 < float64obj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	case reflect.String:
@@ -188,18 +180,18 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 			stringobj1 := obj1.(string)
 			stringobj2 := obj2.(string)
 			if stringobj1 > stringobj2 {
-				return compareGreater, true
+				return -1, true
 			}
 			if stringobj1 == stringobj2 {
-				return compareEqual, true
+				return 0, true
 			}
 			if stringobj1 < stringobj2 {
-				return compareLess, true
+				return 1, true
 			}
 		}
 	}
 
-	return compareEqual, false
+	return 0, false
 }
 
 // Greater asserts that the first element is greater than the second
@@ -208,7 +200,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 //    assert.Greater(t, float64(2), float64(1))
 //    assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != -1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
 }
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -218,7 +229,26 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
 //    assert.GreaterOrEqual(t, "b", "a")
 //    assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != -1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
 }
 
 // Less asserts that the first element is less than the second
@@ -227,7 +257,26 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
 //    assert.Less(t, float64(1), float64(2))
 //    assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != 1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
 }
 
 // LessOrEqual asserts that the first element is less than or equal to the second
@@ -237,10 +286,6 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
 //    assert.LessOrEqual(t, "a", "b")
 //    assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
-}
-
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
@@ -251,24 +296,14 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
 		return Fail(t, "Elements should be the same type", msgAndArgs...)
 	}
 
-	compareResult, isComparable := compare(e1, e2, e1Kind)
+	res, isComparable := compare(e1, e2, e1Kind)
 	if !isComparable {
 		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
 	}
 
-	if !containsValue(allowedComparesResults, compareResult) {
-		return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+	if res != 1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
 	}
 
 	return true
 }
-
-func containsValue(values []CompareType, value CompareType) bool {
-	for _, v := range values {
-		if v == value {
-			return true
-		}
-	}
-
-	return false
-}
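One behavior worth flagging in the restored assertion_order.go: this older compare inverts the usual comparator sign, returning -1 when the first operand is greater and 1 when it is less, which is why Greater fails on res != -1 and Less fails on res != 1. A standalone sketch of that convention (hypothetical helper, not part of the diff):

package example

import "fmt"

// cmp mirrors the vendored convention: -1 means the first operand is
// greater, 0 means equal, 1 means less -- the reverse of the usual sign.
func cmp(a, b int) int {
	switch {
	case a > b:
		return -1
	case a < b:
		return 1
	default:
		return 0
	}
}

func main() {
	fmt.Println(cmp(2, 1)) // -1: Greater(t, 2, 1) passes (res == -1)
	fmt.Println(cmp(1, 2)) // 1: Less(t, 1, 2) passes (res == 1)
	fmt.Println(cmp(2, 2)) // 0: accepted by GreaterOrEqual and LessOrEqual
}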
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 914a10d83afca7aeae680dd08d7a5eb74fa6929c..044da8b01f22644b8d10443c08a2d9ef073b315c 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -11,7 +11,6 @@ import (
 	"reflect"
 	"regexp"
 	"runtime"
-	"runtime/debug"
 	"strings"
 	"time"
 	"unicode"
@@ -19,10 +18,10 @@ import (
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/pmezard/go-difflib/difflib"
-	yaml "gopkg.in/yaml.v3"
+	yaml "gopkg.in/yaml.v2"
 )
 
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
 
 // TestingT is an interface wrapper around *testing.T
 type TestingT interface {
@@ -45,7 +44,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
 // for table driven tests.
 type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
 
-// Comparison is a custom function that returns true on success and false on failure
+// Comparison is a custom function that returns true on success and false on failure
 type Comparison func() (success bool)
 
 /*
@@ -104,11 +103,11 @@ the problem actually occurred in calling code.*/
 // failed.
 func CallerInfo() []string {
 
-	var pc uintptr
-	var ok bool
-	var file string
-	var line int
-	var name string
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
 
 	callers := []string{}
 	for i := 0; ; i++ {
@@ -352,19 +351,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
 
 }
 
-// validateEqualArgs checks whether provided arguments can be safely used in the
-// Equal/NotEqual functions.
-func validateEqualArgs(expected, actual interface{}) error {
-	if expected == nil && actual == nil {
-		return nil
-	}
-
-	if isFunction(expected) || isFunction(actual) {
-		return errors.New("cannot take func type as argument")
-	}
-	return nil
-}
-
 // Same asserts that two pointers reference the same object.
 //
 //    assert.Same(t, ptr1, ptr2)
@@ -376,49 +362,24 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
 		h.Helper()
 	}
 
-	if !samePointers(expected, actual) {
-		return Fail(t, fmt.Sprintf("Not same: \n"+
-			"expected: %p %#v\n"+
-			"actual  : %p %#v", expected, expected, actual, actual), msgAndArgs...)
-	}
-
-	return true
-}
-
-// NotSame asserts that two pointers do not reference the same object.
-//
-//    assert.NotSame(t, ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-
-	if samePointers(expected, actual) {
-		return Fail(t, fmt.Sprintf(
-			"Expected and actual point to the same object: %p %#v",
-			expected, expected), msgAndArgs...)
+	expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual)
+	if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
+		return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
 	}
-	return true
-}
 
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
-	firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
-	if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
-		return false
+	expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
+	if expectedType != actualType {
+		return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
+			expectedType, actualType), msgAndArgs...)
 	}
 
-	firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
-	if firstType != secondType {
-		return false
+	if expected != actual {
+		return Fail(t, fmt.Sprintf("Not same: \n"+
+			"expected: %p %#v\n"+
+			"actual  : %p %#v", expected, expected, actual, actual), msgAndArgs...)
 	}
 
-	// compare pointer addresses
-	return first == second
+	return true
 }
 
 // formatUnequalValues takes two values of arbitrary types and returns string
@@ -429,27 +390,12 @@ func samePointers(first, second interface{}) bool {
 // to a type conversion in the Go grammar.
 func formatUnequalValues(expected, actual interface{}) (e string, a string) {
 	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
-		return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
-			fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
-	}
-	switch expected.(type) {
-	case time.Duration:
-		return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+		return fmt.Sprintf("%T(%#v)", expected, expected),
+			fmt.Sprintf("%T(%#v)", actual, actual)
 	}
-	return truncatingFormat(expected), truncatingFormat(actual)
-}
 
-// truncatingFormat formats the data and truncates it if it's too long.
-//
-// This helps keep formatted error messages lines from exceeding the
-// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
-func truncatingFormat(data interface{}) string {
-	value := fmt.Sprintf("%#v", data)
-	max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed.
-	if len(value) > max {
-		value = value[0:max] + "<... truncated>"
-	}
-	return value
+	return fmt.Sprintf("%#v", expected),
+		fmt.Sprintf("%#v", actual)
 }
 
 // EqualValues asserts that two objects are equal or convertible to the same types
@@ -496,12 +442,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
 //
 //    assert.NotNil(t, err)
 func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	if !isNil(object) {
-		return true
-	}
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
+	if !isNil(object) {
+		return true
+	}
 	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
 }
 
@@ -542,12 +488,12 @@ func isNil(object interface{}) bool {
 //
 //    assert.Nil(t, err)
 func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	if isNil(object) {
-		return true
-	}
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
+	if isNil(object) {
+		return true
+	}
 	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
 }
 
@@ -584,11 +530,12 @@ func isEmpty(object interface{}) bool {
 //
 //  assert.Empty(t, obj)
 func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
 	pass := isEmpty(object)
 	if !pass {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
 		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
 	}
 
@@ -603,11 +550,12 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 //    assert.Equal(t, "two", obj[1])
 //  }
 func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
 	pass := !isEmpty(object)
 	if !pass {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
 		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
 	}
 
@@ -650,10 +598,16 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
 //
 //    assert.True(t, myBool)
 func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
-	if !value {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != true {
 		return Fail(t, "Should be true", msgAndArgs...)
 	}
 
@@ -665,10 +619,11 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
 //
 //    assert.False(t, myBool)
 func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
-	if value {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != false {
 		return Fail(t, "Should be false", msgAndArgs...)
 	}
 
@@ -699,21 +654,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
 
 }
 
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-//    assert.NotEqualValues(t, obj1, obj2)
-func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-
-	if ObjectsAreEqualValues(expected, actual) {
-		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
-	}
-
-	return true
-}
-
 // containsElement tries to loop over the list, checking if the list includes the element.
 // return (false, false) if impossible.
 // return (true, false) if element was not found.
@@ -766,10 +706,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
 
 	ok, found := includeElement(s, contains)
 	if !ok {
-		return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
 	}
 	if !found {
-		return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
 	}
 
 	return true
@@ -900,39 +840,27 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
 		return true
 	}
 
-	if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
-		return false
-	}
-
-	extraA, extraB := diffLists(listA, listB)
+	aKind := reflect.TypeOf(listA).Kind()
+	bKind := reflect.TypeOf(listB).Kind()
 
-	if len(extraA) == 0 && len(extraB) == 0 {
-		return true
+	if aKind != reflect.Array && aKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
 	}
 
-	return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
-}
-
-// isList checks that the provided value is array or slice.
-func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
-	kind := reflect.TypeOf(list).Kind()
-	if kind != reflect.Array && kind != reflect.Slice {
-		return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
-			msgAndArgs...)
+	if bKind != reflect.Array && bKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
 	}
-	return true
-}
 
-// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
-// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
-// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
-func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
 	aValue := reflect.ValueOf(listA)
 	bValue := reflect.ValueOf(listB)
 
 	aLen := aValue.Len()
 	bLen := bValue.Len()
 
+	if aLen != bLen {
+		return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+	}
+
 	// Mark indexes in bValue that we already used
 	visited := make([]bool, bLen)
 	for i := 0; i < aLen; i++ {
@@ -949,38 +877,11 @@ func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
 			}
 		}
 		if !found {
-			extraA = append(extraA, element)
-		}
-	}
-
-	for j := 0; j < bLen; j++ {
-		if visited[j] {
-			continue
+			return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
 		}
-		extraB = append(extraB, bValue.Index(j).Interface())
 	}
 
-	return
-}
-
-func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
-	var msg bytes.Buffer
-
-	msg.WriteString("elements differ")
-	if len(extraA) > 0 {
-		msg.WriteString("\n\nextra elements in list A:\n")
-		msg.WriteString(spewConfig.Sdump(extraA))
-	}
-	if len(extraB) > 0 {
-		msg.WriteString("\n\nextra elements in list B:\n")
-		msg.WriteString(spewConfig.Sdump(extraB))
-	}
-	msg.WriteString("\n\nlistA:\n")
-	msg.WriteString(spewConfig.Sdump(listA))
-	msg.WriteString("\n\nlistB:\n")
-	msg.WriteString(spewConfig.Sdump(listB))
-
-	return msg.String()
+	return true
 }
 
 // Condition uses a Comparison to assert a complex condition.
@@ -1000,17 +901,15 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
 type PanicTestFunc func()
 
 // didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}, string) {
+func didPanic(f PanicTestFunc) (bool, interface{}) {
 
 	didPanic := false
 	var message interface{}
-	var stack string
 	func() {
 
 		defer func() {
 			if message = recover(); message != nil {
 				didPanic = true
-				stack = string(debug.Stack())
 			}
 		}()
 
@@ -1019,7 +918,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}, string) {
 
 	}()
 
-	return didPanic, message, stack
+	return didPanic, message
 
 }
 
@@ -1031,7 +930,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 		h.Helper()
 	}
 
-	if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
 		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
 	}
 
@@ -1047,34 +946,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr
 		h.Helper()
 	}
 
-	funcDidPanic, panicValue, panickedStack := didPanic(f)
+	funcDidPanic, panicValue := didPanic(f)
 	if !funcDidPanic {
 		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
 	}
 	if panicValue != expected {
-		return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
-	}
-
-	return true
-}
-
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-//   assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
-func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-
-	funcDidPanic, panicValue, panickedStack := didPanic(f)
-	if !funcDidPanic {
-		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
-	}
-	panicErr, ok := panicValue.(error)
-	if !ok || panicErr.Error() != errString {
-		return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...)
 	}
 
 	return true
@@ -1088,8 +965,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 		h.Helper()
 	}
 
-	if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
-		return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
+	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...)
 	}
 
 	return true
@@ -1116,8 +993,6 @@ func toFloat(x interface{}) (float64, bool) {
 	xok := true
 
 	switch xn := x.(type) {
-	case uint:
-		xf = float64(xn)
 	case uint8:
 		xf = float64(xn)
 	case uint16:
@@ -1139,7 +1014,7 @@ func toFloat(x interface{}) (float64, bool) {
 	case float32:
 		xf = float64(xn)
 	case float64:
-		xf = xn
+		xf = float64(xn)
 	case time.Duration:
 		xf = float64(xn)
 	default:
@@ -1151,7 +1026,7 @@ func toFloat(x interface{}) (float64, bool) {
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
 func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1253,9 +1128,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
 	if !aok {
 		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
 	}
-	if math.IsNaN(af) {
-		return 0, errors.New("expected value must not be NaN")
-	}
 	if af == 0 {
 		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
 	}
@@ -1263,9 +1135,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
 	if !bok {
 		return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
 	}
-	if math.IsNaN(bf) {
-		return 0, errors.New("actual value must not be NaN")
-	}
 
 	return math.Abs(af-bf) / math.Abs(af), nil
 }
@@ -1275,9 +1144,6 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	if math.IsNaN(epsilon) {
-		return Fail(t, "epsilon must not be NaN")
-	}
 	actualEpsilon, err := calcRelativeError(expected, actual)
 	if err != nil {
 		return Fail(t, err.Error(), msgAndArgs...)
@@ -1325,10 +1191,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
 //	   assert.Equal(t, expectedObj, actualObj)
 //   }
 func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	if err != nil {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
 		return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
 	}
 
@@ -1342,10 +1208,11 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
 //	   assert.Equal(t, expectedError, err)
 //   }
 func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
 	if err == nil {
-		if h, ok := t.(tHelper); ok {
-			h.Helper()
-		}
 		return Fail(t, "An error is expected but got nil.", msgAndArgs...)
 	}
 
@@ -1447,8 +1314,7 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
 	return true
 }
 
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
 func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1466,24 +1332,7 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
 	return true
 }
 
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	info, err := os.Lstat(path)
-	if err != nil {
-		return true
-	}
-	if info.IsDir() {
-		return true
-	}
-	return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
 func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1501,25 +1350,6 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
 	return true
 }
 
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	info, err := os.Lstat(path)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return true
-		}
-		return true
-	}
-	if !info.IsDir() {
-		return true
-	}
-	return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
-}
-
 // JSONEq asserts that two JSON strings are equivalent.
 //
 //  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@@ -1609,6 +1439,15 @@ func diff(expected interface{}, actual interface{}) string {
 	return "\n\nDiff:\n" + diff
 }
 
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+	if isFunction(expected) || isFunction(actual) {
+		return errors.New("cannot take func type as argument")
+	}
+	return nil
+}
+
 func isFunction(arg interface{}) bool {
 	if arg == nil {
 		return false
@@ -1621,7 +1460,6 @@ var spewConfig = spew.ConfigState{
 	DisablePointerAddresses: true,
 	DisableCapacities:       true,
 	SortKeys:                true,
-	DisableMethods:          true,
 }
 
 type tHelper interface {
@@ -1637,59 +1475,24 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
 		h.Helper()
 	}
 
-	ch := make(chan bool, 1)
-
 	timer := time.NewTimer(waitFor)
-	defer timer.Stop()
-
 	ticker := time.NewTicker(tick)
+	checkPassed := make(chan bool)
+	defer timer.Stop()
 	defer ticker.Stop()
-
-	for tick := ticker.C; ; {
+	defer close(checkPassed)
+	for {
 		select {
 		case <-timer.C:
 			return Fail(t, "Condition never satisfied", msgAndArgs...)
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
-		case v := <-ch:
-			if v {
+		case result := <-checkPassed:
+			if result {
 				return true
 			}
-			tick = ticker.C
-		}
-	}
-}
-
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-//    assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
-func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-
-	ch := make(chan bool, 1)
-
-	timer := time.NewTimer(waitFor)
-	defer timer.Stop()
-
-	ticker := time.NewTicker(tick)
-	defer ticker.Stop()
-
-	for tick := ticker.C; ; {
-		select {
-		case <-timer.C:
-			return true
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
-		case v := <-ch:
-			if v {
-				return Fail(t, "Condition satisfied", msgAndArgs...)
-			}
-			tick = ticker.C
+		case <-ticker.C:
+			go func() {
+				checkPassed <- condition()
+			}()
 		}
 	}
 }
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
index df189d2348f17a3d16888e2581d2a3b7a9d47e93..9ad56851d9714967b6071772ff44182a1e86e431 100644
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
 	}
 }
 
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 4ed341dd28934c102aa7a40c74ee24b6555c1db1..df46fa777acb3b8bc6c58371d8c48375a6a623de 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -33,6 +33,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
 		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
 	}
 
 	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
@@ -55,6 +56,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
 		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
 	}
 
 	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
@@ -77,6 +79,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
 		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
 	}
 
 	isErrorCode := code >= http.StatusBadRequest
@@ -87,28 +90,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
 	return isErrorCode
 }
 
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-//  assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	code, err := httpCode(handler, method, url, values)
-	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
-	}
-
-	successful := code == statuscode
-	if !successful {
-		Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
-	}
-
-	return successful
-}
-
 // HTTPBody is a helper that returns HTTP body of the response. It returns
 // empty string if building a new request fails.
 func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..273490790955c1af9ce4a4f5300cf194f69a0476
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitignore
@@ -0,0 +1,15 @@
+*.out
+*.test
+*.xml
+*.swp
+.idea/
+.tmp/
+*.iml
+*.cov
+*.html
+*.log
+gen/thrift/js
+gen/thrift/py
+vendor/
+crossdock-main
+crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..295ebcf62207d1e2780105bdf60ad4aef8e15244
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "idl"]
+	path = idl
+	url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e81cc8805692f23f0219130c1eb753a32fb7a064
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -0,0 +1,56 @@
+sudo: required
+
+language: go
+go_import_path: github.com/uber/jaeger-client-go
+
+dist: trusty
+
+matrix:
+  include:
+  - go: 1.13.x
+    env:
+    - TESTS=true
+    - USE_DEP=true
+    - COVERAGE=true
+  - go: 1.13.x
+    env:
+    - USE_DEP=true
+    - CROSSDOCK=true
+  - go: 1.13.x
+    env:
+    - TESTS=true
+    - USE_DEP=false
+    - USE_GLIDE=true
+  # test with previous version of Go
+  - go: 1.12.x
+    env:
+    - TESTS=true
+    - USE_DEP=true
+    - CI_SKIP_LINT=true
+
+services:
+  - docker
+
+env:
+  global:
+    - DOCKER_COMPOSE_VERSION=1.8.0
+    - COMMIT=${TRAVIS_COMMIT::8}
+    # DOCKER_PASS
+    - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
+    # DOCKER_USER
+    - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
+
+install:
+  - make install-ci USE_DEP=$USE_DEP
+  - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
+
+script:
+  - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
+  - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
+
+after_success:
+  - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
+  - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
+
+after_failure:
+  - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..c77d161780a6c284b72d960be06289e5e4d65026
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -0,0 +1,325 @@
+Changes by Version
+==================
+
+2.23.0 (2020-04-22)
+-------------------
+
+- Add the ability to log all span interactions at a new debug log level ([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
+- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
+- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
+
+2.22.1 (2020-01-16)
+-------------------
+
+- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
+
+
+2.22.0 (2020-01-15)
+-------------------
+
+- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
+- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
+
+
+2.21.1 (2019-12-20)
+-------------------
+
+- Update version correctly.
+
+
+2.21.0 (2019-12-20)
+-------------------
+
+- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
+- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
+- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
+- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
+
+
+2.20.1 (2019-11-08)
+-------------------
+
+Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
+
+- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
+- Create `OperationNameLateBinding` sampler option and config option
+- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
+
+
+2.20.0 (2019-11-06)
+-------------------
+
+## New Features
+
+- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
+
+  Sampling state is shared between all spans of the trace that are still in memory.
+  This allows implementation of delayed sampling decisions (see below).
+
+- Support delayed sampling decisions (#449) -- Yuri Shkuro
+
+  This is a large structural change to how the samplers work.
+  It allows some samplers to be executed multiple times on different
+  span events (like setting a tag) and make a positive sampling decision
+  later in the span life cycle, or even based on children spans.
+  See [README](./README.md#delayed-sampling) for more details.
+
+  There is a related minor change in behavior of the adaptive (per-operation) sampler,
+  which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
+  operation used to make the sampling decision is always the one provided at span creation.
+
+- Add experimental tag matching sampler (#452) -- Yuri Shkuro
+
+  A sampler that can sample a trace based on a certain tag added to the root
+  span or one of its local (in-process) children. The sampler can be used with
+  another experimental `PrioritySampler` that allows multiple samplers to try
+  to make a sampling decision, in a certain priority order.
+
+- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
+- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
+
+## Minor patches
+
+- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
+- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
+- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
+- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
+
+2.19.0 (2019-09-23)
+-------------------
+
+- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
+
+
+2.18.1 (2019-09-16)
+-------------------
+
+- Remove go.mod / go.sum that interfere with `go get` (#432)
+
+
+2.18.0 (2019-09-09)
+-------------------
+
+- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) 
+
+
+2.17.0 (2019-08-30)
+-------------------
+
+- Add a flag for firehose mode (#419) 
+- Default sampling server URL to agent (#414) 
+- Update default sampling rate when sampling strategy is refreshed (#413) 
+- Support "Self" Span Reference (#411) 
+- Don't complain about blank service name if tracing is Disabled (#410)
+- Use IP address from tag if exist (#402) 
+- Expose span data to custom reporters [fixes #394] (#399) 
+- Fix the span allocation in the pool (#381) 
+
+
+2.16.0 (2019-03-24)
+-------------------
+
+- Add baggage to B3 codec (#319) 
+- Add support for 128bit trace ids to zipkin thrift spans. (#378) 
+- Update zipkin propagation logic to support 128bit traceIDs (#373) 
+- Accept "true" for the x-b3-sampled header (#356) 
+
+- Allow setting of PoolSpans from Config object (#322) 
+- Make propagators public to allow wrapping (#379) 
+- Change default metric namespace to use relevant separator for the metric backend (#364) 
+- Change metrics prefix to jaeger_tracer and add descriptions (#346) 
+- Bump OpenTracing to ^1.1.x (#383) 
+- Upgrade jaeger-lib to v2.0.0 (#359) 
+- Avoid defer when generating random number (#358) 
+- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) 
+- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) 
+
+
+2.15.0 (2018-10-10)
+-------------------
+
+- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) 
+- Make maximum annotation length configurable in tracer options (#318) 
+- Support more environment variables in configuration (#323) 
+- Print error on Sampler Query failure (#328) 
+- Add an HTTPOption to support custom http.RoundTripper (#333) 
+- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) 
+
+
+2.14.0 (2018-04-30)
+-------------------
+
+- Support throttling for debug traces (#274) 
+- Remove dependency on Apache Thrift (#303) 
+- Remove dependency on tchannel  (#295) (#294) 
+- Test with Go 1.9 (#298) 
+
+
+2.13.0 (2018-04-15)
+-------------------
+
+- Use value receiver for config.NewTracer() (#283) 
+- Lock span during jaeger thrift conversion (#273) 
+- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260)  
+- Added support for client configuration via env vars (#275) 
+- Allow overriding sampler in the Config (#270) 
+
+
+2.12.0 (2018-03-14)
+-------------------
+
+- Use lock when retrieving span.Context() (#268)
+- Add Configuration support for custom Injector and Extractor (#263) 
+
+
+2.11.2 (2018-01-12)
+-------------------
+
+- Add Gopkg.toml to allow using the lib with `dep`
+
+
+2.11.1 (2018-01-03)
+-------------------
+
+- Do not enqueue spans after Reporter is closed (#235, #245)
+- Change default flush interval to 1sec (#243)
+
+
+2.11.0 (2017-11-27)
+-------------------
+
+- Normalize metric names and tags to be compatible with Prometheus (#222)
+
+
+2.10.0 (2017-11-14)
+-------------------
+
+- Support custom tracing headers (#176)
+- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
+- Do not coerce baggage keys to lower case (#196)
+- Log span name when span cannot be reported (#198)
+- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
+
+
+2.9.0 (2017-07-29)
+------------------
+
+- Pin thrift <= 0.10 (#179)
+- Introduce a parallel interface ContribObserver (#159)
+
+
+2.8.0 (2017-07-05)
+------------------
+
+- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
+- Add options to set tracer tags
+
+
+2.7.0 (2017-06-21)
+------------------
+
+- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
+- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
+- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
+- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
+- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
+
+
+2.6.0 (2017-03-28)
+------------------
+
+- Add config option to initialize RPC Metrics feature
+
+
+2.5.0 (2017-03-23)
+------------------
+
+- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
+- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
+- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
+
+
+2.4.0 (2017-03-21)
+------------------
+
+- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
+- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
+
+
+2.3.0 (2017-03-20)
+------------------
+
+- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
+- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
+
+
+2.2.1 (2017-03-14)
+------------------
+
+- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
+
+
+2.2.0 (2017-03-10)
+------------------
+
+- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
+- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
+
+
+2.1.2 (2017-02-27)
+-------------------
+
+- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
+- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
+- Add tracer initialization godoc examples
+
+
+2.1.1 (2017-02-21)
+-------------------
+
+- Fix inefficient usage of zap.Logger
+
+
+2.1.0 (2017-02-17)
+-------------------
+
+- Add adapter for zap.Logger (https://github.com/uber-go/zap)
+- Move logging API to ./log/ package
+
+
+2.0.0 (2017-02-08)
+-------------------
+
+- Support Adaptive Sampling
+- Support 128bit Trace IDs
+- Change trace/span IDs from uint64 to strong types TraceID and SpanID
+- Add Zipkin HTTP B3 Propagation format support #72
+- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
+- Change API for tracer, reporter, sampler initialization
+
+
+1.6.0 (2016-10-14)
+-------------------
+
+- Add Zipkin HTTP transport
+- Support external baggage via jaeger-baggage header
+- Unpin Thrift version, keep to master
+
+
+1.5.1 (2016-09-27)
+-------------------
+
+- Relax dependency on opentracing to ^1
+
+
+1.5.0 (2016-09-27)
+-------------------
+
+- Upgrade to opentracing-go 1.0
+- Support KV logging for Spans
+
+
+1.4.0 (2016-09-14)
+-------------------
+
+- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..41e2154cf6b6b89c09ddc228b2bd3a2de0faade7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
@@ -0,0 +1,170 @@
+# How to Contribute to Jaeger
+
+We'd love your help!
+
+Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
+pull requests. This document outlines some of the conventions on development
+workflow, commit message formatting, contact points and other resources to make
+it easier to get your contribution accepted.
+
+We gratefully welcome improvements to documentation as well as to code.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the [Developer Certificate of
+Origin](https://developercertificate.org/) (DCO). This document was created
+by the Linux Kernel community and is a simple statement that you, as a
+contributor, have the legal right to make the contribution. See the [DCO](DCO)
+file for details.
+
+## Getting Started
+
+This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
+
+To get started, make sure you clone the Git repository into the correct location
+`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
+
+```
+mkdir -p $GOPATH/src/github.com/uber
+cd $GOPATH/src/github.com/uber
+git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
+cd jaeger-client-go
+git submodule update --init --recursive
+```
+
+Then install dependencies and run the tests:
+
+```
+make install
+make test
+```
+
+## Imports grouping
+
+This project follows the following pattern for grouping imports in Go files:
+  * imports from standard library
+  * imports from other projects
+  * imports from `jaeger-client-go` project
+
+For example:
+
+```go
+import (
+	"fmt"
+
+	"github.com/uber/jaeger-lib/metrics"
+	"go.uber.org/zap"
+
+	"github.com/uber/jaeger-client-go/config"
+)
+```
+
+## Making A Change
+
+*Before making any significant changes, please [open an
+issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
+changes ahead of time will make the contribution process smooth for everyone.
+
+Once we've discussed your changes and you've got your code ready, make sure
+that tests are passing (`make test` or `make cover`) and open your PR. Your
+pull request is most likely to be accepted if it:
+
+* Includes tests for new functionality.
+* Follows the guidelines in [Effective
+  Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
+  review comments](https://github.com/golang/go/wiki/CodeReviewComments).
+* Has a [good commit message](https://chris.beams.io/posts/git-commit/) (see the example after this list):
+   * Separate subject from body with a blank line
+   * Limit the subject line to 50 characters
+   * Capitalize the subject line
+   * Do not end the subject line with a period
+   * Use the imperative mood in the subject line
+   * Wrap the body at 72 characters
+   * Use the body to explain _what_ and _why_ instead of _how_
+* Each commit must be signed by the author ([see below](#sign-your-work)).
+
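+For instance, a message following these rules might look like this (a purely
+hypothetical example):
+
+```
+Fix race in remote sampler shutdown
+
+The polling goroutine could outlive Close() because the done channel
+was never closed. Close the channel before waiting so the goroutine
+exits promptly.
+
+Signed-off-by: Joe Smith <joe.smith@email.com>
+```
+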
+## License
+
+By contributing your code, you agree to license your contribution under the terms
+of the [Apache License](LICENSE).
+
+If you are adding a new file it should have a header like below.  The easiest
+way to add such header is to run `make fmt`.
+
+```
+// Copyright (c) 2017 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+```
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch.  The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+If you want this to be automatic, you can set up some aliases:
+
+```
+git config --add alias.amend "commit -s --amend"
+git config --add alias.c "commit -s"
+```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
new file mode 100644
index 0000000000000000000000000000000000000000..068953d4bd988db39cd869b35f662b80bcb21305
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/DCO
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
new file mode 100644
index 0000000000000000000000000000000000000000..2a5215a5008ba2c074e6df0d8e3e0a843e604f52
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -0,0 +1,328 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
+  name = "github.com/BurntSushi/toml"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
+  version = "v0.3.1"
+
+[[projects]]
+  digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+  name = "github.com/beorn7/perks"
+  packages = ["quantile"]
+  pruneopts = "UT"
+  revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
+  version = "v1.0.1"
+
+[[projects]]
+  branch = "master"
+  digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
+  name = "github.com/codahale/hdrhistogram"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
+
+[[projects]]
+  branch = "master"
+  digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
+  name = "github.com/crossdock/crossdock-go"
+  packages = [
+    ".",
+    "assert",
+    "require",
+  ]
+  pruneopts = "UT"
+  revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
+
+[[projects]]
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  pruneopts = "UT"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
+[[projects]]
+  digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
+  name = "github.com/golang/mock"
+  packages = ["gomock"]
+  pruneopts = "UT"
+  revision = "3a35fb6e3e18b9dbfee291262260dee7372d2a92"
+  version = "v1.4.3"
+
+[[projects]]
+  digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
+  name = "github.com/golang/protobuf"
+  packages = ["proto"]
+  pruneopts = "UT"
+  revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
+  version = "v1.3.2"
+
+[[projects]]
+  digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+  name = "github.com/matttproud/golang_protobuf_extensions"
+  packages = ["pbutil"]
+  pruneopts = "UT"
+  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+  version = "v1.0.1"
+
+[[projects]]
+  digest = "1:727b8f567a30d0739d6c26b9472b3422b351c93cf62095164c845a54b16fc18e"
+  name = "github.com/opentracing/opentracing-go"
+  packages = [
+    ".",
+    "ext",
+    "harness",
+    "log",
+  ]
+  pruneopts = "UT"
+  revision = "659c90643e714681897ec2521c60567dd21da733"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+  version = "v0.8.1"
+
+[[projects]]
+  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  pruneopts = "UT"
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
+  name = "github.com/prometheus/client_golang"
+  packages = [
+    "prometheus",
+    "prometheus/internal",
+  ]
+  pruneopts = "UT"
+  revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+  name = "github.com/prometheus/client_model"
+  packages = ["go"]
+  pruneopts = "UT"
+  revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016"
+
+[[projects]]
+  digest = "1:f119e3205d3a1f0f19dbd7038eb37528e2c6f0933269dc344e305951fb87d632"
+  name = "github.com/prometheus/common"
+  packages = [
+    "expfmt",
+    "internal/bitbucket.org/ww/goautoneg",
+    "model",
+  ]
+  pruneopts = "UT"
+  revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
+  version = "v0.7.0"
+
+[[projects]]
+  digest = "1:a210815b437763623ecca8eb91e6a0bf4f2d6773c5a6c9aec0e28f19e5fd6deb"
+  name = "github.com/prometheus/procfs"
+  packages = [
+    ".",
+    "internal/fs",
+    "internal/util",
+  ]
+  pruneopts = "UT"
+  revision = "499c85531f756d1129edd26485a5f73871eeb308"
+  version = "v0.0.5"
+
+[[projects]]
+  digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a"
+  name = "github.com/stretchr/testify"
+  packages = [
+    "assert",
+    "require",
+    "suite",
+  ]
+  pruneopts = "UT"
+  revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
+  version = "v1.4.0"
+
+[[projects]]
+  digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
+  name = "github.com/uber/jaeger-lib"
+  packages = [
+    "metrics",
+    "metrics/metricstest",
+    "metrics/prometheus",
+  ]
+  pruneopts = "UT"
+  revision = "a87ae9d84fb038a8d79266298970720be7c80fcd"
+  version = "v2.2.0"
+
+[[projects]]
+  digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
+  name = "go.uber.org/atomic"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
+  version = "v1.5.0"
+
+[[projects]]
+  digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
+  name = "go.uber.org/multierr"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
+  version = "v1.3.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
+  name = "go.uber.org/tools"
+  packages = ["update-license"]
+  pruneopts = "UT"
+  revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
+
+[[projects]]
+  digest = "1:98a70115729234dc73ee7bb83973cb39cb8fedf278d17df77264382bad0183ec"
+  name = "go.uber.org/zap"
+  packages = [
+    ".",
+    "buffer",
+    "internal/bufferpool",
+    "internal/color",
+    "internal/exit",
+    "zapcore",
+    "zaptest/observer",
+  ]
+  pruneopts = "UT"
+  revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
+  version = "v1.12.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
+  name = "golang.org/x/lint"
+  packages = [
+    ".",
+    "golint",
+  ]
+  pruneopts = "UT"
+  revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
+
+[[projects]]
+  branch = "master"
+  digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
+  name = "golang.org/x/net"
+  packages = [
+    "context",
+    "context/ctxhttp",
+  ]
+  pruneopts = "UT"
+  revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
+
+[[projects]]
+  branch = "master"
+  digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
+  name = "golang.org/x/sys"
+  packages = ["windows"]
+  pruneopts = "UT"
+  revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
+
+[[projects]]
+  branch = "master"
+  digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
+  name = "golang.org/x/tools"
+  packages = [
+    "go/analysis",
+    "go/analysis/passes/inspect",
+    "go/ast/astutil",
+    "go/ast/inspector",
+    "go/buildutil",
+    "go/gcexportdata",
+    "go/internal/gcimporter",
+    "go/internal/packagesdriver",
+    "go/packages",
+    "go/types/objectpath",
+    "go/types/typeutil",
+    "internal/fastwalk",
+    "internal/gopathwalk",
+    "internal/semver",
+    "internal/span",
+  ]
+  pruneopts = "UT"
+  revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
+
+[[projects]]
+  digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
+  name = "gopkg.in/yaml.v2"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
+  version = "v2.2.4"
+
+[[projects]]
+  digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
+  name = "honnef.co/go/tools"
+  packages = [
+    "arg",
+    "cmd/staticcheck",
+    "config",
+    "deprecated",
+    "facts",
+    "functions",
+    "go/types/typeutil",
+    "internal/cache",
+    "internal/passes/buildssa",
+    "internal/renameio",
+    "internal/sharedcheck",
+    "lint",
+    "lint/lintdsl",
+    "lint/lintutil",
+    "lint/lintutil/format",
+    "loader",
+    "printf",
+    "simple",
+    "ssa",
+    "ssautil",
+    "staticcheck",
+    "staticcheck/vrp",
+    "stylecheck",
+    "unused",
+    "version",
+  ]
+  pruneopts = "UT"
+  revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
+  version = "2019.2.3"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  input-imports = [
+    "github.com/crossdock/crossdock-go",
+    "github.com/golang/mock/gomock",
+    "github.com/opentracing/opentracing-go",
+    "github.com/opentracing/opentracing-go/ext",
+    "github.com/opentracing/opentracing-go/harness",
+    "github.com/opentracing/opentracing-go/log",
+    "github.com/pkg/errors",
+    "github.com/prometheus/client_golang/prometheus",
+    "github.com/stretchr/testify/assert",
+    "github.com/stretchr/testify/require",
+    "github.com/stretchr/testify/suite",
+    "github.com/uber/jaeger-lib/metrics",
+    "github.com/uber/jaeger-lib/metrics/metricstest",
+    "github.com/uber/jaeger-lib/metrics/prometheus",
+    "go.uber.org/atomic",
+    "go.uber.org/zap",
+    "go.uber.org/zap/zapcore",
+    "go.uber.org/zap/zaptest/observer",
+  ]
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
new file mode 100644
index 0000000000000000000000000000000000000000..1fed7f8146a3f19f341d780f401fce2df2e9def7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -0,0 +1,31 @@
+[[constraint]]
+  name = "github.com/crossdock/crossdock-go"
+  branch = "master"
+
+[[constraint]]
+  name = "github.com/opentracing/opentracing-go"
+  version = "^1.1"
+
+[[constraint]]
+  name = "github.com/prometheus/client_golang"
+  version = "^1"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "^1.1.3"
+
+[[constraint]]
+  name = "go.uber.org/atomic"
+  version = "^1"
+
+[[constraint]]
+  name = "github.com/uber/jaeger-lib"
+  version = "^2.2"
+
+[[constraint]]
+  name = "go.uber.org/zap"
+  version = "^1"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d5e962ccff662d2e8aa06e548ba77f0a6b1c8a0a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -0,0 +1,134 @@
+PROJECT_ROOT=github.com/uber/jaeger-client-go
+PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+# all .go files that don't exist in hidden directories
+ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
+        -e ".*/\..*" \
+        -e ".*/_.*" \
+        -e ".*/mocks.*")
+
+USE_DEP := true
+
+-include crossdock/rules.mk
+
+RACE=-race
+GOTEST=go test -v $(RACE)
+GOLINT=golint
+GOVET=go vet
+GOFMT=gofmt
+FMT_LOG=fmt.log
+LINT_LOG=lint.log
+
+THRIFT_VER=0.9.3
+THRIFT_IMG=thrift:$(THRIFT_VER)
+THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
+THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
+THRIFT_GEN_DIR=thrift-gen
+
+PASS=$(shell printf "\033[32mPASS\033[0m")
+FAIL=$(shell printf "\033[31mFAIL\033[0m")
+COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
+
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test fmt lint
+
+.PHONY: test
+test:
+ifeq ($(USE_DEP),true)
+	dep check
+endif
+	bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
+
+.PHONY: fmt
+fmt:
+	$(GOFMT) -e -s -l -w $(ALL_SRC)
+	./scripts/updateLicenses.sh
+
+.PHONY: lint
+lint:
+	$(GOVET) $(PACKAGES)
+	@cat /dev/null > $(LINT_LOG)
+	@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
+	@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
+	@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
+	./scripts/updateLicenses.sh >> $(FMT_LOG)
+	@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
+
+
+.PHONY: install
+install:
+	@echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
+ifeq ($(USE_DEP),true)
+	dep version || make install-dep
+	dep ensure
+endif
+ifeq ($(USE_GLIDE),true)
+	glide --version || go get github.com/Masterminds/glide
+	glide install
+endif
+
+
+.PHONY: cover
+cover:
+	$(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
+
+.PHONY: cover-html
+cover-html: cover
+	go tool cover -html=cover.out -o cover.html
+
+# This is not part of the regular test target because we don't want to slow it
+# down.
+.PHONY: test-examples
+test-examples:
+	make -C examples
+
+.PHONY: thrift
+thrift: idl-submodule thrift-compile
+
+# TODO at the moment we're not generating tchan_*.go files
+.PHONY: thrift-compile
+thrift-compile: thrift-image
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
+	sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
+	sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
+	sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
+		$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
+	rm -rf thrift-gen/*/*-remote
+	rm -rf crossdock/thrift/*/*-remote
+	rm -rf thrift-gen/jaeger/collector.go
+
+.PHONY: idl-submodule
+idl-submodule:
+	git submodule init
+	git submodule update
+
+.PHONY: thrift-image
+thrift-image:
+	$(THRIFT) -version
+
+.PHONY: install-dep
+install-dep:
+	- curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
+	- chmod +x $$GOPATH/bin/dep
+
+.PHONY: install-ci
+install-ci: install
+	go get github.com/wadey/gocovmerge
+	go get github.com/mattn/goveralls
+	go get golang.org/x/tools/cmd/cover
+	go get golang.org/x/lint/golint
+
+.PHONY: test-ci
+test-ci: cover
+ifeq ($(CI_SKIP_LINT),true)
+	echo 'skipping lint'
+else
+	make lint
+endif
+
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c348e73a7471a0a5f33fdbc6d248a1df1121eec
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -0,0 +1,318 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+
+# Jaeger Bindings for Go OpenTracing API
+
+Instrumentation library that implements an
+[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
+
+**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
+  * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
+  * :x: `import "github.com/jaegertracing/jaeger-client-go"`
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Installation
+
+We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
+and [semantic versioning](http://semver.org/) when including this library into an application.
+For example, Jaeger backend imports this library like this:
+
+```toml
+[[constraint]]
+  name = "github.com/uber/jaeger-client-go"
+  version = "2.17"
+```
+
+If you instead want to use the latest version in `master`, you can pull it via `go get`.
+Note that during `go get` you may see build errors due to incompatible dependencies, which is why
+we recommend using semantic versions for dependencies. The error may be fixed by running
+`make install` (it will install `dep` if you don't have it):
+
+```shell
+go get -u github.com/uber/jaeger-client-go/
+cd $GOPATH/src/github.com/uber/jaeger-client-go/
+git submodule update --init --recursive
+make install
+```
+
+## Initialization
+
+See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+and [config/example_test.go](./config/example_test.go).
+
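+For quick reference, a minimal programmatic setup might look like the sketch
+below (constant sampling and span logging are illustrative choices here, not
+library defaults):
+
+```go
+import (
+    "io"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+// initTracer builds a Tracer that samples every trace and logs reported spans.
+func initTracer(service string) (opentracing.Tracer, io.Closer, error) {
+    cfg := config.Configuration{
+        ServiceName: service,
+        Sampler:     &config.SamplerConfig{Type: jaeger.SamplerTypeConst, Param: 1},
+        Reporter:    &config.ReporterConfig{LogSpans: true},
+    }
+    return cfg.NewTracer()
+}
+```
+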
+### Environment variables
+
+The tracer can be initialized with values coming from environment variables. None of the env vars are required
+and all of them can be overridden via direct setting of the property on the configuration object.
+
+Property| Description
+--- | ---
+JAEGER_SERVICE_NAME | The service name
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, e.g. http://jaeger-collector:14268/api/traces
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits])
+JAEGER_SAMPLER_TYPE | The sampler type
+JAEGER_SAMPLER_PARAM | The sampler parameter (number)
+JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, e.g. http://jaeger-agent:5778/sampling
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits])
+JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where `:default` is optional and identifies a value to be used if the environment variable cannot be found
+JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
+JAEGER_RPC_METRICS | Whether to store RPC metrics
+
+By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
+`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
+to that endpoint via `HTTP`, and `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` are ignored. If `JAEGER_ENDPOINT` is
+secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
+variables.
+
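+As a sketch, the same environment-driven setup in code, using the
+`config.FromEnv()` helper from the `config` sub-package:
+
+```go
+import (
+    "io"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+// tracerFromEnv builds a Tracer configured purely from JAEGER_* variables.
+func tracerFromEnv() (opentracing.Tracer, io.Closer, error) {
+    cfg, err := config.FromEnv() // parses the variables listed above
+    if err != nil {
+        return nil, nil, err
+    }
+    return cfg.NewTracer()
+}
+```
+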
+### Closing the tracer via `io.Closer`
+
+The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
+It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
+before exiting, e.g.
+
+```go
+tracer, closer, err := cfg.NewTracer(...)
+defer closer.Close()
+```
+
+This is especially useful for command-line tools that enable tracing, as well as
+for long-running apps that support graceful shutdown. For example, if your deployment
+system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
+exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
+
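+A fuller sketch of that pattern (the signal handling is illustrative, not
+something this library prescribes):
+
+```go
+import (
+    "log"
+    "os"
+    "os/signal"
+    "syscall"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+func main() {
+    tracer, closer, err := config.Configuration{ServiceName: "my-service"}.NewTracer()
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer closer.Close() // flushes buffered spans before the process exits
+    opentracing.SetGlobalTracer(tracer)
+
+    // Block until SIGTERM/SIGINT; returning from main runs the deferred Close.
+    sigCh := make(chan os.Signal, 1)
+    signal.Notify(sigCh, syscall.SIGTERM, os.Interrupt)
+    <-sigCh
+}
+```
+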
+### Metrics & Monitoring
+
+The tracer emits a number of different metrics, defined in
+[metrics.go](metrics.go). The monitoring backend is expected to support
+tag-based metric names: for example, instead of a `statsd`-style string name
+like `counters.my-service.jaeger.spans.started.sampled`, a metric
+is defined by a short name and a collection of key/value tags, e.g.
+`name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
+file for the full list and descriptions of emitted metrics.
+
+The monitoring backend is represented by the `metrics.Factory` interface from package
+[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics).  An implementation
+of that interface can be passed as an option to either the Configuration object or the Tracer
+constructor, for example:
+
+```go
+import (
+    "io"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+    "github.com/uber/jaeger-lib/metrics/prometheus"
+)
+
+// newTracer is an illustrative wrapper showing how the factory is wired in.
+func newTracer() (opentracing.Tracer, io.Closer, error) {
+    metricsFactory := prometheus.New()
+    return config.Configuration{
+        ServiceName: "your-service-name",
+    }.NewTracer(
+        config.Metrics(metricsFactory),
+    )
+}
+```
+
+By default, a no-op `metrics.NullFactory` is used.
+
+### Logging
+
+The tracer can be configured with an optional logger, which will be
+used to log communication errors, or log spans if a logging reporter
+option is specified in the configuration. The logging API is abstracted
+by the [Logger](logger.go) interface. A logger instance implementing
+this interface can be set on the `Config` object before calling the
+`New` method.
+
+Besides the [zap](https://github.com/uber-go/zap) implementation
+bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
+one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
+
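+As a sketch, a custom logger only needs the two methods of the interface and
+can then be passed in via the `config.Logger` option (`prefixLogger` is an
+illustrative name, not part of this library):
+
+```go
+import (
+    "io"
+    stdlog "log"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+// prefixLogger adapts the standard library logger to the jaeger.Logger interface.
+type prefixLogger struct{ prefix string }
+
+func (l prefixLogger) Error(msg string) { stdlog.Printf("%s ERROR: %s", l.prefix, msg) }
+func (l prefixLogger) Infof(msg string, args ...interface{}) {
+    stdlog.Printf(l.prefix+" "+msg, args...)
+}
+
+func newTracerWithLogger() (opentracing.Tracer, io.Closer, error) {
+    return config.Configuration{ServiceName: "svc"}.NewTracer(
+        config.Logger(prefixLogger{prefix: "[jaeger]"}),
+    )
+}
+```
+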
+## Instrumentation for Tracing
+
+Since this tracer is fully compliant with OpenTracing API 1.0,
+all code instrumentation should only use the API itself, as described
+in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
+
+## Features
+
+### Reporters
+
+A "reporter" is a component that receives the finished spans and reports
+them to somewhere. Under normal circumstances, the Tracer
+should use the default `RemoteReporter`, which sends the spans out of
+process via configurable "transport". For testing purposes, one can
+use an `InMemoryReporter` that accumulates spans in a buffer and
+allows to retrieve them for later verification. Also available are
+`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
+which logs all finished spans using their `String()` method, and a
+`CompositeReporter` that can be used to combine more than one reporter
+into one, e.g. to attach a logging reporter to the main remote reporter.
+
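+For instance, a unit test might pair an `InMemoryReporter` with a const
+sampler (a sketch; the service and operation names are placeholders):
+
+```go
+import (
+    "fmt"
+
+    "github.com/uber/jaeger-client-go"
+)
+
+func example() {
+    reporter := jaeger.NewInMemoryReporter()
+    tracer, closer := jaeger.NewTracer(
+        "test-service", jaeger.NewConstSampler(true), reporter)
+    defer closer.Close()
+
+    tracer.StartSpan("write-user").Finish()
+    fmt.Println(reporter.SpansSubmitted()) // 1
+}
+```
+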
+### Span Reporting Transports
+
+The remote reporter uses "transports" to actually send the spans out
+of process. Currently the supported transports include:
+  * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
+  * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
+
+### Sampling
+
+The tracer does not record all spans, but only those that have the
+sampling bit set in the `flags`. When a new trace is started and a new
+unique ID is generated, a sampling decision is made whether this trace
+should be sampled. The sampling decision is propagated to all downstream
+calls via the `flags` field of the trace context. The following samplers
+are available (see the construction sketch after this list):
+  1. `RemotelyControlledSampler` uses one of the other simpler samplers
+     and periodically updates it by polling an external server. This
+     allows dynamic control of the sampling strategies.
+  1. `ConstSampler` always makes the same sampling decision for all
+     trace IDs. It can be configured either to sample all traces or
+     to sample none.
+  1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
+     for a given trace to be sampled. The actual decision is made by
+     comparing the trace ID with a random number multiplied by the
+     sampling rate.
+  1. `RateLimitingSampler` can be used to allow only a certain fixed
+     number of traces to be sampled per second.
+
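+A construction sketch for two of these samplers (note that
+`NewProbabilisticSampler` returns an error for rates outside [0, 1]):
+
+```go
+import "github.com/uber/jaeger-client-go"
+
+func pickSampler(forTests bool) (jaeger.Sampler, error) {
+    if forTests {
+        // Sample every trace.
+        return jaeger.NewConstSampler(true), nil
+    }
+    // Sample roughly 0.1% of traces.
+    return jaeger.NewProbabilisticSampler(0.001)
+}
+```
+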
+#### Delayed sampling
+
+Version 2.20 introduced the ability to delay sampling decisions in the life cycle
+of the root span. It involves several features and architectural changes:
+  * **Shared sampling state**: the sampling state is shared across all local
+    (i.e. in-process) spans for a given trace.
+  * **New `SamplerV2` API** allows the sampler to be called at multiple points 
+    in the life cycle of a span:
+    * on span creation
+    * on overwriting span operation name
+    * on setting span tags
+    * on finishing the span
+  * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
+    to indicate if the negative sampling decision is final or not (positive sampling
+    decisions are always final). If the decision is not final, the sampler will be
+    called again on further span life cycle events, like setting tags.
+
+These new features are used in the experimental `x.TagMatchingSampler`, which
+can sample a trace based on a certain tag added to the root
+span or one of its local (in-process) children. The sampler can be used with
+another experimental `x.PrioritySampler` that allows multiple samplers to try
+to make a sampling decision, in a certain priority order.
+
+### Baggage Injection
+
+The OpenTracing spec allows for [baggage][baggage]: key-value pairs that are added
+to the span context and propagated throughout the trace. An external process can inject baggage
+by setting the special HTTP Header `jaeger-baggage` on a request:
+
+```sh
+curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
+```
+
+Baggage can also be programmatically set inside your service:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+    span.SetBaggageItem("key", "value")
+}
+```
+
+Another service downstream of that can retrieve the baggage in a similar way:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+    val := span.BaggageItem("key")
+    println(val)
+}
+```
+
+### Debug Traces (Forced Sampling)
+
+#### Programmatically
+
+The OpenTracing API defines a `sampling.priority` standard tag that
+can be used to affect the sampling of a span and its children:
+
+```go
+import (
+    "github.com/opentracing/opentracing-go"
+    "github.com/opentracing/opentracing-go/ext"
+)
+
+span := opentracing.SpanFromContext(ctx)
+ext.SamplingPriority.Set(span, 1)
+```
+
+#### Via HTTP Headers
+
+Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
+which can be set in the incoming request, e.g.
+
+```sh
+curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
+```
+
+When Jaeger sees this header in the request that otherwise has no
+tracing context, it ensures that the new trace started for this
+request will be sampled in the "debug" mode (meaning it should survive
+all downsampling that might happen in the collection pipeline), and the
+root span will have a tag as if this statement was executed:
+
+```go
+span.SetTag("jaeger-debug-id", "some-correlation-id")
+```
+
+This allows using Jaeger UI to find the trace by this tag.
+
+### Zipkin HTTP B3 compatible header propagation
+
+Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
+by many Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
+
+However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
+
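+A sketch of that setup, registering the B3 propagator for HTTP headers on a
+tracer built with a const sampler and a no-op reporter:
+
+```go
+import (
+    "io"
+
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/zipkin"
+)
+
+func b3Tracer() (opentracing.Tracer, io.Closer) {
+    propagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
+    return jaeger.NewTracer(
+        "svc",
+        jaeger.NewConstSampler(true),
+        jaeger.NewNullReporter(),
+        jaeger.TracerOptions.Injector(opentracing.HTTPHeaders, propagator),
+        jaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, propagator),
+    )
+}
+```
+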
+## SelfRef
+
+Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
+to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
+but not yet accepted. This allows the caller to provide an already created `SpanContext`
+when starting a new span. The `Self` reference bypasses trace and span id generation,
+as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
+set appropriately by the caller).
+
+The `Self` reference supports the following use cases:
+  * the ability to provide externally generated trace and span IDs
+  * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
+
+Usage requires passing in a `SpanContext` via the `jaeger.SelfRef` reference:
+```go
+span := tracer.StartSpan(
+    "continued_span",
+    jaeger.SelfRef(yourSpanContext),
+)
+...
+defer span.Finish()
+```
+
+## License
+
+[Apache 2.0 License](LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
+[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
+[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
+[ot-url]: http://opentracing.io
+[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
+[timeunits]: https://golang.org/pkg/time/#ParseDuration
+[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
new file mode 100644
index 0000000000000000000000000000000000000000..115e49ab8ad3657f184f8a4a60eb220c1c6fb5ea
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -0,0 +1,11 @@
+# Release Process
+
+1. Create a PR "Preparing for release X.Y.Z" against master branch
+    * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)`
+    * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
+2. Create a release "Release X.Y.Z" on GitHub
+    * Create Tag `vX.Y.Z`
+    * Copy CHANGELOG.md into the release notes
+3. Create a PR "Back to development" against master branch
+    * Add ` (unreleased)` to CHANGELOG.md
+    * Update `JaegerClientVersion` in constants.go to `Go-dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
new file mode 100644
index 0000000000000000000000000000000000000000..1037ca0e861db7dc8fb6ab2bea0dda795caf5620
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go/log"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+)
+
+// baggageSetter is an actor that can set a baggage value on a Span given certain
+// restrictions (e.g. maxValueLength).
+type baggageSetter struct {
+	restrictionManager baggage.RestrictionManager
+	metrics            *Metrics
+}
+
+func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
+	return &baggageSetter{
+		restrictionManager: restrictionManager,
+		metrics:            metrics,
+	}
+}
+
+// (NB) span should hold the lock before making this call
+func (s *baggageSetter) setBaggage(span *Span, key, value string) {
+	var truncated bool
+	var prevItem string
+	restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
+	if !restriction.KeyAllowed() {
+		s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+		s.metrics.BaggageUpdateFailure.Inc(1)
+		return
+	}
+	if len(value) > restriction.MaxValueLength() {
+		truncated = true
+		value = value[:restriction.MaxValueLength()]
+		s.metrics.BaggageTruncate.Inc(1)
+	}
+	prevItem = span.context.baggage[key]
+	s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+	span.context = span.context.WithBaggageItem(key, value)
+	s.metrics.BaggageUpdateSuccess.Inc(1)
+}
+
+func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
+	if !span.context.IsSampled() {
+		return
+	}
+	fields := []log.Field{
+		log.String("event", "baggage"),
+		log.String("key", key),
+		log.String("value", value),
+	}
+	if prevItem != "" {
+		fields = append(fields, log.String("override", "true"))
+	}
+	if truncated {
+		fields = append(fields, log.String("truncated", "true"))
+	}
+	if !valid {
+		fields = append(fields, log.String("invalid", "true"))
+	}
+	span.logFieldsNoLocking(fields...)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..01a505f5e986094fa017b5c230c057e31f8c0b17
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+
+	"github.com/opentracing/opentracing-go"
+)
+
+const (
+	// JaegerClientVersion is the version of the client library reported as Span tag.
+	JaegerClientVersion = "Go-2.23.0"
+
+	// JaegerClientVersionTagKey is the name of the tag used to report client version.
+	JaegerClientVersionTagKey = "jaeger.version"
+
+	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+	// if found in the carrier, forces the trace to be sampled as "debug" trace.
+	// The value of the header is recorded as the tag on the root span, so that the
+	// trace can be found in the UI using this value as a correlation ID.
+	JaegerDebugHeader = "jaeger-debug-id"
+
+	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+	// a root span does not exist.
+	JaegerBaggageHeader = "jaeger-baggage"
+
+	// TracerHostnameTagKey used to report host name of the process.
+	TracerHostnameTagKey = "hostname"
+
+	// TracerIPTagKey used to report ip of the process.
+	TracerIPTagKey = "ip"
+
+	// TracerUUIDTagKey used to report UUID of the client process.
+	TracerUUIDTagKey = "client-uuid"
+
+	// SamplerTypeTagKey reports which sampler was used on the root span.
+	SamplerTypeTagKey = "sampler.type"
+
+	// SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
+	SamplerParamTagKey = "sampler.param"
+
+	// TraceContextHeaderName is the http header name used to propagate tracing context.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceContextHeaderName = "uber-trace-id"
+
+	// TracerStateHeaderName is deprecated.
+	// Deprecated: use TraceContextHeaderName
+	TracerStateHeaderName = TraceContextHeaderName
+
+	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceBaggageHeaderPrefix = "uberctx-"
+
+	// SamplerTypeConst is the type of sampler that always makes the same decision.
+	SamplerTypeConst = "const"
+
+	// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+	SamplerTypeRemote = "remote"
+
+	// SamplerTypeProbabilistic is the type of sampler that samples traces
+	// with a certain fixed probability.
+	SamplerTypeProbabilistic = "probabilistic"
+
+	// SamplerTypeRateLimiting is the type of sampler that samples
+	// only up to a fixed number of traces per second.
+	SamplerTypeRateLimiting = "ratelimiting"
+
+	// SamplerTypeLowerBound is the type of sampler that samples
+	// at least a fixed number of traces per second.
+	SamplerTypeLowerBound = "lowerbound"
+
+	// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+	DefaultUDPSpanServerHost = "localhost"
+
+	// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+	DefaultUDPSpanServerPort = 6831
+
+	// DefaultSamplingServerPort is the default port to fetch sampling config from, via http
+	DefaultSamplingServerPort = 5778
+
+	// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+	DefaultMaxTagValueLength = 256
+
+	// selfRefType is a jaeger-specific reference type that supports creating a span
+	// with an already defined context.
+	selfRefType opentracing.SpanReferenceType = 99
+)
+
+var (
+	// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
+	DefaultSamplingServerURL = fmt.Sprintf("http://localhost:%d/sampling", DefaultSamplingServerPort)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ce1881f3b834a3edb19aa6941ed5dec1d5afe32
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+	// Create and return a span observer. Called when a span starts.
+	// If the Observer is not interested in the given span, it must return (nil, false).
+	// E.g.:
+	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//         var sp opentracing.Span
+	//         sso := opentracing.StartSpanOptions{}
+	//         if spanObserver, ok := Observer.OnStartSpan(sp, opName, sso); ok {
+	//             // we have a valid SpanObserver
+	//         }
+	//         ...
+	//     }
+	OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+	OnSetOperationName(operationName string)
+	OnSetTag(key string, value interface{})
+	OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+	obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+	spanObserver := o.obs.OnStartSpan(operationName, options)
+	return spanObserver, spanObserver != nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f5549033d5dd3ab27c2ab31eac1e09b73f24fb5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
+
+For integration instructions please refer to the README:
+
+https://github.com/uber/jaeger-client-go/blob/master/README.md
+*/
+package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
new file mode 100644
index 0000000000000000000000000000000000000000..f4c05b2dbda5a1c4f5bbc033f64148bb4c36639b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.lock
@@ -0,0 +1,98 @@
+hash: a4a449cfc060c2d7be850a69b171e4382a3bd00d1a0a72cfc944facc3fe263bf
+updated: 2019-09-23T17:10:15.213856-04:00
+imports:
+- name: github.com/beorn7/perks
+  version: 37c8de3658fcb183f997c4e13e8337516ab753e6
+  subpackages:
+  - quantile
+- name: github.com/codahale/hdrhistogram
+  version: 3a0bb77429bd3a61596f5e8a3172445844342120
+- name: github.com/crossdock/crossdock-go
+  version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
+  subpackages:
+  - assert
+  - require
+- name: github.com/davecgh/go-spew
+  version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d
+  subpackages:
+  - spew
+- name: github.com/golang/protobuf
+  version: 1680a479a2cfb3fa22b972af7e36d0a0fde47bf8
+  subpackages:
+  - proto
+- name: github.com/matttproud/golang_protobuf_extensions
+  version: c182affec369e30f25d3eb8cd8a478dee585ae7d
+  subpackages:
+  - pbutil
+- name: github.com/opentracing/opentracing-go
+  version: 659c90643e714681897ec2521c60567dd21da733
+  subpackages:
+  - ext
+  - harness
+  - log
+- name: github.com/pkg/errors
+  version: ba968bfe8b2f7e042a574c888954fccecfa385b4
+- name: github.com/pmezard/go-difflib
+  version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
+  subpackages:
+  - difflib
+- name: github.com/prometheus/client_golang
+  version: 170205fb58decfd011f1550d4cfb737230d7ae4f
+  subpackages:
+  - prometheus
+  - prometheus/internal
+- name: github.com/prometheus/client_model
+  version: 14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016
+  subpackages:
+  - go
+- name: github.com/prometheus/common
+  version: 287d3e634a1e550c9e463dd7e5a75a422c614505
+  subpackages:
+  - expfmt
+  - internal/bitbucket.org/ww/goautoneg
+  - model
+- name: github.com/prometheus/procfs
+  version: de25ac347ef9305868b04dc42425c973b863b18c
+  subpackages:
+  - internal/fs
+  - internal/util
+- name: github.com/stretchr/testify
+  version: 85f2b59c4459e5bf57488796be8c3667cb8246d6
+  subpackages:
+  - assert
+  - require
+  - suite
+- name: github.com/uber-go/atomic
+  version: df976f2515e274675050de7b3f42545de80594fd
+- name: github.com/uber/jaeger-lib
+  version: a87ae9d84fb038a8d79266298970720be7c80fcd
+  subpackages:
+  - metrics
+  - metrics/metricstest
+  - metrics/prometheus
+- name: go.uber.org/atomic
+  version: df976f2515e274675050de7b3f42545de80594fd
+- name: go.uber.org/multierr
+  version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
+- name: go.uber.org/zap
+  version: 27376062155ad36be76b0f12cf1572a221d3a48c
+  subpackages:
+  - buffer
+  - internal/bufferpool
+  - internal/color
+  - internal/exit
+  - zapcore
+- name: golang.org/x/net
+  version: aa69164e4478b84860dc6769c710c699c67058a3
+  subpackages:
+  - context
+  - context/ctxhttp
+- name: golang.org/x/sys
+  version: 0a153f010e6963173baba2306531d173aa843137
+  subpackages:
+  - windows
+- name: gopkg.in/yaml.v2
+  version: 51d6538a90f86fe93ac480b35f37b2be17fef232
+- name: github.com/golang/mock 
+  version: 3a35fb6e3e18b9dbfee291262260dee7372d2a92
+testImports: []
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eb58c67ffc4ebbebabca7688977dfb9e1aaa92c8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml
@@ -0,0 +1,28 @@
+package: github.com/uber/jaeger-client-go
+import:
+- package: github.com/opentracing/opentracing-go
+  version: ^1.1
+  subpackages:
+  - ext
+  - log
+- package: github.com/crossdock/crossdock-go
+- package: github.com/uber/jaeger-lib
+  version: ^2.0.0
+  subpackages:
+  - metrics
+- package: github.com/pkg/errors
+  version: ~0.8.0
+- package: go.uber.org/zap
+  source: https://github.com/uber-go/zap.git
+  version: ^1
+- package: github.com/uber-go/atomic
+  version: ^1
+- package: github.com/prometheus/client_golang
+  version: ^1
+testImport:
+- package: github.com/stretchr/testify
+  subpackages:
+  - assert
+  - require
+  - suite
+- package: github.com/golang/mock
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
new file mode 100644
index 0000000000000000000000000000000000000000..5da70351d91d043794a3f75383f0339564c1ebe1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// HeadersConfig contains the values for the header keys that Jaeger will use.
+// These values may be either custom or default depending on whether custom
+// values were provided via a configuration.
+type HeadersConfig struct {
+	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+	// if found in the carrier, forces the trace to be sampled as "debug" trace.
+	// The value of the header is recorded as the tag on the root span, so that the
+	// trace can be found in the UI using this value as a correlation ID.
+	JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
+
+	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+	// a root span does not exist.
+	JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
+
+	// TraceContextHeaderName is the http header name used to propagate tracing context.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
+
+	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
+}
+
+// ApplyDefaults sets missing configuration keys to default values
+func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
+	if c.JaegerBaggageHeader == "" {
+		c.JaegerBaggageHeader = JaegerBaggageHeader
+	}
+	if c.JaegerDebugHeader == "" {
+		c.JaegerDebugHeader = JaegerDebugHeader
+	}
+	if c.TraceBaggageHeaderPrefix == "" {
+		c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
+	}
+	if c.TraceContextHeaderName == "" {
+		c.TraceContextHeaderName = TraceContextHeaderName
+	}
+	return c
+}
+
+func getDefaultHeadersConfig() *HeadersConfig {
+	return &HeadersConfig{
+		JaegerDebugHeader:        JaegerDebugHeader,
+		JaegerBaggageHeader:      JaegerBaggageHeader,
+		TraceContextHeaderName:   TraceContextHeaderName,
+		TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
new file mode 100644
index 0000000000000000000000000000000000000000..c16a5c566291d45d59d72ff9139388b97e5c9838
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage
+
+const (
+	defaultMaxValueLength = 2048
+)
+
+// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
+type Restriction struct {
+	keyAllowed     bool
+	maxValueLength int
+}
+
+// NewRestriction returns a new Restriction.
+func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
+	return &Restriction{
+		keyAllowed:     keyAllowed,
+		maxValueLength: maxValueLength,
+	}
+}
+
+// KeyAllowed returns whether the baggage key for this restriction is allowed.
+func (r *Restriction) KeyAllowed() bool {
+	return r.keyAllowed
+}
+
+// MaxValueLength returns the max length for the baggage value.
+func (r *Restriction) MaxValueLength() int {
+	return r.maxValueLength
+}
+
+// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
+// will return a Restriction for a specific baggage key which will determine whether the baggage
+// key is allowed for the current service and any other applicable restrictions on the baggage
+// value.
+type RestrictionManager interface {
+	GetRestriction(service, key string) *Restriction
+}
+
+// DefaultRestrictionManager allows any baggage key.
+type DefaultRestrictionManager struct {
+	defaultRestriction *Restriction
+}
+
+// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
+func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
+	if maxValueLength == 0 {
+		maxValueLength = defaultMaxValueLength
+	}
+	return &DefaultRestrictionManager{
+		defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
+	}
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
+	return m.defaultRestriction
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe0bef268a2dab8d94b7c9bc2494c442e2b1d9e4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporterstats
+
+// ReporterStats exposes some metrics from the RemoteReporter.
+type ReporterStats interface {
+	SpansDroppedFromQueue() int64
+}
+
+// Receiver can be implemented by a Transport to be given ReporterStats.
+type Receiver interface {
+	SetReporterStats(ReporterStats)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
new file mode 100644
index 0000000000000000000000000000000000000000..0e10b8a5aa8e948c9d989238bb3968a609ca97b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanlog
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+// TODO refactor into pluggable materializer
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+	fields := fieldsAsMap(make(map[string]string, len(logFields)))
+	for _, field := range logFields {
+		field.Marshal(fields)
+	}
+	if event, ok := fields["event"]; ok && len(fields) == 1 {
+		return []byte(event), nil
+	}
+	return json.Marshal(fields)
+}
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+	ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+	ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+	ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+	value(ml)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 0000000000000000000000000000000000000000..196ed69cacae0f7dbd625d805e8207409650f8b3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate limit operations. For example, given how debug spans
+// are always sampled, a throttler can be enabled per client to rate limit the amount
+// of debug spans a client can start.
+type Throttler interface {
+	// IsAllowed determines whether the operation should be allowed and not be
+	// throttled.
+	IsAllowed(operation string) bool
+}
+
+// DefaultThrottler doesn't throttle at all.
+type DefaultThrottler struct{}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t DefaultThrottler) IsAllowed(operation string) bool {
+	return true
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
new file mode 100644
index 0000000000000000000000000000000000000000..8402d087c29fd22ab88a6f17f28eec8f06b79869
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/interop.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go"
+)
+
+// TODO this file should not be needed after TChannel PR.
+
+type formatKey int
+
+// SpanContextFormat is a constant used as OpenTracing Format.
+// Requires *SpanContext as carrier.
+// This format is intended for interop with TChannel or other Zipkin-like tracers.
+const SpanContextFormat formatKey = iota
+
+type jaegerTraceContextPropagator struct {
+	tracer *Tracer
+}
+
+func (p *jaegerTraceContextPropagator) Inject(
+	ctx SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(*SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	carrier.CopyFrom(&ctx)
+	return nil
+}
+
+func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(*SpanContext)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	ctx := new(SpanContext)
+	ctx.CopyFrom(carrier)
+	return *ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
new file mode 100644
index 0000000000000000000000000000000000000000..868b2a5b5465a2a634379096f6b5c852f5d6a0ab
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+
+	"github.com/opentracing/opentracing-go/log"
+
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+type tags []*j.Tag
+
+// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
+	fields := tags(make([]*j.Tag, 0, len(logFields)))
+	for _, field := range logFields {
+		field.Marshal(&fields)
+	}
+	return fields
+}
+
+func (t *tags) EmitString(key, value string) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
+}
+
+func (t *tags) EmitBool(key string, value bool) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
+}
+
+func (t *tags) EmitInt(key string, value int) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt32(key string, value int32) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt64(key string, value int64) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
+}
+
+func (t *tags) EmitUint32(key string, value uint32) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitUint64(key string, value uint64) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitFloat32(key string, value float32) {
+	vDouble := float64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
+}
+
+func (t *tags) EmitFloat64(key string, value float64) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
+}
+
+func (t *tags) EmitObject(key string, value interface{}) {
+	vStr := fmt.Sprintf("%+v", value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
+}
+
+func (t *tags) EmitLazyLogger(value log.LazyLogger) {
+	value(t)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ac2f8f949c86a1cb7b8fb214e62cf9045d07079
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// BuildJaegerThrift builds jaeger span based on internal span.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerThrift(span *Span) *j.Span {
+	span.Lock()
+	defer span.Unlock()
+	startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+	duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+	jaegerSpan := &j.Span{
+		TraceIdLow:    int64(span.context.traceID.Low),
+		TraceIdHigh:   int64(span.context.traceID.High),
+		SpanId:        int64(span.context.spanID),
+		ParentSpanId:  int64(span.context.parentID),
+		OperationName: span.operationName,
+		Flags:         int32(span.context.samplingState.flags()),
+		StartTime:     startTime,
+		Duration:      duration,
+		Tags:          buildTags(span.tags, span.tracer.options.maxTagValueLength),
+		Logs:          buildLogs(span.logs),
+		References:    buildReferences(span.references),
+	}
+	return jaegerSpan
+}
+
+// BuildJaegerProcessThrift creates a thrift Process type.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerProcessThrift(span *Span) *j.Process {
+	span.Lock()
+	defer span.Unlock()
+	return buildJaegerProcessThrift(span.tracer)
+}
+
+func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
+	process := &j.Process{
+		ServiceName: tracer.serviceName,
+		Tags:        buildTags(tracer.tags, tracer.options.maxTagValueLength),
+	}
+	if tracer.process.UUID != "" {
+		process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
+	}
+	return process
+}
+
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
+	jTags := make([]*j.Tag, 0, len(tags))
+	for _, tag := range tags {
+		jTag := buildTag(&tag, maxTagValueLength)
+		jTags = append(jTags, jTag)
+	}
+	return jTags
+}
+
+func buildLogs(logs []opentracing.LogRecord) []*j.Log {
+	jLogs := make([]*j.Log, 0, len(logs))
+	for _, log := range logs {
+		jLog := &j.Log{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+			Fields:    ConvertLogsToJaegerTags(log.Fields),
+		}
+		jLogs = append(jLogs, jLog)
+	}
+	return jLogs
+}
+
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
+	jTag := &j.Tag{Key: tag.key}
+	switch value := tag.value.(type) {
+	case string:
+		vStr := truncateString(value, maxTagValueLength)
+		jTag.VStr = &vStr
+		jTag.VType = j.TagType_STRING
+	case []byte:
+		if len(value) > maxTagValueLength {
+			value = value[:maxTagValueLength]
+		}
+		jTag.VBinary = value
+		jTag.VType = j.TagType_BINARY
+	case int:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int8:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint8:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int16:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint16:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int32:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint32:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int64:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint64:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case float32:
+		vDouble := float64(value)
+		jTag.VDouble = &vDouble
+		jTag.VType = j.TagType_DOUBLE
+	case float64:
+		vDouble := float64(value)
+		jTag.VDouble = &vDouble
+		jTag.VType = j.TagType_DOUBLE
+	case bool:
+		vBool := value
+		jTag.VBool = &vBool
+		jTag.VType = j.TagType_BOOL
+	default:
+		vStr := truncateString(stringify(value), maxTagValueLength)
+		jTag.VStr = &vStr
+		jTag.VType = j.TagType_STRING
+	}
+	return jTag
+}
+
+func buildReferences(references []Reference) []*j.SpanRef {
+	retMe := make([]*j.SpanRef, 0, len(references))
+	for _, ref := range references {
+		if ref.Type == opentracing.ChildOfRef {
+			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
+		} else if ref.Type == opentracing.FollowsFromRef {
+			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
+		}
+	}
+	return retMe
+}
+
+func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
+	return &j.SpanRef{
+		RefType:     refType,
+		TraceIdLow:  int64(ctx.traceID.Low),
+		TraceIdHigh: int64(ctx.traceID.High),
+		SpanId:      int64(ctx.spanID),
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..692355aed0785dace748545bf063a803de615fa8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -0,0 +1,138 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"sync"
+)
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+	// Error logs a message at error priority
+	Error(msg string)
+
+	// Infof logs a message at info priority
+	Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+	log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+	log.Printf(msg, args...)
+}
+
+// Debugf logs a message at debug priority
+func (l *stdLogger) Debugf(msg string, args ...interface{}) {
+	log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string)                       {}
+func (l *nullLogger) Infof(msg string, args ...interface{})  {}
+func (l *nullLogger) Debugf(msg string, args ...interface{}) {}
+
+// BytesBufferLogger implements Logger backed by a bytes.Buffer.
+type BytesBufferLogger struct {
+	mux sync.Mutex
+	buf bytes.Buffer
+}
+
+// Error implements Logger.
+func (l *BytesBufferLogger) Error(msg string) {
+	l.mux.Lock()
+	l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
+	l.mux.Unlock()
+}
+
+// Infof implements Logger.
+func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
+	l.mux.Lock()
+	l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
+	l.mux.Unlock()
+}
+
+// Debugf implements Logger.
+func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
+	l.mux.Lock()
+	l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
+	l.mux.Unlock()
+}
+
+// String returns string representation of the underlying buffer.
+func (l *BytesBufferLogger) String() string {
+	l.mux.Lock()
+	defer l.mux.Unlock()
+	return l.buf.String()
+}
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() {
+	l.mux.Lock()
+	defer l.mux.Unlock()
+	l.buf.Reset()
+}
+
+// DebugLogger is an interface which adds a debug logging level
+type DebugLogger interface {
+	Logger
+
+	// Debugf logs a message at debug priority
+	Debugf(msg string, args ...interface{})
+}
+
+// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger
+// If the provided Logger doesn't satisfy the interface, a logger with debug
+// disabled is returned
+func DebugLogAdapter(logger Logger) DebugLogger {
+	if debugLogger, ok := logger.(DebugLogger); ok {
+		return debugLogger
+	}
+	logger.Infof("debug logging disabled")
+	return debugDisabledLogAdapter{logger: logger}
+}
+
+type debugDisabledLogAdapter struct {
+	logger Logger
+}
+
+func (d debugDisabledLogAdapter) Error(msg string) {
+	d.logger.Error(msg)
+}
+
+func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) {
+	d.logger.Infof(msg, args...)
+}
+
+// Debugf is a nop
+func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4f0b501923d51b42d27f55b6ec84e40c7185a65
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/logger.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "log"
+
+// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+	// Error logs a message at error priority
+	Error(msg string)
+
+	// Infof logs a message at info priority
+	Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+	log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+	log.Printf(msg, args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string)                      {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
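Since Logger is a two-method interface, adapting an application's own logging takes only a few lines. A hedged sketch (prefixLogger is hypothetical, not part of this package):

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

// prefixLogger is a hypothetical adapter that satisfies jaeger.Logger.
type prefixLogger struct{ prefix string }

func (l prefixLogger) Error(msg string) { fmt.Println(l.prefix, "ERROR:", msg) }
func (l prefixLogger) Infof(msg string, args ...interface{}) {
	fmt.Println(l.prefix, fmt.Sprintf(msg, args...))
}

var _ jaeger.Logger = prefixLogger{} // compile-time interface check

func main() {
	var logger jaeger.Logger = prefixLogger{prefix: "[tracer]"}
	logger.Infof("flushed %d spans", 3)
	logger.Error("agent unreachable")
}
```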
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..50e4e22d6c1d5b44e069d54096bb4ee9a30e8b04
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/uber/jaeger-lib/metrics"
+)
+
+// Metrics is a container of all stats emitted by Jaeger tracer.
+type Metrics struct {
+	// Number of traces started by this tracer as sampled
+	TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
+
+	// Number of traces started by this tracer as not sampled
+	TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+
+	// Number of traces started by this tracer with delayed sampling
+	TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=delayed" help:"Number of traces started by this tracer with delayed sampling"`
+
+	// Number of externally started sampled traces this tracer joined
+	TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
+
+	// Number of externally started not-sampled traces this tracer joined
+	TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
+
+	// Number of sampled spans started by this tracer
+	SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
+
+	// Number of not sampled spans started by this tracer
+	SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
+
+	// Number of spans with delayed sampling started by this tracer
+	SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
+
+	// Number of sampled spans finished by this tracer
+	SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
+
+	// Number of not-sampled spans finished by this tracer
+	SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
+
+	// Number of spans with delayed sampling finished by this tracer
+	SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
+
+	// Number of errors decoding tracing context
+	DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
+
+	// Number of spans successfully reported
+	ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
+
+	// Number of spans not reported due to a Sender failure
+	ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
+
+	// Number of spans dropped due to internal queue overflow
+	ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
+
+	// Current number of spans in the reporter queue
+	ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
+
+	// Number of times the Sampler succeeded to retrieve sampling strategy
+	SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
+
+	// Number of times the Sampler failed to retrieve sampling strategy
+	SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
+
+	// Number of times the Sampler succeeded to retrieve and update sampling strategy
+	SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
+
+	// Number of times the Sampler failed to update sampling strategy
+	SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
+
+	// Number of times baggage was successfully written or updated on spans.
+	BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
+
+	// Number of times baggage failed to write or update on spans.
+	BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
+
+	// Number of times baggage was truncated as per baggage restrictions.
+	BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
+
+	// Number of times baggage restrictions were successfully updated.
+	BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
+
+	// Number of times baggage restrictions failed to update.
+	BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
+
+	// Number of times debug spans were throttled.
+	ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
+
+	// Number of times throttler successfully updated.
+	ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
+
+	// Number of times throttler failed to update.
+	ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
+}
+
+// NewMetrics creates a new Metrics struct and initializes it.
+func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
+	m := &Metrics{}
+	// TODO the namespace "jaeger" should be configurable
+	metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
+	return m
+}
+
+// NewNullMetrics creates a new Metrics struct that won't report any metrics.
+func NewNullMetrics() *Metrics {
+	return NewMetrics(metrics.NullFactory, nil)
+}
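A sketch of wiring Metrics up: NewMetrics takes a jaeger-lib metrics.Factory, and metrics.NullFactory (what NewNullMetrics uses) discards everything, which makes it safe for examples and tests:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// Counters and gauges are registered under the "jaeger.tracer" namespace
	// with the given global tags; NullFactory silently drops all updates.
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"host": "demo"})
	m.TracesStartedSampled.Inc(1)
	m.ReporterQueueLength.Update(0)

	_ = jaeger.NewNullMetrics() // shorthand for the same thing, without tags
}
```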
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bbd028897a9d5fe3d248b6a1143d2e6301da109
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/observer.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with the Tracer to receive notifications about
+// new Spans.
+//
+// Deprecated: use jaeger.ContribObserver instead.
+type Observer interface {
+	OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+//
+// Deprecated: use jaeger.ContribSpanObserver instead.
+type SpanObserver interface {
+	OnSetOperationName(operationName string)
+	OnSetTag(key string, value interface{})
+	OnFinish(options opentracing.FinishOptions)
+}
+
+// compositeObserver is a dispatcher to other observers
+type compositeObserver struct {
+	observers []ContribObserver
+}
+
+// compositeSpanObserver is a dispatcher to other span observers
+type compositeSpanObserver struct {
+	observers []ContribSpanObserver
+}
+
+// noopSpanObserver is used when there are no observers registered
+// on the Tracer or none of them returns span observers from OnStartSpan.
+var noopSpanObserver = &compositeSpanObserver{}
+
+func (o *compositeObserver) append(contribObserver ContribObserver) {
+	o.observers = append(o.observers, contribObserver)
+}
+
+func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
+	var spanObservers []ContribSpanObserver
+	for _, obs := range o.observers {
+		spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+		if ok {
+			if spanObservers == nil {
+				spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
+			}
+			spanObservers = append(spanObservers, spanObs)
+		}
+	}
+	if len(spanObservers) == 0 {
+		return noopSpanObserver
+	}
+	return &compositeSpanObserver{observers: spanObservers}
+}
+
+func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
+	for _, obs := range o.observers {
+		obs.OnSetOperationName(operationName)
+	}
+}
+
+func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
+	for _, obs := range o.observers {
+		obs.OnSetTag(key, value)
+	}
+}
+
+func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
+	for _, obs := range o.observers {
+		obs.OnFinish(options)
+	}
+}
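The composite types above dispatch to ContribObserver / ContribSpanObserver, which are declared in this package's contrib_observer.go (not shown in this diff). A hedged sketch of an observer that merely counts started spans; countingObserver is hypothetical, and it would be registered through the tracer's observer option at construction time:

```go
package tracing

import (
	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// countingObserver counts how many spans the tracer has started.
type countingObserver struct{ started int }

type noopSpanObs struct{}

func (o *countingObserver) OnStartSpan(
	sp opentracing.Span, operationName string, options opentracing.StartSpanOptions,
) (jaeger.ContribSpanObserver, bool) {
	o.started++
	return noopSpanObs{}, true // returning false means "not interested in this span"
}

func (noopSpanObs) OnSetOperationName(operationName string)    {}
func (noopSpanObs) OnSetTag(key string, value interface{})     {}
func (noopSpanObs) OnFinish(options opentracing.FinishOptions) {}

var _ jaeger.ContribObserver = &countingObserver{} // compile-time check
```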
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
new file mode 100644
index 0000000000000000000000000000000000000000..30cbf99624c94bca52fe3641a597cd70a99d7cec
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/process.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// Process holds process-specific metadata that's relevant to this client.
+type Process struct {
+	Service string
+	UUID    string
+	Tags    []Tag
+}
+
+// ProcessSetter sets a process. This can be used by any class that requires
+// the process to be set as part of initialization.
+// See internal/throttler/remote/throttler.go for an example.
+type ProcessSetter interface {
+	SetProcess(process Process)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
new file mode 100644
index 0000000000000000000000000000000000000000..42fd64b5882a2508d199695215650e154a88f0fb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -0,0 +1,313 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"log"
+	"net/url"
+	"strings"
+	"sync"
+
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Injector is responsible for injecting SpanContext instances in a manner suitable
+// for propagation via a format-specific "carrier" object. Typically the
+// injection will take place across an RPC boundary, but message queues and
+// other IPC mechanisms are also reasonable places to use an Injector.
+type Injector interface {
+	// Inject takes `SpanContext` and injects it into `carrier`. The actual type
+	// of `carrier` depends on the `format` passed to `Tracer.Inject()`.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if injection fails.
+	Inject(ctx SpanContext, carrier interface{}) error
+}
+
+// Extractor is responsible for extracting SpanContext instances from a
+// format-specific "carrier" object. Typically the extraction will take place
+// on the server side of an RPC boundary, but message queues and other IPC
+// mechanisms are also reasonable places to use an Extractor.
+type Extractor interface {
+	// Extract decodes a SpanContext instance from the given `carrier`,
+	// or (nil, opentracing.ErrSpanContextNotFound) if no context could
+	// be found in the `carrier`.
+	Extract(carrier interface{}) (SpanContext, error)
+}
+
+// TextMapPropagator is a combined Injector and Extractor for TextMap format
+type TextMapPropagator struct {
+	headerKeys  *HeadersConfig
+	metrics     Metrics
+	encodeValue func(string) string
+	decodeValue func(string) string
+}
+
+// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
+func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+	return &TextMapPropagator{
+		headerKeys: headerKeys,
+		metrics:    metrics,
+		encodeValue: func(val string) string {
+			return val
+		},
+		decodeValue: func(val string) string {
+			return val
+		},
+	}
+}
+
+// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
+func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+	return &TextMapPropagator{
+		headerKeys: headerKeys,
+		metrics:    metrics,
+		encodeValue: func(val string) string {
+			return url.QueryEscape(val)
+		},
+		decodeValue: func(val string) string {
+			// ignore decoding errors, cannot do anything about them
+			if v, err := url.QueryUnescape(val); err == nil {
+				return v
+			}
+			return val
+		},
+	}
+}
+
+// BinaryPropagator is a combined Injector and Extractor for Binary format
+type BinaryPropagator struct {
+	tracer  *Tracer
+	buffers sync.Pool
+}
+
+// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
+func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
+	return &BinaryPropagator{
+		tracer:  tracer,
+		buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
+	}
+}
+
+// Inject implements Injector of TextMapPropagator
+func (p *TextMapPropagator) Inject(
+	sc SpanContext,
+	abstractCarrier interface{},
+) error {
+	textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	// Do not encode the string with trace context to avoid accidental double-encoding
+	// if people are using opentracing < 0.10.0. Our colon-separated representation
+	// of the trace context is already safe for HTTP headers.
+	textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
+	for k, v := range sc.baggage {
+		safeKey := p.addBaggageKeyPrefix(k)
+		safeVal := p.encodeValue(v)
+		textMapWriter.Set(safeKey, safeVal)
+	}
+	return nil
+}
+
+// Extract implements Extractor of TextMapPropagator
+func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	var ctx SpanContext
+	var baggage map[string]string
+	err := textMapReader.ForeachKey(func(rawKey, value string) error {
+		key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
+		if key == p.headerKeys.TraceContextHeaderName {
+			var err error
+			safeVal := p.decodeValue(value)
+			if ctx, err = ContextFromString(safeVal); err != nil {
+				return err
+			}
+		} else if key == p.headerKeys.JaegerDebugHeader {
+			ctx.debugID = p.decodeValue(value)
+		} else if key == p.headerKeys.JaegerBaggageHeader {
+			if baggage == nil {
+				baggage = make(map[string]string)
+			}
+			for k, v := range p.parseCommaSeparatedMap(value) {
+				baggage[k] = v
+			}
+		} else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
+			if baggage == nil {
+				baggage = make(map[string]string)
+			}
+			safeKey := p.removeBaggageKeyPrefix(key)
+			safeVal := p.decodeValue(value)
+			baggage[safeKey] = safeVal
+		}
+		return nil
+	})
+	if err != nil {
+		p.metrics.DecodingErrors.Inc(1)
+		return emptyContext, err
+	}
+	if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
+		return emptyContext, opentracing.ErrSpanContextNotFound
+	}
+	ctx.baggage = baggage
+	return ctx, nil
+}
+
+// Inject implements Injector of BinaryPropagator
+func (p *BinaryPropagator) Inject(
+	sc SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(io.Writer)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	// Handle the tracer context
+	if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
+		return err
+	}
+
+	// Handle the baggage items
+	if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
+		return err
+	}
+	for k, v := range sc.baggage {
+		if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
+			return err
+		}
+		io.WriteString(carrier, k)
+		if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
+			return err
+		}
+		io.WriteString(carrier, v)
+	}
+
+	return nil
+}
+
+// Extract implements Extractor of BinaryPropagator
+func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(io.Reader)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	var ctx SpanContext
+	ctx.samplingState = &samplingState{}
+
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+
+	var flags byte
+	if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	ctx.samplingState.setFlags(flags)
+
+	// Handle the baggage items
+	var numBaggage int32
+	if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
+		ctx.baggage = make(map[string]string, iNumBaggage)
+		buf := p.buffers.Get().(*bytes.Buffer)
+		defer p.buffers.Put(buf)
+
+		var keyLen, valLen int32
+		for i := 0; i < iNumBaggage; i++ {
+			if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			buf.Reset()
+			buf.Grow(int(keyLen))
+			if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			key := buf.String()
+
+			if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			buf.Reset()
+			buf.Grow(int(valLen))
+			if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			ctx.baggage[key] = buf.String()
+		}
+	}
+
+	return ctx, nil
+}
+
+// Converts a comma separated key value pair list into a map
+// e.g. key1=value1, key2=value2, key3 = value3
+// is converted to map[string]string { "key1" : "value1",
+//                                     "key2" : "value2",
+//                                     "key3" : "value3" }
+func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+	baggage := make(map[string]string)
+	value, err := url.QueryUnescape(value)
+	if err != nil {
+		log.Printf("Unable to unescape %s, %v", value, err)
+		return baggage
+	}
+	for _, kvpair := range strings.Split(value, ",") {
+		kv := strings.Split(strings.TrimSpace(kvpair), "=")
+		if len(kv) == 2 {
+			baggage[kv[0]] = kv[1]
+		} else {
+			log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
+		}
+	}
+	return baggage
+}
+
+// Converts a baggage item key into an http header format,
+// by prepending TraceBaggageHeaderPrefix and encoding the key string
+func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
+	// TODO encodeBaggageKeyAsHeader add caching and escaping
+	return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
+}
+
+func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
+	// TODO decodeBaggageHeaderKey add caching and escaping
+	return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
+}
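For orientation, a sketch of the call path these Injector/Extractor types sit behind: user code goes through the opentracing API with a carrier, and a tracer built from this package routes the HTTPHeaders format to NewHTTPHeaderPropagator. Tracer construction is elided here:

```go
package tracing

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

// InjectIntoRequest writes the span's context into outgoing HTTP headers.
func InjectIntoRequest(tracer opentracing.Tracer, span opentracing.Span, req *http.Request) error {
	// HTTPHeadersCarrier implements both TextMapWriter and TextMapReader.
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	return tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
}

// ExtractFromRequest recovers the caller's span context on the server side.
func ExtractFromRequest(tracer opentracing.Tracer, req *http.Request) (opentracing.SpanContext, error) {
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	return tracer.Extract(opentracing.HTTPHeaders, carrier)
}
```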
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
new file mode 100644
index 0000000000000000000000000000000000000000..5646e78bb2a8f0e7d3a9900e35cf4aa2da1897ef
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reference.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "github.com/opentracing/opentracing-go"
+
+// Reference represents a causal reference to other Spans (via their SpanContext).
+type Reference struct {
+	Type    opentracing.SpanReferenceType
+	Context SpanContext
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..a71a92c3e84ec3e4d077c87e67eed2842bdb30bd
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -0,0 +1,322 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	"github.com/uber/jaeger-client-go/internal/reporterstats"
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
+type Reporter interface {
+	// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+	// If the reporter is processing Spans asynchronously, then it needs to Retain() the span,
+	// and then Release() it when no longer needed, to avoid span data corruption.
+	Report(span *Span)
+
+	// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
+	Close()
+}
+
+// ------------------------------
+
+type nullReporter struct{}
+
+// NewNullReporter creates a no-op reporter that ignores all reported spans.
+func NewNullReporter() Reporter {
+	return &nullReporter{}
+}
+
+// Report implements Report() method of Reporter by doing nothing.
+func (r *nullReporter) Report(span *Span) {
+	// no-op
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *nullReporter) Close() {
+	// no-op
+}
+
+// ------------------------------
+
+type loggingReporter struct {
+	logger Logger
+}
+
+// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
+func NewLoggingReporter(logger Logger) Reporter {
+	return &loggingReporter{logger}
+}
+
+// Report implements Report() method of Reporter by logging the span to the logger.
+func (r *loggingReporter) Report(span *Span) {
+	r.logger.Infof("Reporting span %+v", span)
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *loggingReporter) Close() {
+	// no-op
+}
+
+// ------------------------------
+
+// InMemoryReporter is used for testing, and simply collects spans in memory.
+type InMemoryReporter struct {
+	spans []opentracing.Span
+	lock  sync.Mutex
+}
+
+// NewInMemoryReporter creates a reporter that stores spans in memory.
+// NOTE: the Tracer should be created with options.PoolSpans = false.
+func NewInMemoryReporter() *InMemoryReporter {
+	return &InMemoryReporter{
+		spans: make([]opentracing.Span, 0, 10),
+	}
+}
+
+// Report implements Report() method of Reporter by storing the span in the buffer.
+func (r *InMemoryReporter) Report(span *Span) {
+	r.lock.Lock()
+	// Need to retain the span otherwise it will be released
+	r.spans = append(r.spans, span.Retain())
+	r.lock.Unlock()
+}
+
+// Close implements Close() method of Reporter
+func (r *InMemoryReporter) Close() {
+	r.Reset()
+}
+
+// SpansSubmitted returns the number of spans accumulated in the buffer.
+func (r *InMemoryReporter) SpansSubmitted() int {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	return len(r.spans)
+}
+
+// GetSpans returns accumulated spans as a copy of the buffer.
+func (r *InMemoryReporter) GetSpans() []opentracing.Span {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	copied := make([]opentracing.Span, len(r.spans))
+	copy(copied, r.spans)
+	return copied
+}
+
+// Reset clears all accumulated spans.
+func (r *InMemoryReporter) Reset() {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	// Before resetting the collection, we need to release the Span memory
+	for _, span := range r.spans {
+		span.(*Span).Release()
+	}
+	r.spans = r.spans[:0]
+}
+
+// ------------------------------
+
+type compositeReporter struct {
+	reporters []Reporter
+}
+
+// NewCompositeReporter creates a reporter that delegates to each of the given reporters.
+func NewCompositeReporter(reporters ...Reporter) Reporter {
+	return &compositeReporter{reporters: reporters}
+}
+
+// Report implements Report() method of Reporter by delegating to each underlying reporter.
+func (r *compositeReporter) Report(span *Span) {
+	for _, reporter := range r.reporters {
+		reporter.Report(span)
+	}
+}
+
+// Close implements Close() method of Reporter by closing each underlying reporter.
+func (r *compositeReporter) Close() {
+	for _, reporter := range r.reporters {
+		reporter.Close()
+	}
+}
+
+// ------------- REMOTE REPORTER -----------------
+
+type reporterQueueItemType int
+
+const (
+	defaultQueueSize           = 100
+	defaultBufferFlushInterval = 1 * time.Second
+
+	reporterQueueItemSpan reporterQueueItemType = iota
+	reporterQueueItemClose
+)
+
+type reporterQueueItem struct {
+	itemType reporterQueueItemType
+	span     *Span
+	close    *sync.WaitGroup
+}
+
+// reporterStats implements reporterstats.ReporterStats.
+type reporterStats struct {
+	droppedCount int64 // provided to Transports to report data loss to the backend
+}
+
+// SpansDroppedFromQueue implements reporterstats.ReporterStats.
+func (r *reporterStats) SpansDroppedFromQueue() int64 {
+	return atomic.LoadInt64(&r.droppedCount)
+}
+
+func (r *reporterStats) incDroppedCount() {
+	atomic.AddInt64(&r.droppedCount, 1)
+}
+
+type remoteReporter struct {
+	// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+	queueLength int64 // used to update metrics.Gauge
+	closed      int64 // 0 - not closed, 1 - closed
+
+	reporterOptions
+
+	sender        Transport
+	queue         chan reporterQueueItem
+	reporterStats *reporterStats
+}
+
+// NewRemoteReporter creates a new reporter that sends spans out of process by means of the given Transport.
+// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
+// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
+// Calls to Close() block until all spans reported prior to the call to Close are flushed.
+func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
+	options := reporterOptions{}
+	for _, option := range opts {
+		option(&options)
+	}
+	if options.bufferFlushInterval <= 0 {
+		options.bufferFlushInterval = defaultBufferFlushInterval
+	}
+	if options.logger == nil {
+		options.logger = log.NullLogger
+	}
+	if options.metrics == nil {
+		options.metrics = NewNullMetrics()
+	}
+	if options.queueSize <= 0 {
+		options.queueSize = defaultQueueSize
+	}
+	reporter := &remoteReporter{
+		reporterOptions: options,
+		sender:          sender,
+		queue:           make(chan reporterQueueItem, options.queueSize),
+		reporterStats:   new(reporterStats),
+	}
+	if receiver, ok := sender.(reporterstats.Receiver); ok {
+		receiver.SetReporterStats(reporter.reporterStats)
+	}
+	go reporter.processQueue()
+	return reporter
+}
+
+// Report implements Report() method of Reporter.
+// It passes the span to a background go-routine for submission to Jaeger backend.
+// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
+// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
+// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
+// because some of them may still be successfully added to the queue.
+func (r *remoteReporter) Report(span *Span) {
+	select {
+	// Need to retain the span otherwise it will be released
+	case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
+		atomic.AddInt64(&r.queueLength, 1)
+	default:
+		r.metrics.ReporterDropped.Inc(1)
+		r.reporterStats.incDroppedCount()
+	}
+}
+
+// Close implements Close() method of Reporter by waiting for the queue to be drained.
+func (r *remoteReporter) Close() {
+	r.logger.Debugf("closing reporter")
+	if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
+		r.logger.Error("Repeated attempt to close the reporter is ignored")
+		return
+	}
+	r.sendCloseEvent()
+	_ = r.sender.Close()
+}
+
+func (r *remoteReporter) sendCloseEvent() {
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
+
+	r.queue <- item // if the queue is full we will block until there is space
+	atomic.AddInt64(&r.queueLength, 1)
+	wg.Wait()
+}
+
+// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
+// When the sender's internal buffer fills up, it is flushed by submitting the accumulated spans to Jaeger.
+// The buffer also gets flushed automatically every bufferFlushInterval, in case the tracer
+// stopped reporting new spans.
+func (r *remoteReporter) processQueue() {
+	// flush causes the Sender to flush its accumulated spans and clear the buffer
+	flush := func() {
+		if flushed, err := r.sender.Flush(); err != nil {
+			r.metrics.ReporterFailure.Inc(int64(flushed))
+			r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
+		} else if flushed > 0 {
+			r.metrics.ReporterSuccess.Inc(int64(flushed))
+		}
+	}
+
+	timer := time.NewTicker(r.bufferFlushInterval)
+	for {
+		select {
+		case <-timer.C:
+			flush()
+		case item := <-r.queue:
+			atomic.AddInt64(&r.queueLength, -1)
+			switch item.itemType {
+			case reporterQueueItemSpan:
+				span := item.span
+				if flushed, err := r.sender.Append(span); err != nil {
+					r.metrics.ReporterFailure.Inc(int64(flushed))
+					r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
+				} else if flushed > 0 {
+					r.metrics.ReporterSuccess.Inc(int64(flushed))
+					// to reduce the number of gauge stats, we only emit queue length on flush
+					r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
+					r.logger.Debugf("flushed %d spans", flushed)
+				}
+				span.Release()
+			case reporterQueueItemClose:
+				timer.Stop()
+				flush()
+				item.close.Done()
+				return
+			}
+		}
+	}
+}
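A sketch combining the reporters above, as is common in tests: a CompositeReporter that logs every span and also buffers it in memory for assertions:

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	inMemory := jaeger.NewInMemoryReporter()
	reporter := jaeger.NewCompositeReporter(
		jaeger.NewLoggingReporter(jaeger.StdLogger),
		inMemory, // keeps spans for later inspection via GetSpans()
	)
	defer reporter.Close()

	// A tracer constructed with this reporter would call Report for every
	// finished span; the in-memory half then exposes the bookkeeping:
	fmt.Println("spans buffered:", inMemory.SpansSubmitted())
}
```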
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..2fc030547ea15461cd17d531379a1965b58ab72c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// ReporterOption is a function that sets some option on the reporter.
+type ReporterOption func(c *reporterOptions)
+
+// ReporterOptions is a factory for all available ReporterOption's
+var ReporterOptions reporterOptions
+
+// reporterOptions control behavior of the reporter.
+type reporterOptions struct {
+	// queueSize is the size of internal queue where reported spans are stored before they are processed in the background
+	queueSize int
+	// bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
+	bufferFlushInterval time.Duration
+	// logger is used to log errors of span submissions
+	logger log.DebugLogger
+	// metrics is used to record runtime stats
+	metrics *Metrics
+}
+
+// QueueSize creates a ReporterOption that sets the size of the internal queue where
+// spans are stored before they are processed.
+func (reporterOptions) QueueSize(queueSize int) ReporterOption {
+	return func(r *reporterOptions) {
+		r.queueSize = queueSize
+	}
+}
+
+// Metrics creates a ReporterOption that initializes Metrics in the reporter,
+// which is used to record runtime statistics.
+func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
+	return func(r *reporterOptions) {
+		r.metrics = metrics
+	}
+}
+
+// BufferFlushInterval creates a ReporterOption that sets how often the queue
+// is force-flushed.
+func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
+	return func(r *reporterOptions) {
+		r.bufferFlushInterval = bufferFlushInterval
+	}
+}
+
+// Logger creates a ReporterOption that initializes the logger used to log
+// errors of span submissions.
+func (reporterOptions) Logger(logger Logger) ReporterOption {
+	return func(r *reporterOptions) {
+		r.logger = log.DebugLogAdapter(logger)
+	}
+}
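A sketch of passing these options to NewRemoteReporter. NewUDPTransport comes from this package's transport_udp.go (not shown in this diff); the agent address and sizes are placeholders:

```go
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// 0 means "use the default max UDP packet size".
	sender, err := jaeger.NewUDPTransport("127.0.0.1:6831", 0)
	if err != nil {
		panic(err)
	}
	reporter := jaeger.NewRemoteReporter(
		sender,
		jaeger.ReporterOptions.QueueSize(500),
		jaeger.ReporterOptions.BufferFlushInterval(2*time.Second),
		jaeger.ReporterOptions.Logger(jaeger.StdLogger),
	)
	defer reporter.Close() // blocks until spans reported so far are flushed
}
```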
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
new file mode 100644
index 0000000000000000000000000000000000000000..d0be8ad500796c87d2218c135dd502059fe8447c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -0,0 +1,516 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+
+	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	defaultMaxOperations = 2000
+)
+
+// Sampler decides whether a new trace should be sampled or not.
+type Sampler interface {
+	// IsSampled decides whether a trace with given `id` and `operation`
+	// should be sampled. This function will also return the tags that
+	// can be used to identify the type of sampling that was applied to
+	// the root span. Most simple samplers would return two tags,
+	// sampler.type and sampler.param, similar to those used in the Configuration
+	IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
+
+	// Close does a clean shutdown of the sampler, stopping any background
+	// go-routines it may have started.
+	Close()
+
+	// Equal checks if the `other` sampler is functionally equivalent
+	// to this sampler.
+	// TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
+	Equal(other Sampler) bool
+}
+
+// -----------------------
+
+// ConstSampler is a sampler that always makes the same decision.
+type ConstSampler struct {
+	legacySamplerV1Base
+	Decision bool
+	tags     []Tag
+}
+
+// NewConstSampler creates a ConstSampler.
+func NewConstSampler(sample bool) *ConstSampler {
+	tags := []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeConst},
+		{key: SamplerParamTagKey, value: sample},
+	}
+	s := &ConstSampler{
+		Decision: sample,
+		tags:     tags,
+	}
+	s.delegate = s.IsSampled
+	return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.Decision, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ConstSampler) Close() {
+	// nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ConstSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*ConstSampler); ok {
+		return s.Decision == o.Decision
+	}
+	return false
+}
+
+// String is used to log sampler details.
+func (s *ConstSampler) String() string {
+	return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
+}
+
+// -----------------------
+
+// ProbabilisticSampler is a sampler that randomly samples a certain percentage
+// of traces.
+type ProbabilisticSampler struct {
+	legacySamplerV1Base
+	samplingRate     float64
+	samplingBoundary uint64
+	tags             []Tag
+}
+
+const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
+
+// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
+// samplingRate, in the range between 0.0 and 1.0.
+//
+// It relies on the fact that new trace IDs are 63-bit random numbers themselves, thus making the sampling decision
+// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
+// TODO remove the error from this function for next major release
+func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
+	if samplingRate < 0.0 || samplingRate > 1.0 {
+		return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+	}
+	return newProbabilisticSampler(samplingRate), nil
+}
+
+func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
+	s := new(ProbabilisticSampler)
+	s.delegate = s.IsSampled
+	return s.init(samplingRate)
+}
+
+func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
+	s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+	s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
+	s.tags = []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
+		{key: SamplerParamTagKey, value: s.samplingRate},
+	}
+	return s
+}
+
+// SamplingRate returns the sampling probability this sampler was constructed with.
+func (s *ProbabilisticSampler) SamplingRate() float64 {
+	return s.samplingRate
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ProbabilisticSampler) Close() {
+	// nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ProbabilisticSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*ProbabilisticSampler); ok {
+		return s.samplingBoundary == o.samplingBoundary
+	}
+	return false
+}
+
+// Update modifies in-place the sampling rate. Locking must be done externally.
+func (s *ProbabilisticSampler) Update(samplingRate float64) error {
+	if samplingRate < 0.0 || samplingRate > 1.0 {
+		return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+	}
+	s.init(samplingRate)
+	return nil
+}
+
+// String is used to log sampler details.
+func (s *ProbabilisticSampler) String() string {
+	return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
+}
+
+// -----------------------
+
+// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
+// burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
+// number of sequential requests can be sampled each second.
+type RateLimitingSampler struct {
+	legacySamplerV1Base
+	maxTracesPerSecond float64
+	rateLimiter        *utils.ReconfigurableRateLimiter
+	tags               []Tag
+}
+
+// NewRateLimitingSampler creates new RateLimitingSampler.
+func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
+	s := new(RateLimitingSampler)
+	s.delegate = s.IsSampled
+	return s.init(maxTracesPerSecond)
+}
+
+func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
+	if s.rateLimiter == nil {
+		s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+	} else {
+		s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+	}
+	s.maxTracesPerSecond = maxTracesPerSecond
+	s.tags = []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
+		{key: SamplerParamTagKey, value: maxTracesPerSecond},
+	}
+	return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.rateLimiter.CheckCredit(1.0), s.tags
+}
+
+// Update reconfigures the rate limiter, while preserving its accumulated balance.
+// Locking must be done externally.
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
+	if s.maxTracesPerSecond != maxTracesPerSecond {
+		s.init(maxTracesPerSecond)
+	}
+}
+
+// Close does nothing.
+func (s *RateLimitingSampler) Close() {
+	// nothing to do
+}
+
+// Equal compares with another sampler.
+func (s *RateLimitingSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*RateLimitingSampler); ok {
+		return s.maxTracesPerSecond == o.maxTracesPerSecond
+	}
+	return false
+}
+
+// String is used to log sampler details.
+func (s *RateLimitingSampler) String() string {
+	return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
+}
+
+// -----------------------
+
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
+// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
+// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
+// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
+//
+// The ProbabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
+// samplers return true, the tags for ProbabilisticSampler will be used.
+type GuaranteedThroughputProbabilisticSampler struct {
+	probabilisticSampler *ProbabilisticSampler
+	lowerBoundSampler    *RateLimitingSampler
+	tags                 []Tag
+	samplingRate         float64
+	lowerBound           float64
+}
+
+// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
+// ProbabilisticSampler and RateLimitingSampler.
+func NewGuaranteedThroughputProbabilisticSampler(
+	lowerBound, samplingRate float64,
+) (*GuaranteedThroughputProbabilisticSampler, error) {
+	return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
+}
+
+func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
+	s := &GuaranteedThroughputProbabilisticSampler{
+		lowerBoundSampler: NewRateLimitingSampler(lowerBound),
+		lowerBound:        lowerBound,
+	}
+	s.setProbabilisticSampler(samplingRate)
+	return s
+}
+
+func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
+	if s.probabilisticSampler == nil {
+		s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+	} else if s.samplingRate != samplingRate {
+		s.probabilisticSampler.init(samplingRate)
+	}
+	// since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
+	samplingRate = s.probabilisticSampler.SamplingRate()
+	if s.samplingRate != samplingRate || s.tags == nil {
+		s.samplingRate = s.probabilisticSampler.SamplingRate()
+		s.tags = []Tag{
+			{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
+			{key: SamplerParamTagKey, value: s.samplingRate},
+		}
+	}
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
+		s.lowerBoundSampler.IsSampled(id, operation)
+		return true, tags
+	}
+	sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
+	return sampled, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Close() {
+	s.probabilisticSampler.Close()
+	s.lowerBoundSampler.Close()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+	// more information.
+	return false
+}
+
+// this function should only be called while holding a Write lock
+func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
+	s.setProbabilisticSampler(samplingRate)
+	if s.lowerBound != lowerBound {
+		s.lowerBoundSampler.Update(lowerBound)
+		s.lowerBound = lowerBound
+	}
+}
+
+func (s GuaranteedThroughputProbabilisticSampler) String() string {
+	return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
+}
+
+// -----------------------
+
+// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
+// on a per-operation basis.
+type PerOperationSampler struct {
+	sync.RWMutex
+
+	samplers       map[string]*GuaranteedThroughputProbabilisticSampler
+	defaultSampler *ProbabilisticSampler
+	lowerBound     float64
+	maxOperations  int
+
+	// see description in PerOperationSamplerParams
+	operationNameLateBinding bool
+}
+
+// NewAdaptiveSampler returns a new PerOperationSampler.
+// Deprecated: please use NewPerOperationSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
+	return NewPerOperationSampler(PerOperationSamplerParams{
+		MaxOperations: maxOperations,
+		Strategies:    strategies,
+	}), nil
+}
+
+// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
+type PerOperationSamplerParams struct {
+	// Max number of operations that will be tracked. Other operations will be given the default strategy.
+	MaxOperations int
+
+	// Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
+	// When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
+	// the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
+	// in applications that always provide the correct span name on trace creation.
+	//
+	// For backwards compatibility this option is off by default.
+	OperationNameLateBinding bool
+
+	// Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
+	Strategies *sampling.PerOperationSamplingStrategies
+}
+
+// NewPerOperationSampler returns a new PerOperationSampler.
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
+	if params.MaxOperations <= 0 {
+		params.MaxOperations = defaultMaxOperations
+	}
+	samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
+	for _, strategy := range params.Strategies.PerOperationStrategies {
+		sampler := newGuaranteedThroughputProbabilisticSampler(
+			params.Strategies.DefaultLowerBoundTracesPerSecond,
+			strategy.ProbabilisticSampling.SamplingRate,
+		)
+		samplers[strategy.Operation] = sampler
+	}
+	return &PerOperationSampler{
+		samplers:                 samplers,
+		defaultSampler:           newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
+		lowerBound:               params.Strategies.DefaultLowerBoundTracesPerSecond,
+		maxOperations:            params.MaxOperations,
+		operationNameLateBinding: params.OperationNameLateBinding,
+	}
+}
+
+// IsSampled is not used and only exists to match Sampler V1 API.
+// TODO (breaking change) remove when upgrading everything to SamplerV2
+func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return false, nil
+}
+
+func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
+	samplerV1 := s.getSamplerForOperation(operationName)
+	var sampled bool
+	var tags []Tag
+	if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
+		sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
+	}
+	return sampled, tags
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
+	sampled, tags := s.trySampling(span, span.OperationName())
+	return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	sampled, tags := s.trySampling(span, operationName)
+	return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
+	s.RLock()
+	sampler, ok := s.samplers[operation]
+	if ok {
+		defer s.RUnlock()
+		return sampler
+	}
+	s.RUnlock()
+	s.Lock()
+	defer s.Unlock()
+
+	// Check if sampler has already been created
+	sampler, ok = s.samplers[operation]
+	if ok {
+		return sampler
+	}
+	// Store only up to maxOperations of unique ops.
+	if len(s.samplers) >= s.maxOperations {
+		return s.defaultSampler
+	}
+	newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
+	s.samplers[operation] = newSampler
+	return newSampler
+}
+
+// Close invokes Close on all underlying samplers.
+func (s *PerOperationSampler) Close() {
+	s.Lock()
+	defer s.Unlock()
+	for _, sampler := range s.samplers {
+		sampler.Close()
+	}
+	s.defaultSampler.Close()
+}
+
+func (s *PerOperationSampler) String() string {
+	var sb strings.Builder
+
+	fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
+	fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
+	fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
+	fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
+	fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
+	fmt.Fprintf(&sb, "samplers=[")
+	for operationName, sampler := range s.samplers {
+		fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
+	}
+	fmt.Fprintf(&sb, "])")
+
+	return sb.String()
+}
+
+// Equal is not used.
+// TODO (breaking change) remove this in the future
+func (s *PerOperationSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
+	// samplers which all need to be initialized before this function can be called for a comparison.
+	// Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
+	// changing. Hence this function always returns false so that the update function can be called.
+	// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
+	return false
+}
+
+func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+	s.Lock()
+	defer s.Unlock()
+	newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
+	for _, strategy := range strategies.PerOperationStrategies {
+		operation := strategy.Operation
+		samplingRate := strategy.ProbabilisticSampling.SamplingRate
+		lowerBound := strategies.DefaultLowerBoundTracesPerSecond
+		if sampler, ok := s.samplers[operation]; ok {
+			sampler.update(lowerBound, samplingRate)
+			newSamplers[operation] = sampler
+		} else {
+			sampler := newGuaranteedThroughputProbabilisticSampler(
+				lowerBound,
+				samplingRate,
+			)
+			newSamplers[operation] = sampler
+		}
+	}
+	s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
+	if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
+		s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
+	}
+	s.samplers = newSamplers
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
new file mode 100644
index 0000000000000000000000000000000000000000..112e3e1cb3ee660463671eab0e8a86153b10a4d1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -0,0 +1,337 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+	defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
+type SamplingStrategyFetcher interface {
+	Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+	Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from the remote config server, to the current sampler. The updater can
+// modify the sampler in place if the sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// the updater must return modifiedSampler=nil to give other updaters a chance to
+// inspect the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+	Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+	// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+	closed int64 // 0 - not closed, 1 - closed
+
+	sync.RWMutex
+	samplerOptions
+
+	serviceName string
+	doneChan    chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+	serviceName string,
+	opts ...SamplerOption,
+) *RemotelyControlledSampler {
+	options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+	sampler := &RemotelyControlledSampler{
+		samplerOptions: *options,
+		serviceName:    serviceName,
+		doneChan:       make(chan *sync.WaitGroup),
+	}
+	go sampler.pollController()
+	return sampler
+}
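+
+// A minimal usage sketch (illustrative; the endpoint and interval below are
+// assumptions, not defaults prescribed here):
+//
+//	sampler := NewRemotelyControlledSampler(
+//		"my-service",
+//		SamplerOptions.SamplingServerURL("http://127.0.0.1:5778/sampling"),
+//		SamplerOptions.SamplingRefreshInterval(time.Minute),
+//	)
+//	defer sampler.Close()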
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+	return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+	return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+	if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+		s.logger.Error("Repeated attempt to close the sampler is ignored")
+		return
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	s.doneChan <- &wg
+	wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+	// more information.
+	return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+	ticker := time.NewTicker(s.samplingRefreshInterval)
+	defer ticker.Stop()
+	s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+	for {
+		select {
+		case <-ticker.C:
+			s.UpdateSampler()
+		case wg := <-s.doneChan:
+			wg.Done()
+			return
+		}
+	}
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+	s.Lock()
+	defer s.Unlock()
+	return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+	s.Lock()
+	defer s.Unlock()
+	s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+	res, err := s.samplingFetcher.Fetch(s.serviceName)
+	if err != nil {
+		s.metrics.SamplerQueryFailure.Inc(1)
+		s.logger.Infof("failed to fetch sampling strategy: %v", err)
+		return
+	}
+	strategy, err := s.samplingParser.Parse(res)
+	if err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to parse sampling strategy response: %v", err)
+		return
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	s.metrics.SamplerRetrieved.Inc(1)
+	if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+		return
+	}
+	s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+	for _, updater := range s.updaters {
+		sampler, err := updater.Update(s.sampler, strategy)
+		if err != nil {
+			return err
+		}
+		if sampler != nil {
+			s.logger.Debugf("sampler updated: %+v", sampler)
+			s.sampler = sampler
+			return nil
+		}
+	}
+	return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to apply probabilistic sampling strategies.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+			if ps, ok := sampler.(*ProbabilisticSampler); ok {
+				if err := ps.Update(probabilistic.SamplingRate); err != nil {
+					return nil, err
+				}
+				return sampler, nil
+			}
+			return newProbabilisticSampler(probabilistic.SamplingRate), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to apply rate-limiting sampling strategies.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+			rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+			if rl, ok := sampler.(*RateLimitingSampler); ok {
+				rl.Update(rateLimit)
+				return rl, nil
+			}
+			return NewRateLimitingSampler(rateLimit), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to apply per-operation (adaptive) sampling strategies.
+// Fields have the same meaning as in PerOperationSamplerParams.
+type AdaptiveSamplerUpdater struct {
+	MaxOperations            int
+	OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetOperationSampling() *sampling.PerOperationSamplingStrategies
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if p, ok := strategy.(response); ok {
+		if operations := p.GetOperationSampling(); operations != nil {
+			if as, ok := sampler.(*PerOperationSampler); ok {
+				as.update(operations)
+				return as, nil
+			}
+			return NewPerOperationSampler(PerOperationSamplerParams{
+				MaxOperations:            u.MaxOperations,
+				OperationNameLateBinding: u.OperationNameLateBinding,
+				Strategies:               operations,
+			}), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+	serverURL string
+	logger    log.DebugLogger
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+	v := url.Values{}
+	v.Set("service", serviceName)
+	uri := f.serverURL + "?" + v.Encode()
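+	// e.g. "http://127.0.0.1:5778/sampling?service=my-service" (illustrative endpoint).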
+
+	// TODO create and reuse http.Client with proper timeout settings, etc.
+	resp, err := http.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := resp.Body.Close(); err != nil {
+			f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
+		}
+	}()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+	}
+
+	return body, nil
+}
+
+// -----------------------
+
+type samplingStrategyParser struct{}
+
+func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
+	strategy := new(sampling.SamplingStrategyResponse)
+	if err := json.Unmarshal(response, strategy); err != nil {
+		return nil, err
+	}
+	return strategy, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..e4a6108b73cfb2a5e3cd33a05db4aba3889f24ca
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -0,0 +1,162 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// SamplerOption is a function that sets some option on the sampler
+type SamplerOption func(options *samplerOptions)
+
+// SamplerOptions is a factory for all available SamplerOption's.
+var SamplerOptions SamplerOptionsFactory
+
+// SamplerOptionsFactory is a factory for all available SamplerOption's.
+// The type acts as a namespace for factory functions. It is public to
+// make the functions discoverable via godoc. Recommended to be used
+// via global SamplerOptions variable.
+type SamplerOptionsFactory struct{}
+
+type samplerOptions struct {
+	metrics                 *Metrics
+	sampler                 SamplerV2
+	logger                  log.DebugLogger
+	samplingServerURL       string
+	samplingRefreshInterval time.Duration
+	samplingFetcher         SamplingStrategyFetcher
+	samplingParser          SamplingStrategyParser
+	updaters                []SamplerUpdater
+	posParams               PerOperationSamplerParams
+}
+
+// Metrics creates a SamplerOption that initializes Metrics on the sampler,
+// which is used to emit statistics.
+func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
+	return func(o *samplerOptions) {
+		o.metrics = m
+	}
+}
+
+// MaxOperations creates a SamplerOption that sets the maximum number of
+// operations the sampler will keep track of.
+func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
+	return func(o *samplerOptions) {
+		o.posParams.MaxOperations = maxOperations
+	}
+}
+
+// OperationNameLateBinding creates a SamplerOption that sets the respective
+// field in the PerOperationSamplerParams.
+func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
+	return func(o *samplerOptions) {
+		o.posParams.OperationNameLateBinding = enable
+	}
+}
+
+// InitialSampler creates a SamplerOption that sets the initial sampler
+// to use before a remote sampler is created and used.
+func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
+	return func(o *samplerOptions) {
+		o.sampler = samplerV1toV2(sampler)
+	}
+}
+
+// Logger creates a SamplerOption that sets the logger used by the sampler.
+func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
+	return func(o *samplerOptions) {
+		o.logger = log.DebugLogAdapter(logger)
+	}
+}
+
+// SamplingServerURL creates a SamplerOption that sets the sampling server url
+// of the local agent that contains the sampling strategies.
+func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingServerURL = samplingServerURL
+	}
+}
+
+// SamplingRefreshInterval creates a SamplerOption that sets how often the
+// sampler will poll local agent for the appropriate sampling strategy.
+func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingRefreshInterval = samplingRefreshInterval
+	}
+}
+
+// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
+func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingFetcher = fetcher
+	}
+}
+
+// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
+func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingParser = parser
+	}
+}
+
+// Updaters creates a SamplerOption that initializes sampler updaters.
+func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
+	return func(o *samplerOptions) {
+		o.updaters = updaters
+	}
+}
+
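+// applyOptionsAndDefaults applies the given options and fills in defaults:
+// a 0.001 probabilistic sampler, a null logger, DefaultSamplingServerURL,
+// null metrics, a one-minute refresh interval, an HTTP strategy fetcher,
+// a JSON strategy parser, and the adaptive/probabilistic/rate-limiting
+// updater chain.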
+func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
+	for _, option := range opts {
+		option(o)
+	}
+	if o.sampler == nil {
+		o.sampler = newProbabilisticSampler(0.001)
+	}
+	if o.logger == nil {
+		o.logger = log.NullLogger
+	}
+	if o.samplingServerURL == "" {
+		o.samplingServerURL = DefaultSamplingServerURL
+	}
+	if o.metrics == nil {
+		o.metrics = NewNullMetrics()
+	}
+	if o.samplingRefreshInterval <= 0 {
+		o.samplingRefreshInterval = defaultSamplingRefreshInterval
+	}
+	if o.samplingFetcher == nil {
+		o.samplingFetcher = &httpSamplingStrategyFetcher{
+			serverURL: o.samplingServerURL,
+			logger:    o.logger,
+		}
+	}
+	if o.samplingParser == nil {
+		o.samplingParser = new(samplingStrategyParser)
+	}
+	if o.updaters == nil {
+		o.updaters = []SamplerUpdater{
+			&AdaptiveSamplerUpdater{
+				MaxOperations:            o.posParams.MaxOperations,
+				OperationNameLateBinding: o.posParams.OperationNameLateBinding,
+			},
+			new(ProbabilisticSamplerUpdater),
+			new(RateLimitingSamplerUpdater),
+		}
+	}
+	return o
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
new file mode 100644
index 0000000000000000000000000000000000000000..a50671a23682a08b259dfcf20e54953487b84640
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// SamplingDecision is returned by the V2 samplers.
+type SamplingDecision struct {
+	Sample    bool
+	Retryable bool
+	Tags      []Tag
+}
+
+// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
+// to be made at different points of the span lifecycle.
+type SamplerV2 interface {
+	OnCreateSpan(span *Span) SamplingDecision
+	OnSetOperationName(span *Span, operationName string) SamplingDecision
+	OnSetTag(span *Span, key string, value interface{}) SamplingDecision
+	OnFinishSpan(span *Span) SamplingDecision
+
+	// Close does a clean shutdown of the sampler, stopping any background
+	// go-routines it may have started.
+	Close()
+}
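+
+// Within this package, PerOperationSampler and RemotelyControlledSampler
+// implement SamplerV2 directly; legacy V1 samplers are adapted via
+// samplerV1toV2 below.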
+
+// samplerV1toV2 wraps a legacy V1 sampler into an adapter that makes it look like V2.
+func samplerV1toV2(s Sampler) SamplerV2 {
+	if s2, ok := s.(SamplerV2); ok {
+		return s2
+	}
+	type legacySamplerV1toV2Adapter struct {
+		legacySamplerV1Base
+	}
+	return &legacySamplerV1toV2Adapter{
+		legacySamplerV1Base: legacySamplerV1Base{
+			delegate: s.IsSampled,
+		},
+	}
+}
+
+// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
+// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
+// for backwards compatibility reasons.
+// TODO (breaking change) remove this in the next major release
+type SamplerV2Base struct{}
+
+// IsSampled implements IsSampled of Sampler.
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
+	return false, nil
+}
+
+// Close implements Close of Sampler.
+func (SamplerV2Base) Close() {}
+
+// Equal implements Equal of Sampler.
+func (SamplerV2Base) Equal(other Sampler) bool { return false }
+
+// legacySamplerV1Base is used as a base for simple samplers that only implement
+// the legacy IsSampled() function and are not sensitive to its arguments.
+type legacySamplerV1Base struct {
+	delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
+}
+
+func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
+	isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+	return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+	return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) Close() {}
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
new file mode 100644
index 0000000000000000000000000000000000000000..42c9112c07603ecaac62864f7f024c379342c41c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -0,0 +1,487 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// Span implements opentracing.Span
+type Span struct {
+	// referenceCounter is used to increase the lifetime of
+	// the object before returning it to the pool.
+	referenceCounter int32
+
+	sync.RWMutex
+
+	tracer *Tracer
+
+	// TODO: (breaking change) change to use a pointer
+	context SpanContext
+
+	// The name of the "operation" this span is an instance of.
+	// Known as a "span name" in some implementations.
+	operationName string
+
+	// firstInProcess, if true, indicates that this span is the root of the (sub)tree
+	// of spans in the current process. In other words it's true for the root spans,
+	// and the ingress spans when the process joins another trace.
+	firstInProcess bool
+
+	// startTime is the timestamp indicating when the span began, with microsecond precision.
+	startTime time.Time
+
+	// duration is the duration of the span, with microsecond precision.
+	// Zero value means the duration is unknown.
+	duration time.Duration
+
+	// tags attached to this span
+	tags []Tag
+
+	// The span's "micro-log"
+	logs []opentracing.LogRecord
+
+	// The number of logs dropped because of MaxLogsPerSpan.
+	numDroppedLogs int
+
+	// references for this span
+	references []Reference
+
+	observer ContribSpanObserver
+}
+
+// Tag is a simple key value wrapper.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+type Tag struct {
+	key   string
+	value interface{}
+}
+
+// NewTag creates a new Tag.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+func NewTag(key string, value interface{}) Tag {
+	return Tag{key: key, value: value}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) opentracing.Span {
+	s.Lock()
+	s.operationName = operationName
+	s.Unlock()
+	if !s.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnSetOperationName(s, operationName)
+		s.applySamplingDecision(decision, true)
+	}
+	s.observer.OnSetOperationName(operationName)
+	return s
+}
+
+// SetTag implements SetTag() of opentracing.Span
+func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+	return s.setTagInternal(key, value, true)
+}
+
+func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
+	s.observer.OnSetTag(key, value)
+	if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
+		return s
+	}
+	if !s.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnSetTag(s, key, value)
+		s.applySamplingDecision(decision, lock)
+	}
+	if s.isWriteable() {
+		if lock {
+			s.Lock()
+			defer s.Unlock()
+		}
+		s.appendTagNoLocking(key, value)
+	}
+	return s
+}
+
+// SpanContext returns span context
+func (s *Span) SpanContext() SpanContext {
+	s.Lock()
+	defer s.Unlock()
+	return s.context
+}
+
+// StartTime returns span start time
+func (s *Span) StartTime() time.Time {
+	s.Lock()
+	defer s.Unlock()
+	return s.startTime
+}
+
+// Duration returns span duration
+func (s *Span) Duration() time.Duration {
+	s.Lock()
+	defer s.Unlock()
+	return s.duration
+}
+
+// Tags returns tags for span
+func (s *Span) Tags() opentracing.Tags {
+	s.Lock()
+	defer s.Unlock()
+	var result = make(opentracing.Tags, len(s.tags))
+	for _, tag := range s.tags {
+		result[tag.key] = tag.value
+	}
+	return result
+}
+
+// Logs returns micro logs for span
+func (s *Span) Logs() []opentracing.LogRecord {
+	s.Lock()
+	defer s.Unlock()
+
+	logs := append([]opentracing.LogRecord(nil), s.logs...)
+	if s.numDroppedLogs != 0 {
+		fixLogs(logs, s.numDroppedLogs)
+	}
+
+	return logs
+}
+
+// References returns references for this span
+func (s *Span) References() []opentracing.SpanReference {
+	s.Lock()
+	defer s.Unlock()
+
+	if len(s.references) == 0 {
+		return nil
+	}
+
+	result := make([]opentracing.SpanReference, len(s.references))
+	for i, r := range s.references {
+		result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
+	}
+	return result
+}
+
+func (s *Span) appendTagNoLocking(key string, value interface{}) {
+	s.tags = append(s.tags, Tag{key: key, value: value})
+}
+
+// LogFields implements opentracing.Span API
+func (s *Span) LogFields(fields ...log.Field) {
+	s.Lock()
+	defer s.Unlock()
+	if !s.context.IsSampled() {
+		return
+	}
+	s.logFieldsNoLocking(fields...)
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) logFieldsNoLocking(fields ...log.Field) {
+	lr := opentracing.LogRecord{
+		Fields:    fields,
+		Timestamp: time.Now(),
+	}
+	s.appendLogNoLocking(lr)
+}
+
+// LogKV implements opentracing.Span API
+func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
+	s.RLock()
+	sampled := s.context.IsSampled()
+	s.RUnlock()
+	if !sampled {
+		return
+	}
+	fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
+	if err != nil {
+		s.LogFields(log.Error(err), log.String("function", "LogKV"))
+		return
+	}
+	s.LogFields(fields...)
+}
+
+// LogEvent implements opentracing.Span API
+func (s *Span) LogEvent(event string) {
+	s.Log(opentracing.LogData{Event: event})
+}
+
+// LogEventWithPayload implements opentracing.Span API
+func (s *Span) LogEventWithPayload(event string, payload interface{}) {
+	s.Log(opentracing.LogData{Event: event, Payload: payload})
+}
+
+// Log implements opentracing.Span API
+func (s *Span) Log(ld opentracing.LogData) {
+	s.Lock()
+	defer s.Unlock()
+	if s.context.IsSampled() {
+		if ld.Timestamp.IsZero() {
+			ld.Timestamp = s.tracer.timeNow()
+		}
+		s.appendLogNoLocking(ld.ToLogRecord())
+	}
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
+	maxLogs := s.tracer.options.maxLogsPerSpan
+	if maxLogs == 0 || len(s.logs) < maxLogs {
+		s.logs = append(s.logs, lr)
+		return
+	}
+
+	// We have too many logs. We don't touch the first numOld logs; we treat the
+	// rest as a circular buffer and overwrite the oldest log among those.
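+	// For example (illustrative): with maxLogs=5, numOld=2 and numNew=3, the
+	// 6th log lands at index 2, the 7th at 3, the 8th at 4, then the 9th
+	// wraps back to index 2 (numOld + numDroppedLogs%numNew).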
+	numOld := (maxLogs - 1) / 2
+	numNew := maxLogs - numOld
+	s.logs[numOld+s.numDroppedLogs%numNew] = lr
+	s.numDroppedLogs++
+}
+
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move to
+// the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+	// This algorithm is described in:
+	//    http://www.cplusplus.com/reference/algorithm/rotate
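+	// For example (illustrative): rotating [a b c d e] with pos=2 yields
+	// [c d e a b].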
+	for first, middle, next := 0, pos, pos; first != middle; {
+		buf[first], buf[next] = buf[next], buf[first]
+		first++
+		next++
+		if next == len(buf) {
+			next = middle
+		} else if first == middle {
+			middle = next
+		}
+	}
+}
+
+func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
+	// We dropped some log events, which means that we used part of Logs as a
+	// circular buffer (see appendLog). De-circularize it.
+	numOld := (len(logs) - 1) / 2
+	numNew := len(logs) - numOld
+	rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
+
+	// Replace the log in the middle (the oldest "new" log) with information
+	// about the dropped logs. This means that we are effectively dropping one
+	// more "new" log.
+	numDropped := numDroppedLogs + 1
+	logs[numOld] = opentracing.LogRecord{
+		// Keep the timestamp of the last dropped event.
+		Timestamp: logs[numOld].Timestamp,
+		Fields: []log.Field{
+			log.String("event", "dropped Span logs"),
+			log.Int("dropped_log_count", numDropped),
+			log.String("component", "jaeger-client"),
+		},
+	}
+}
+
+func (s *Span) fixLogsIfDropped() {
+	if s.numDroppedLogs == 0 {
+		return
+	}
+	fixLogs(s.logs, s.numDroppedLogs)
+	s.numDroppedLogs = 0
+}
+
+// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
+func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
+	s.Lock()
+	defer s.Unlock()
+	s.tracer.setBaggage(s, key, value)
+	return s
+}
+
+// BaggageItem implements BaggageItem() of opentracing.SpanContext
+func (s *Span) BaggageItem(key string) string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.context.baggage[key]
+}
+
+// Finish implements opentracing.Span API
+// After Finish() the Span is returned to the allocator unless the reporter retains it,
+// so it must no longer be used afterwards, because it may no longer be valid.
+func (s *Span) Finish() {
+	s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// FinishWithOptions implements opentracing.Span API
+func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
+	if options.FinishTime.IsZero() {
+		options.FinishTime = s.tracer.timeNow()
+	}
+	s.observer.OnFinish(options)
+	s.Lock()
+	s.duration = options.FinishTime.Sub(s.startTime)
+	s.Unlock()
+	if !s.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnFinishSpan(s)
+		s.applySamplingDecision(decision, true)
+	}
+	if s.context.IsSampled() {
+		s.Lock()
+		s.fixLogsIfDropped()
+		if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
+			// Note: bulk logs are not subject to maxLogsPerSpan limit
+			if options.LogRecords != nil {
+				s.logs = append(s.logs, options.LogRecords...)
+			}
+			for _, ld := range options.BulkLogData {
+				s.logs = append(s.logs, ld.ToLogRecord())
+			}
+		}
+		s.Unlock()
+	}
+	// call reportSpan even for non-sampled traces, to return span to the pool
+	// and update metrics counter
+	s.tracer.reportSpan(s)
+}
+
+// Context implements opentracing.Span API
+func (s *Span) Context() opentracing.SpanContext {
+	s.Lock()
+	defer s.Unlock()
+	return s.context
+}
+
+// Tracer implements opentracing.Span API
+func (s *Span) Tracer() opentracing.Tracer {
+	return s.tracer
+}
+
+func (s *Span) String() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.context.String()
+}
+
+// OperationName allows retrieving current operation name.
+func (s *Span) OperationName() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.operationName
+}
+
+// Retain increases object counter to increase the lifetime of the object
+func (s *Span) Retain() *Span {
+	atomic.AddInt32(&s.referenceCounter, 1)
+	return s
+}
+
+// Release decrements the object counter and returns the span to the
+// allocator when the counter drops below zero.
+func (s *Span) Release() {
+	if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
+		s.tracer.spanAllocator.Put(s)
+	}
+}
+
+// reset span state and release unused data
+func (s *Span) reset() {
+	s.firstInProcess = false
+	s.context = emptyContext
+	s.operationName = ""
+	s.tracer = nil
+	s.startTime = time.Time{}
+	s.duration = 0
+	s.observer = nil
+	atomic.StoreInt32(&s.referenceCounter, 0)
+
+	// Note: To reuse memory we can save the pointers on the heap
+	s.tags = s.tags[:0]
+	s.logs = s.logs[:0]
+	s.numDroppedLogs = 0
+	s.references = s.references[:0]
+}
+
+func (s *Span) serviceName() string {
+	return s.tracer.serviceName
+}
+
+func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+	if !decision.Retryable {
+		s.context.samplingState.setFinal()
+	}
+	if decision.Sample {
+		s.context.samplingState.setSampled()
+		if len(decision.Tags) > 0 {
+			if lock {
+				s.Lock()
+				defer s.Unlock()
+			}
+			for _, tag := range decision.Tags {
+				s.appendTagNoLocking(tag.key, tag.value)
+			}
+		}
+	}
+}
+
+// Span can be written to if it is sampled or the sampling decision has not been finalized.
+func (s *Span) isWriteable() bool {
+	state := s.context.samplingState
+	return !state.isFinal() || state.isSampled()
+}
+
+func (s *Span) isSamplingFinalized() bool {
+	return s.context.samplingState.isFinal()
+}
+
+// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+// The behavior of setSamplingPriority is surprising
+// If noDebugFlagOnForcedSampling is set
+//     setSamplingPriority(span, 1) always sets only flagSampled
+// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
+//     setSamplingPriority(span, 1) sets both flagSampled and flagDebug
+// However,
+//     setSamplingPriority(span, 0) always only resets flagSampled
+//
+// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
+// leave flagDebug set
+func setSamplingPriority(s *Span, value interface{}) bool {
+	val, ok := value.(uint16)
+	if !ok {
+		return false
+	}
+	if val == 0 {
+		s.context.samplingState.unsetSampled()
+		s.context.samplingState.setFinal()
+		return true
+	}
+	if s.tracer.options.noDebugFlagOnForcedSampling {
+		s.context.samplingState.setSampled()
+		s.context.samplingState.setFinal()
+		return true
+	} else if s.tracer.isDebugAllowed(s.operationName) {
+		s.context.samplingState.setDebugAndSampled()
+		s.context.samplingState.setFinal()
+		return true
+	}
+	return false
+}
+
+// EnableFirehose enables firehose flag on the span context
+func EnableFirehose(s *Span) {
+	s.Lock()
+	defer s.Unlock()
+	s.context.samplingState.setFirehose()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
new file mode 100644
index 0000000000000000000000000000000000000000..6fe0cd0ce1eb11ca2e7cc977b14c26ff814c7a92
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2019 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "sync"
+
+// SpanAllocator is an abstraction for managing span allocations.
+type SpanAllocator interface {
+	Get() *Span
+	Put(*Span)
+}
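+
+// Two implementations follow: syncPollSpanAllocator recycles spans through a
+// sync.Pool, while simpleSpanAllocator allocates a fresh Span per Get and
+// leaves finished spans to the garbage collector.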
+
+type syncPollSpanAllocator struct {
+	spanPool sync.Pool
+}
+
+func newSyncPollSpanAllocator() SpanAllocator {
+	return &syncPollSpanAllocator{
+		spanPool: sync.Pool{New: func() interface{} {
+			return &Span{}
+		}},
+	}
+}
+
+func (pool *syncPollSpanAllocator) Get() *Span {
+	return pool.spanPool.Get().(*Span)
+}
+
+func (pool *syncPollSpanAllocator) Put(span *Span) {
+	span.reset()
+	pool.spanPool.Put(span)
+}
+
+type simpleSpanAllocator struct{}
+
+func (pool simpleSpanAllocator) Get() *Span {
+	return &Span{}
+}
+
+func (pool simpleSpanAllocator) Put(span *Span) {
+	// @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
+	// since finished spans are not reused, no need to reset them
+	// span.reset()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b44f3f8c2f1ba09999c3cee6678ca076f1fbb1c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -0,0 +1,383 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+
+	"go.uber.org/atomic"
+)
+
+const (
+	flagSampled  = 1
+	flagDebug    = 2
+	flagFirehose = 8
+)
+
+var (
+	errEmptyTracerStateString     = errors.New("Cannot convert empty string to tracer state")
+	errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+	emptyContext = SpanContext{}
+)
+
+// TraceID represents unique 128bit identifier of a trace
+type TraceID struct {
+	High, Low uint64
+}
+
+// SpanID represents unique 64bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+	// traceID represents globally unique ID of the trace.
+	// Usually generated as a random number.
+	traceID TraceID
+
+	// spanID represents span ID that must be unique within its trace,
+	// but does not have to be globally unique.
+	spanID SpanID
+
+	// parentID refers to the ID of the parent span.
+	// Should be 0 if the current span is a root span.
+	parentID SpanID
+
+	// Distributed Context baggage. This is a snapshot in time.
+	baggage map[string]string
+
+	// debugID can be set to some correlation ID when the context is being
+	// extracted from a TextMap carrier.
+	//
+	// See JaegerDebugHeader in constants.go
+	debugID string
+
+	// samplingState is shared across all spans
+	samplingState *samplingState
+
+	// remote indicates that span context represents a remote parent
+	remote bool
+}
+
+type samplingState struct {
+	// Span context's state flags that are propagated across processes. Only lower 8 bits are used.
+	// We use an int32 instead of byte to be able to use CAS operations.
+	stateFlags atomic.Int32
+
+	// When state is not final, sampling will be retried on other span write operations,
+	// like SetOperationName / SetTag, and the spans will remain writable.
+	final atomic.Bool
+
+	// localRootSpan stores the SpanID of the first span created in this process for a given trace.
+	localRootSpan SpanID
+
+	// extendedState allows samplers to keep intermediate state.
+	// The keys and values in this map are completely opaque: interface{} -> interface{}.
+	extendedState sync.Map
+}
+
+func (s *samplingState) isLocalRootSpan(id SpanID) bool {
+	return id == s.localRootSpan
+}
+
+func (s *samplingState) setFlag(newFlag int32) {
+	swapped := false
+	for !swapped {
+		old := s.stateFlags.Load()
+		swapped = s.stateFlags.CAS(old, old|newFlag)
+	}
+}
+
+func (s *samplingState) unsetFlag(newFlag int32) {
+	swapped := false
+	for !swapped {
+		old := s.stateFlags.Load()
+		swapped = s.stateFlags.CAS(old, old&^newFlag)
+	}
+}
+
+func (s *samplingState) setSampled() {
+	s.setFlag(flagSampled)
+}
+
+func (s *samplingState) unsetSampled() {
+	s.unsetFlag(flagSampled)
+}
+
+func (s *samplingState) setDebugAndSampled() {
+	s.setFlag(flagDebug | flagSampled)
+}
+
+func (s *samplingState) setFirehose() {
+	s.setFlag(flagFirehose)
+}
+
+func (s *samplingState) setFlags(flags byte) {
+	s.stateFlags.Store(int32(flags))
+}
+
+func (s *samplingState) setFinal() {
+	s.final.Store(true)
+}
+
+func (s *samplingState) flags() byte {
+	return byte(s.stateFlags.Load())
+}
+
+func (s *samplingState) isSampled() bool {
+	return s.stateFlags.Load()&flagSampled == flagSampled
+}
+
+func (s *samplingState) isDebug() bool {
+	return s.stateFlags.Load()&flagDebug == flagDebug
+}
+
+func (s *samplingState) isFirehose() bool {
+	return s.stateFlags.Load()&flagFirehose == flagFirehose
+}
+
+func (s *samplingState) isFinal() bool {
+	return s.final.Load()
+}
+
+func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
+	if value, ok := s.extendedState.Load(key); ok {
+		return value
+	}
+	value := initValue()
+	value, _ = s.extendedState.LoadOrStore(key, value)
+	return value
+}
+
+// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+	for k, v := range c.baggage {
+		if !handler(k, v) {
+			break
+		}
+	}
+}
+
+// IsSampled returns whether this trace was chosen for permanent storage
+// by the sampling mechanism of the tracer.
+func (c SpanContext) IsSampled() bool {
+	return c.samplingState.isSampled()
+}
+
+// IsDebug indicates whether sampling was explicitly requested by the service.
+func (c SpanContext) IsDebug() bool {
+	return c.samplingState.isDebug()
+}
+
+// IsSamplingFinalized indicates whether the sampling decision has been finalized.
+func (c SpanContext) IsSamplingFinalized() bool {
+	return c.samplingState.isFinal()
+}
+
+// IsFirehose indicates whether the firehose flag was set
+func (c SpanContext) IsFirehose() bool {
+	return c.samplingState.isFirehose()
+}
+
+// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
+// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
+func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
+	return c.samplingState.extendedStateForKey(key, initValue)
+}
+
+// IsValid indicates whether this context actually represents a valid trace.
+func (c SpanContext) IsValid() bool {
+	return c.traceID.IsValid() && c.spanID != 0
+}
+
+// SetFirehose enables firehose mode for this trace.
+func (c SpanContext) SetFirehose() {
+	c.samplingState.setFirehose()
+}
+
+func (c SpanContext) String() string {
+	if c.traceID.High == 0 {
+		return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+	}
+	return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+}
+
+// ContextFromString reconstructs the Context encoded in a string
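+// of the form "{trace-id}:{span-id}:{parent-id}:{flags}",
+// e.g. "deadbeef:1:0:1" for a sampled context (illustrative value).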
+func ContextFromString(value string) (SpanContext, error) {
+	var context SpanContext
+	if value == "" {
+		return emptyContext, errEmptyTracerStateString
+	}
+	parts := strings.Split(value, ":")
+	if len(parts) != 4 {
+		return emptyContext, errMalformedTracerStateString
+	}
+	var err error
+	if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
+		return emptyContext, err
+	}
+	if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
+		return emptyContext, err
+	}
+	if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
+		return emptyContext, err
+	}
+	flags, err := strconv.ParseUint(parts[3], 10, 8)
+	if err != nil {
+		return emptyContext, err
+	}
+	context.samplingState = &samplingState{}
+	context.samplingState.setFlags(byte(flags))
+	return context, nil
+}
+
+// TraceID returns the trace ID of this span context
+func (c SpanContext) TraceID() TraceID {
+	return c.traceID
+}
+
+// SpanID returns the span ID of this span context
+func (c SpanContext) SpanID() SpanID {
+	return c.spanID
+}
+
+// ParentID returns the parent span ID of this span context
+func (c SpanContext) ParentID() SpanID {
+	return c.parentID
+}
+
+// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
+func (c SpanContext) Flags() byte {
+	return c.samplingState.flags()
+}
+
+// NewSpanContext creates a new instance of SpanContext
+func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
+	samplingState := &samplingState{}
+	if sampled {
+		samplingState.setSampled()
+	}
+
+	return SpanContext{
+		traceID:       traceID,
+		spanID:        spanID,
+		parentID:      parentID,
+		samplingState: samplingState,
+		baggage:       baggage}
+}
+
+// CopyFrom copies data from ctx into this context, including span identity and baggage.
+// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
+func (c *SpanContext) CopyFrom(ctx *SpanContext) {
+	c.traceID = ctx.traceID
+	c.spanID = ctx.spanID
+	c.parentID = ctx.parentID
+	c.samplingState = ctx.samplingState
+	if l := len(ctx.baggage); l > 0 {
+		c.baggage = make(map[string]string, l)
+		for k, v := range ctx.baggage {
+			c.baggage[k] = v
+		}
+	} else {
+		c.baggage = nil
+	}
+}
+
+// WithBaggageItem creates a new context with an extra baggage item.
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
+	var newBaggage map[string]string
+	if c.baggage == nil {
+		newBaggage = map[string]string{key: value}
+	} else {
+		newBaggage = make(map[string]string, len(c.baggage)+1)
+		for k, v := range c.baggage {
+			newBaggage[k] = v
+		}
+		newBaggage[key] = value
+	}
+	// Use positional parameters so the compiler will help catch new fields.
+	return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+}
+
+// isDebugIDContainerOnly returns true when the instance of the context is only
+// used to return the debug/correlation ID from extract() method. This happens
+// in the situation when "jaeger-debug-id" header is passed in the carrier to
+// the extract() method, but the request otherwise has no span context in it.
+// Previously this would've returned opentracing.ErrSpanContextNotFound from the
+// extract method, but now it returns a dummy context with only debugID filled in.
+//
+// See JaegerDebugHeader in constants.go
+// See TextMapPropagator#Extract
+func (c *SpanContext) isDebugIDContainerOnly() bool {
+	return !c.traceID.IsValid() && c.debugID != ""
+}
+
+// ------- TraceID -------
+
+func (t TraceID) String() string {
+	if t.High == 0 {
+		return fmt.Sprintf("%x", t.Low)
+	}
+	return fmt.Sprintf("%x%016x", t.High, t.Low)
+}
+
+// TraceIDFromString creates a TraceID from a hexadecimal string
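+// of up to 32 characters, e.g. "100000000000000001" (18 chars) yields
+// TraceID{High: 0x10, Low: 0x1} (illustrative value).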
+func TraceIDFromString(s string) (TraceID, error) {
+	var hi, lo uint64
+	var err error
+	if len(s) > 32 {
+		return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
+	} else if len(s) > 16 {
+		hiLen := len(s) - 16
+		if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
+			return TraceID{}, err
+		}
+		if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
+			return TraceID{}, err
+		}
+	} else {
+		if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
+			return TraceID{}, err
+		}
+	}
+	return TraceID{High: hi, Low: lo}, nil
+}
+
+// IsValid checks if the trace ID is valid, i.e. not zero.
+func (t TraceID) IsValid() bool {
+	return t.High != 0 || t.Low != 0
+}
+
+// ------- SpanID -------
+
+func (s SpanID) String() string {
+	return fmt.Sprintf("%x", uint64(s))
+}
+
+// SpanIDFromString creates a SpanID from a hexadecimal string
+func SpanIDFromString(s string) (SpanID, error) {
+	if len(s) > 16 {
+		return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
+	}
+	id, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return SpanID(0), err
+	}
+	return SpanID(id), nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
new file mode 100644
index 0000000000000000000000000000000000000000..e48811c500af13512307ffb9bc9af720fcc6c189
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -0,0 +1,411 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+type Agent interface {
+	// Parameters:
+	//  - Spans
+	EmitZipkinBatch(spans []*zipkincore.Span) (err error)
+	// Parameters:
+	//  - Batch
+	EmitBatch(batch *jaeger.Batch) (err error)
+}
+
+type AgentClient struct {
+	Transport       thrift.TTransport
+	ProtocolFactory thrift.TProtocolFactory
+	InputProtocol   thrift.TProtocol
+	OutputProtocol  thrift.TProtocol
+	SeqId           int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+	return &AgentClient{Transport: t,
+		ProtocolFactory: f,
+		InputProtocol:   f.GetProtocol(t),
+		OutputProtocol:  f.GetProtocol(t),
+		SeqId:           0,
+	}
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+	return &AgentClient{Transport: t,
+		ProtocolFactory: nil,
+		InputProtocol:   iprot,
+		OutputProtocol:  oprot,
+		SeqId:           0,
+	}
+}
+
+// Parameters:
+//  - Spans
+func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+	if err = p.sendEmitZipkinBatch(spans); err != nil {
+		return
+	}
+	return
+}
+
+func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
+	oprot := p.OutputProtocol
+	if oprot == nil {
+		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.OutputProtocol = oprot
+	}
+	p.SeqId++
+	if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
+		return
+	}
+	args := AgentEmitZipkinBatchArgs{
+		Spans: spans,
+	}
+	if err = args.Write(oprot); err != nil {
+		return
+	}
+	if err = oprot.WriteMessageEnd(); err != nil {
+		return
+	}
+	return oprot.Flush()
+}
+
+// Parameters:
+//  - Batch
+func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
+	if err = p.sendEmitBatch(batch); err != nil {
+		return
+	}
+	return
+}
+
+func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
+	oprot := p.OutputProtocol
+	if oprot == nil {
+		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.OutputProtocol = oprot
+	}
+	p.SeqId++
+	if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+		return
+	}
+	args := AgentEmitBatchArgs{
+		Batch: batch,
+	}
+	if err = args.Write(oprot); err != nil {
+		return
+	}
+	if err = oprot.WriteMessageEnd(); err != nil {
+		return
+	}
+	return oprot.Flush()
+}
+
+type AgentProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+	self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
+	self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+	return self0
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(seqId, iprot, oprot)
+	}
+	iprot.Skip(thrift.STRUCT)
+	iprot.ReadMessageEnd()
+	x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+	x1.Write(oprot)
+	oprot.WriteMessageEnd()
+	oprot.Flush()
+	return false, x1
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+	handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := AgentEmitZipkinBatchArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	var err2 error
+	if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
+		return true, err2
+	}
+	return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+	handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := AgentEmitBatchArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	var err2 error
+	if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+		return true, err2
+	}
+	return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type AgentEmitZipkinBatchArgs struct {
+	Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+	return &AgentEmitZipkinBatchArgs{}
+}
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+	return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*zipkincore.Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem2 := &zipkincore.Span{}
+		if err := _elem2.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+		}
+		p.Spans = append(p.Spans, _elem2)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Batch
+type AgentEmitBatchArgs struct {
+	Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+	return &AgentEmitBatchArgs{}
+}
+
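+// AgentEmitBatchArgs_Batch_DEFAULT (nil) is what GetBatch returns when
+// the batch field was never set; see IsSetBatch.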
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+	if !p.IsSetBatch() {
+		return AgentEmitBatchArgs_Batch_DEFAULT
+	}
+	return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+	return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+	p.Batch = &jaeger.Batch{}
+	if err := p.Batch.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+	}
+	if err := p.Batch.Write(oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+	}
+	return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa9857bb82acb5b8fd240cd867c3a1b4adf91023
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
@@ -0,0 +1,23 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..9c28f11c1ac77c1548bb850d25a0ae80f8ded0db
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
@@ -0,0 +1,21 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package agent
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+var GoUnusedProtection__ int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
new file mode 100644
index 0000000000000000000000000000000000000000..b32c37dd2615a3c1f856affcf3664d71594b7f86
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
@@ -0,0 +1,242 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type Agent interface {
+	// Parameters:
+	//  - Batch
+	EmitBatch(batch *Batch) (err error)
+}
+
+type AgentClient struct {
+	Transport       thrift.TTransport
+	ProtocolFactory thrift.TProtocolFactory
+	InputProtocol   thrift.TProtocol
+	OutputProtocol  thrift.TProtocol
+	SeqId           int32
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+	return &AgentClient{Transport: t,
+		ProtocolFactory: f,
+		InputProtocol:   f.GetProtocol(t),
+		OutputProtocol:  f.GetProtocol(t),
+		SeqId:           0,
+	}
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+	return &AgentClient{Transport: t,
+		ProtocolFactory: nil,
+		InputProtocol:   iprot,
+		OutputProtocol:  oprot,
+		SeqId:           0,
+	}
+}
+
+// Parameters:
+//  - Batch
+func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
+	if err = p.sendEmitBatch(batch); err != nil {
+		return
+	}
+	return
+}
+
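+// sendEmitBatch frames the call as a ONEWAY message and flushes it;
+// no response is expected or read.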
+func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
+	oprot := p.OutputProtocol
+	if oprot == nil {
+		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.OutputProtocol = oprot
+	}
+	p.SeqId++
+	if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
+		return
+	}
+	args := AgentEmitBatchArgs{
+		Batch: batch,
+	}
+	if err = args.Write(oprot); err != nil {
+		return
+	}
+	if err = oprot.WriteMessageEnd(); err != nil {
+		return
+	}
+	return oprot.Flush()
+}
+
+type AgentProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+	self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+	return self6
+}
+
+func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(seqId, iprot, oprot)
+	}
+	iprot.Skip(thrift.STRUCT)
+	iprot.ReadMessageEnd()
+	x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+	x7.Write(oprot)
+	oprot.WriteMessageEnd()
+	oprot.Flush()
+	return false, x7
+
+}
+
+type agentProcessorEmitBatch struct {
+	handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := AgentEmitBatchArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	var err2 error
+	if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
+		return true, err2
+	}
+	return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Batch
+type AgentEmitBatchArgs struct {
+	Batch *Batch `thrift:"batch,1" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+	return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *Batch {
+	if !p.IsSetBatch() {
+		return AgentEmitBatchArgs_Batch_DEFAULT
+	}
+	return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+	return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
+	p.Batch = &Batch{}
+	if err := p.Batch.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+	}
+	if err := p.Batch.Write(oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+	}
+	return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..621b8b1c20f4d340bd77eb70b72ccb7a68a9359f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..e69c6d603d4379e301e6220defc09137d9f76945
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
@@ -0,0 +1,2106 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package jaeger
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
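+// TagType indicates which of the optional value fields of a Tag
+// (vStr, vDouble, vBool, vLong, vBinary) holds the payload.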
+type TagType int64
+
+const (
+	TagType_STRING TagType = 0
+	TagType_DOUBLE TagType = 1
+	TagType_BOOL   TagType = 2
+	TagType_LONG   TagType = 3
+	TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+	switch p {
+	case TagType_STRING:
+		return "STRING"
+	case TagType_DOUBLE:
+		return "DOUBLE"
+	case TagType_BOOL:
+		return "BOOL"
+	case TagType_LONG:
+		return "LONG"
+	case TagType_BINARY:
+		return "BINARY"
+	}
+	return ""
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+	switch s {
+	case "STRING":
+		return TagType_STRING, nil
+	case "DOUBLE":
+		return TagType_DOUBLE, nil
+	case "BOOL":
+		return TagType_BOOL, nil
+	case "LONG":
+		return TagType_LONG, nil
+	case "BINARY":
+		return TagType_BINARY, nil
+	}
+	return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+	q, err := TagTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+type SpanRefType int64
+
+const (
+	SpanRefType_CHILD_OF     SpanRefType = 0
+	SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+	switch p {
+	case SpanRefType_CHILD_OF:
+		return "CHILD_OF"
+	case SpanRefType_FOLLOWS_FROM:
+		return "FOLLOWS_FROM"
+	}
+	return ""
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+	switch s {
+	case "CHILD_OF":
+		return SpanRefType_CHILD_OF, nil
+	case "FOLLOWS_FROM":
+		return SpanRefType_FOLLOWS_FROM, nil
+	}
+	return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+	q, err := SpanRefTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+// Attributes:
+//  - Key
+//  - VType
+//  - VStr
+//  - VDouble
+//  - VBool
+//  - VLong
+//  - VBinary
+type Tag struct {
+	Key     string   `thrift:"key,1,required" json:"key"`
+	VType   TagType  `thrift:"vType,2,required" json:"vType"`
+	VStr    *string  `thrift:"vStr,3" json:"vStr,omitempty"`
+	VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"`
+	VBool   *bool    `thrift:"vBool,5" json:"vBool,omitempty"`
+	VLong   *int64   `thrift:"vLong,6" json:"vLong,omitempty"`
+	VBinary []byte   `thrift:"vBinary,7" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+	return &Tag{}
+}
+
+func (p *Tag) GetKey() string {
+	return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+	return p.VType
+}
+
+var Tag_VStr_DEFAULT string
+
+func (p *Tag) GetVStr() string {
+	if !p.IsSetVStr() {
+		return Tag_VStr_DEFAULT
+	}
+	return *p.VStr
+}
+
+var Tag_VDouble_DEFAULT float64
+
+func (p *Tag) GetVDouble() float64 {
+	if !p.IsSetVDouble() {
+		return Tag_VDouble_DEFAULT
+	}
+	return *p.VDouble
+}
+
+var Tag_VBool_DEFAULT bool
+
+func (p *Tag) GetVBool() bool {
+	if !p.IsSetVBool() {
+		return Tag_VBool_DEFAULT
+	}
+	return *p.VBool
+}
+
+var Tag_VLong_DEFAULT int64
+
+func (p *Tag) GetVLong() int64 {
+	if !p.IsSetVLong() {
+		return Tag_VLong_DEFAULT
+	}
+	return *p.VLong
+}
+
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+	return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+	return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+	return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+	return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+	return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+	return p.VBinary != nil
+}
+
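+// Read decodes a Tag from the protocol and then verifies that the
+// required key and vType fields were present on the wire.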
+func (p *Tag) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetKey bool = false
+	var issetVType bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetKey = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetVType = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		case 5:
+			if err := p.readField5(iprot); err != nil {
+				return err
+			}
+		case 6:
+			if err := p.readField6(iprot); err != nil {
+				return err
+			}
+		case 7:
+			if err := p.readField7(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetKey {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
+	}
+	if !issetVType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
+	}
+	return nil
+}
+
+func (p *Tag) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Key = v
+	}
+	return nil
+}
+
+func (p *Tag) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		temp := TagType(v)
+		p.VType = temp
+	}
+	return nil
+}
+
+func (p *Tag) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.VStr = &v
+	}
+	return nil
+}
+
+func (p *Tag) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.VDouble = &v
+	}
+	return nil
+}
+
+func (p *Tag) readField5(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.VBool = &v
+	}
+	return nil
+}
+
+func (p *Tag) readField6(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 6: ", err)
+	} else {
+		p.VLong = &v
+	}
+	return nil
+}
+
+func (p *Tag) readField7(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(); err != nil {
+		return thrift.PrependError("error reading field 7: ", err)
+	} else {
+		p.VBinary = v
+	}
+	return nil
+}
+
+func (p *Tag) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Tag"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField5(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField6(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField7(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Key)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+	}
+	return err
+}
+
+func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.VType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
+	}
+	return err
+}
+
+func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) {
+	if p.IsSetVStr() {
+		if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
+		}
+		if err := oprot.WriteString(string(*p.VStr)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetVDouble() {
+		if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
+		}
+		if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) {
+	if p.IsSetVBool() {
+		if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
+		}
+		if err := oprot.WriteBool(bool(*p.VBool)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) {
+	if p.IsSetVLong() {
+		if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.VLong)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) {
+	if p.IsSetVBinary() {
+		if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
+		}
+		if err := oprot.WriteBinary(p.VBinary); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+//  - Timestamp
+//  - Fields
+type Log struct {
+	Timestamp int64  `thrift:"timestamp,1,required" json:"timestamp"`
+	Fields    []*Tag `thrift:"fields,2,required" json:"fields"`
+}
+
+func NewLog() *Log {
+	return &Log{}
+}
+
+func (p *Log) GetTimestamp() int64 {
+	return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+	return p.Fields
+}
+func (p *Log) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetTimestamp bool = false
+	var issetFields bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetTimestamp = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetFields = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetTimestamp {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+	}
+	if !issetFields {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
+	}
+	return nil
+}
+
+func (p *Log) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Timestamp = v
+	}
+	return nil
+}
+
+func (p *Log) readField2(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Fields = tSlice
+	for i := 0; i < size; i++ {
+		_elem0 := &Tag{}
+		if err := _elem0.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+		}
+		p.Fields = append(p.Fields, _elem0)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Log) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Log"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Log) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+	}
+	return err
+}
+
+func (p *Log) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Fields {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
+	}
+	return err
+}
+
+func (p *Log) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+//  - RefType
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+type SpanRef struct {
+	RefType     SpanRefType `thrift:"refType,1,required" json:"refType"`
+	TraceIdLow  int64       `thrift:"traceIdLow,2,required" json:"traceIdLow"`
+	TraceIdHigh int64       `thrift:"traceIdHigh,3,required" json:"traceIdHigh"`
+	SpanId      int64       `thrift:"spanId,4,required" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+	return &SpanRef{}
+}
+
+func (p *SpanRef) GetRefType() SpanRefType {
+	return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+	return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+	return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+	return p.SpanId
+}
+func (p *SpanRef) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetRefType bool = false
+	var issetTraceIdLow bool = false
+	var issetTraceIdHigh bool = false
+	var issetSpanId bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetRefType = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetTraceIdLow = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetTraceIdHigh = true
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+			issetSpanId = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetRefType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
+	}
+	if !issetTraceIdLow {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+	}
+	if !issetTraceIdHigh {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+	}
+	if !issetSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+	}
+	return nil
+}
+
+func (p *SpanRef) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		temp := SpanRefType(v)
+		p.RefType = temp
+	}
+	return nil
+}
+
+func (p *SpanRef) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TraceIdLow = v
+	}
+	return nil
+}
+
+func (p *SpanRef) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.TraceIdHigh = v
+	}
+	return nil
+}
+
+func (p *SpanRef) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.SpanId = v
+	}
+	return nil
+}
+
+func (p *SpanRef) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("SpanRef"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.RefType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+//  - ParentSpanId
+//  - OperationName
+//  - References
+//  - Flags
+//  - StartTime
+//  - Duration
+//  - Tags
+//  - Logs
+type Span struct {
+	TraceIdLow    int64      `thrift:"traceIdLow,1,required" json:"traceIdLow"`
+	TraceIdHigh   int64      `thrift:"traceIdHigh,2,required" json:"traceIdHigh"`
+	SpanId        int64      `thrift:"spanId,3,required" json:"spanId"`
+	ParentSpanId  int64      `thrift:"parentSpanId,4,required" json:"parentSpanId"`
+	OperationName string     `thrift:"operationName,5,required" json:"operationName"`
+	References    []*SpanRef `thrift:"references,6" json:"references,omitempty"`
+	Flags         int32      `thrift:"flags,7,required" json:"flags"`
+	StartTime     int64      `thrift:"startTime,8,required" json:"startTime"`
+	Duration      int64      `thrift:"duration,9,required" json:"duration"`
+	Tags          []*Tag     `thrift:"tags,10" json:"tags,omitempty"`
+	Logs          []*Log     `thrift:"logs,11" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+	return &Span{}
+}
+
+func (p *Span) GetTraceIdLow() int64 {
+	return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+	return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+	return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+	return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+	return p.OperationName
+}
+
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+	return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+	return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+	return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+	return p.Duration
+}
+
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+	return p.Tags
+}
+
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+	return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+	return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+	return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+	return p.Logs != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetTraceIdLow bool = false
+	var issetTraceIdHigh bool = false
+	var issetSpanId bool = false
+	var issetParentSpanId bool = false
+	var issetOperationName bool = false
+	var issetFlags bool = false
+	var issetStartTime bool = false
+	var issetDuration bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetTraceIdLow = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetTraceIdHigh = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetSpanId = true
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+			issetParentSpanId = true
+		case 5:
+			if err := p.readField5(iprot); err != nil {
+				return err
+			}
+			issetOperationName = true
+		case 6:
+			if err := p.readField6(iprot); err != nil {
+				return err
+			}
+		case 7:
+			if err := p.readField7(iprot); err != nil {
+				return err
+			}
+			issetFlags = true
+		case 8:
+			if err := p.readField8(iprot); err != nil {
+				return err
+			}
+			issetStartTime = true
+		case 9:
+			if err := p.readField9(iprot); err != nil {
+				return err
+			}
+			issetDuration = true
+		case 10:
+			if err := p.readField10(iprot); err != nil {
+				return err
+			}
+		case 11:
+			if err := p.readField11(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetTraceIdLow {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+	}
+	if !issetTraceIdHigh {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+	}
+	if !issetSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+	}
+	if !issetParentSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
+	}
+	if !issetOperationName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
+	}
+	if !issetFlags {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
+	}
+	if !issetStartTime {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
+	}
+	if !issetDuration {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
+	}
+	return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.TraceIdLow = v
+	}
+	return nil
+}
+
+func (p *Span) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TraceIdHigh = v
+	}
+	return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.SpanId = v
+	}
+	return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.ParentSpanId = v
+	}
+	return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.OperationName = v
+	}
+	return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*SpanRef, 0, size)
+	p.References = tSlice
+	for i := 0; i < size; i++ {
+		_elem1 := &SpanRef{}
+		if err := _elem1.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+		}
+		p.References = append(p.References, _elem1)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) readField7(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 7: ", err)
+	} else {
+		p.Flags = v
+	}
+	return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 8: ", err)
+	} else {
+		p.StartTime = v
+	}
+	return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 9: ", err)
+	} else {
+		p.Duration = v
+	}
+	return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Tags = tSlice
+	for i := 0; i < size; i++ {
+		_elem2 := &Tag{}
+		if err := _elem2.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+		}
+		p.Tags = append(p.Tags, _elem2)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Log, 0, size)
+	p.Logs = tSlice
+	for i := 0; i < size; i++ {
+		_elem3 := &Log{}
+		if err := _elem3.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+		}
+		p.Logs = append(p.Logs, _elem3)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Span"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField5(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField6(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField7(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField8(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField9(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField10(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField11(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.OperationName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+	if p.IsSetReferences() {
+		if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.References {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField7(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.Flags)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.StartTime)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.Duration)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+	if p.IsSetTags() {
+		if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Tags {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+	if p.IsSetLogs() {
+		if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Logs {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - ServiceName
+//  - Tags
+type Process struct {
+	ServiceName string `thrift:"serviceName,1,required" json:"serviceName"`
+	Tags        []*Tag `thrift:"tags,2" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+	return &Process{}
+}
+
+func (p *Process) GetServiceName() string {
+	return p.ServiceName
+}
+
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+	return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+	return p.Tags != nil
+}
+
+func (p *Process) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetServiceName bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetServiceName = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetServiceName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
+	}
+	return nil
+}
+
+func (p *Process) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.ServiceName = v
+	}
+	return nil
+}
+
+func (p *Process) readField2(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Tags = tSlice
+	for i := 0; i < size; i++ {
+		_elem4 := &Tag{}
+		if err := _elem4.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+		}
+		p.Tags = append(p.Tags, _elem4)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Process) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Process"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Process) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+	}
+	return err
+}
+
+func (p *Process) writeField2(oprot thrift.TProtocol) (err error) {
+	if p.IsSetTags() {
+		if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Tags {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Process) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Process(%+v)", *p)
+}
+
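+// ClientStats carries client-side drop counters, letting the receiver
+// account for spans that never reached it.
+//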
+// Attributes:
+//  - FullQueueDroppedSpans
+//  - TooLargeDroppedSpans
+//  - FailedToEmitSpans
+type ClientStats struct {
+	FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" json:"fullQueueDroppedSpans"`
+	TooLargeDroppedSpans  int64 `thrift:"tooLargeDroppedSpans,2,required" json:"tooLargeDroppedSpans"`
+	FailedToEmitSpans     int64 `thrift:"failedToEmitSpans,3,required" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+	return &ClientStats{}
+}
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+	return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+	return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+	return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetFullQueueDroppedSpans bool = false
+	var issetTooLargeDroppedSpans bool = false
+	var issetFailedToEmitSpans bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetFullQueueDroppedSpans = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetTooLargeDroppedSpans = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetFailedToEmitSpans = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetFullQueueDroppedSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
+	}
+	if !issetTooLargeDroppedSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
+	}
+	if !issetFailedToEmitSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
+	}
+	return nil
+}
+
+func (p *ClientStats) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.FullQueueDroppedSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TooLargeDroppedSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.FailedToEmitSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("ClientStats"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ClientStats) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.FullQueueDroppedSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TooLargeDroppedSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("failedToEmitSpans", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.FailedToEmitSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
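+// Batch groups the spans emitted by a single process for one submission,
+// optionally carrying a sequence number and client stats.
+//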
+// Attributes:
+//  - Process
+//  - Spans
+//  - SeqNo
+//  - Stats
+type Batch struct {
+	Process *Process     `thrift:"process,1,required" json:"process"`
+	Spans   []*Span      `thrift:"spans,2,required" json:"spans"`
+	SeqNo   *int64       `thrift:"seqNo,3" json:"seqNo,omitempty"`
+	Stats   *ClientStats `thrift:"stats,4" json:"stats,omitempty"`
+}
+
+func NewBatch() *Batch {
+	return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+
+func (p *Batch) GetProcess() *Process {
+	if !p.IsSetProcess() {
+		return Batch_Process_DEFAULT
+	}
+	return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+	return p.Spans
+}
+
+var Batch_SeqNo_DEFAULT int64
+
+func (p *Batch) GetSeqNo() int64 {
+	if !p.IsSetSeqNo() {
+		return Batch_SeqNo_DEFAULT
+	}
+	return *p.SeqNo
+}
+
+var Batch_Stats_DEFAULT *ClientStats
+
+func (p *Batch) GetStats() *ClientStats {
+	if !p.IsSetStats() {
+		return Batch_Stats_DEFAULT
+	}
+	return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+	return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+	return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+	return p.Stats != nil
+}
+
+func (p *Batch) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetProcess bool = false
+	var issetSpans bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetProcess = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetSpans = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetProcess {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
+	}
+	if !issetSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
+	}
+	return nil
+}
+
+func (p *Batch) readField1(iprot thrift.TProtocol) error {
+	p.Process = &Process{}
+	if err := p.Process.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+	}
+	return nil
+}
+
+func (p *Batch) readField2(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem5 := &Span{}
+		if err := _elem5.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+		}
+		p.Spans = append(p.Spans, _elem5)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Batch) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.SeqNo = &v
+	}
+	return nil
+}
+
+func (p *Batch) readField4(iprot thrift.TProtocol) error {
+	p.Stats = &ClientStats{}
+	if err := p.Stats.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+	}
+	return nil
+}
+
+func (p *Batch) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Batch"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
+	}
+	if err := p.Process.Write(oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
+	}
+	return err
+}
+
+func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *Batch) writeField3(oprot thrift.TProtocol) (err error) {
+	if p.IsSetSeqNo() {
+		if err := oprot.WriteFieldBegin("seqNo", thrift.I64, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.SeqNo)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Batch) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetStats() {
+		if err := oprot.WriteFieldBegin("stats", thrift.STRUCT, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
+		}
+		if err := p.Stats.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Batch) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Batch(%+v)", *p)
+}
+
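+// BatchSubmitResponse acknowledges a submitted batch; Ok reports whether the
+// receiving side accepted it.
+//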
+// Attributes:
+//  - Ok
+type BatchSubmitResponse struct {
+	Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+	return &BatchSubmitResponse{}
+}
+
+func (p *BatchSubmitResponse) GetOk() bool {
+	return p.Ok
+}
+func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetOk bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetOk = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetOk {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ok = v
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+	}
+	if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+	}
+	return err
+}
+
+func (p *BatchSubmitResponse) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f6e3a884d95a1bb0c346d6e829d738b7a332ba0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
@@ -0,0 +1,18 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
new file mode 100644
index 0000000000000000000000000000000000000000..33179cfeb3b9c65c6489fd88418352f9d00301f8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
@@ -0,0 +1,429 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type SamplingManager interface {
+	// Parameters:
+	//  - ServiceName
+	GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
+}
+
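+// SamplingManagerClient is the generated client stub: it writes a
+// getSamplingStrategy CALL message over the output protocol, flushes the
+// transport, and matches the REPLY by method name and sequence id.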
+type SamplingManagerClient struct {
+	Transport       thrift.TTransport
+	ProtocolFactory thrift.TProtocolFactory
+	InputProtocol   thrift.TProtocol
+	OutputProtocol  thrift.TProtocol
+	SeqId           int32
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+	return &SamplingManagerClient{Transport: t,
+		ProtocolFactory: f,
+		InputProtocol:   f.GetProtocol(t),
+		OutputProtocol:  f.GetProtocol(t),
+		SeqId:           0,
+	}
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+	return &SamplingManagerClient{Transport: t,
+		ProtocolFactory: nil,
+		InputProtocol:   iprot,
+		OutputProtocol:  oprot,
+		SeqId:           0,
+	}
+}
+
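+// A minimal construction sketch (illustrative only, not generated code); it
+// assumes NewTMemoryBuffer and NewTBinaryProtocolFactoryDefault from the
+// bundled thrift package, and a real caller would use a transport connected
+// to the agent rather than an in-memory buffer:
+//
+//	trans := thrift.NewTMemoryBuffer()
+//	client := NewSamplingManagerClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault())
+//	strategy, err := client.GetSamplingStrategy("my-service")
+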
+// Parameters:
+//  - ServiceName
+func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
+	if err = p.sendGetSamplingStrategy(serviceName); err != nil {
+		return
+	}
+	return p.recvGetSamplingStrategy()
+}
+
+func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
+	oprot := p.OutputProtocol
+	if oprot == nil {
+		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.OutputProtocol = oprot
+	}
+	p.SeqId++
+	if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
+		return
+	}
+	args := SamplingManagerGetSamplingStrategyArgs{
+		ServiceName: serviceName,
+	}
+	if err = args.Write(oprot); err != nil {
+		return
+	}
+	if err = oprot.WriteMessageEnd(); err != nil {
+		return
+	}
+	return oprot.Flush()
+}
+
+func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
+	iprot := p.InputProtocol
+	if iprot == nil {
+		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.InputProtocol = iprot
+	}
+	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return
+	}
+	if method != "getSamplingStrategy" {
+		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
+		return
+	}
+	if p.SeqId != seqId {
+		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
+		return
+	}
+	if mTypeId == thrift.EXCEPTION {
+		error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+		var error2 error
+		error2, err = error1.Read(iprot)
+		if err != nil {
+			return
+		}
+		if err = iprot.ReadMessageEnd(); err != nil {
+			return
+		}
+		err = error2
+		return
+	}
+	if mTypeId != thrift.REPLY {
+		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
+		return
+	}
+	result := SamplingManagerGetSamplingStrategyResult{}
+	if err = result.Read(iprot); err != nil {
+		return
+	}
+	if err = iprot.ReadMessageEnd(); err != nil {
+		return
+	}
+	value = result.GetSuccess()
+	return
+}
+
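+// SamplingManagerProcessor is the server-side dispatcher: Process reads each
+// incoming message and routes it to the handler registered under the method
+// name, answering unknown methods with an UNKNOWN_METHOD exception.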
+type SamplingManagerProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+	self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
+	return self3
+}
+
+func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(seqId, iprot, oprot)
+	}
+	iprot.Skip(thrift.STRUCT)
+	iprot.ReadMessageEnd()
+	x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+	x4.Write(oprot)
+	oprot.WriteMessageEnd()
+	oprot.Flush()
+	return false, x4
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+	handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := SamplingManagerGetSamplingStrategyArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+		oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+		x.Write(oprot)
+		oprot.WriteMessageEnd()
+		oprot.Flush()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	result := SamplingManagerGetSamplingStrategyResult{}
+	var retval *SamplingStrategyResponse
+	var err2 error
+	if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
+		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
+		oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
+		x.Write(oprot)
+		oprot.WriteMessageEnd()
+		oprot.Flush()
+		return true, err2
+	} else {
+		result.Success = retval
+	}
+	if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+		err = err2
+	}
+	if err2 = result.Write(oprot); err == nil && err2 != nil {
+		err = err2
+	}
+	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+		err = err2
+	}
+	if err2 = oprot.Flush(); err == nil && err2 != nil {
+		err = err2
+	}
+	if err != nil {
+		return
+	}
+	return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
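+// SamplingManagerGetSamplingStrategyArgs wraps the wire-level arguments of a
+// getSamplingStrategy call.
+//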
+// Attributes:
+//  - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+	ServiceName string `thrift:"serviceName,1" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+	return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+	return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.ServiceName = v
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+	}
+	return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
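+// SamplingManagerGetSamplingStrategyResult holds the optional success payload
+// of a getSamplingStrategy reply.
+//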
+// Attributes:
+//  - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+	Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+	return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+	if !p.IsSetSuccess() {
+		return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+	}
+	return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+	return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 0:
+			if err := p.readField0(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
+	p.Success = &SamplingStrategyResponse{}
+	if err := p.Success.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField0(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
+	if p.IsSetSuccess() {
+		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+		}
+		if err := p.Success.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..9abaf0542d40ea69fd4a559acf5cd4e457dd6b27
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
@@ -0,0 +1,892 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package sampling
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
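+// SamplingStrategyType enumerates the sampler kinds a sampling manager can
+// hand back: probabilistic (sample a fixed fraction of traces) or rate
+// limiting (cap the number of traces per second).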
+type SamplingStrategyType int64
+
+const (
+	SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+	SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+	switch p {
+	case SamplingStrategyType_PROBABILISTIC:
+		return "PROBABILISTIC"
+	case SamplingStrategyType_RATE_LIMITING:
+		return "RATE_LIMITING"
+	}
+	return ""
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+	switch s {
+	case "PROBABILISTIC":
+		return SamplingStrategyType_PROBABILISTIC, nil
+	case "RATE_LIMITING":
+		return SamplingStrategyType_RATE_LIMITING, nil
+	}
+	return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+	q, err := SamplingStrategyTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
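+// ProbabilisticSamplingStrategy samples each trace independently with
+// probability SamplingRate.
+//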
+// Attributes:
+//  - SamplingRate
+type ProbabilisticSamplingStrategy struct {
+	SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+	return &ProbabilisticSamplingStrategy{}
+}
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+	return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetSamplingRate bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetSamplingRate = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetSamplingRate {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
+	}
+	return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.SamplingRate = v
+	}
+	return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
+	}
+	if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
+	}
+	return err
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
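+// RateLimitingSamplingStrategy caps the number of traces sampled per second
+// at MaxTracesPerSecond.
+//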
+// Attributes:
+//  - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+	MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+	return &RateLimitingSamplingStrategy{}
+}
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+	return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetMaxTracesPerSecond bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetMaxTracesPerSecond = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetMaxTracesPerSecond {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
+	}
+	return nil
+}
+
+func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI16(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.MaxTracesPerSecond = v
+	}
+	return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
+	}
+	if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
+	}
+	return err
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
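+// OperationSamplingStrategy binds a probabilistic strategy to one named
+// operation.
+//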
+// Attributes:
+//  - Operation
+//  - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+	Operation             string                         `thrift:"operation,1,required" json:"operation"`
+	ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+	return &OperationSamplingStrategy{}
+}
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+	return p.Operation
+}
+
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+	if !p.IsSetProbabilisticSampling() {
+		return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+	}
+	return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+	return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetOperation bool = false
+	var issetProbabilisticSampling bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetOperation = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetProbabilisticSampling = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetOperation {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
+	}
+	if !issetProbabilisticSampling {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
+	}
+	return nil
+}
+
+func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Operation = v
+	}
+	return nil
+}
+
+func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
+	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+	}
+	return nil
+}
+
+func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Operation)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
+	}
+	return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+	}
+	if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+	}
+	return err
+}
+
+func (p *OperationSamplingStrategy) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
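+// PerOperationSamplingStrategies carries per-operation probabilistic
+// strategies plus a default probability and lower/upper traces-per-second
+// bounds.
+//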
+// Attributes:
+//  - DefaultSamplingProbability
+//  - DefaultLowerBoundTracesPerSecond
+//  - PerOperationStrategies
+//  - DefaultUpperBoundTracesPerSecond
+type PerOperationSamplingStrategies struct {
+	DefaultSamplingProbability       float64                      `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
+	DefaultLowerBoundTracesPerSecond float64                      `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
+	PerOperationStrategies           []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
+	DefaultUpperBoundTracesPerSecond *float64                     `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+	return &PerOperationSamplingStrategies{}
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+	return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+	return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+	return p.PerOperationStrategies
+}
+
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+	if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+		return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+	}
+	return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+	return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetDefaultSamplingProbability bool = false
+	var issetDefaultLowerBoundTracesPerSecond bool = false
+	var issetPerOperationStrategies bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetDefaultSamplingProbability = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetDefaultLowerBoundTracesPerSecond = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetPerOperationStrategies = true
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetDefaultSamplingProbability {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
+	}
+	if !issetDefaultLowerBoundTracesPerSecond {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
+	}
+	if !issetPerOperationStrategies {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.DefaultSamplingProbability = v
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.DefaultLowerBoundTracesPerSecond = v
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*OperationSamplingStrategy, 0, size)
+	p.PerOperationStrategies = tSlice
+	for i := 0; i < size; i++ {
+		_elem0 := &OperationSamplingStrategy{}
+		if err := _elem0.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+		}
+		p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.DefaultUpperBoundTracesPerSecond = &v
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
+	}
+	if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
+	}
+	return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+	}
+	if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
+	}
+	return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.PerOperationStrategies {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
+	}
+	return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetDefaultUpperBoundTracesPerSecond() {
+		if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+		}
+		if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
+
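+// SamplingStrategyResponse is the sampling manager's reply: StrategyType
+// selects which of the optional strategy payloads applies.
+//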
+// Attributes:
+//  - StrategyType
+//  - ProbabilisticSampling
+//  - RateLimitingSampling
+//  - OperationSampling
+type SamplingStrategyResponse struct {
+	StrategyType          SamplingStrategyType            `thrift:"strategyType,1,required" json:"strategyType"`
+	ProbabilisticSampling *ProbabilisticSamplingStrategy  `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
+	RateLimitingSampling  *RateLimitingSamplingStrategy   `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
+	OperationSampling     *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+	return &SamplingStrategyResponse{}
+}
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+	return p.StrategyType
+}
+
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+	if !p.IsSetProbabilisticSampling() {
+		return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+	}
+	return p.ProbabilisticSampling
+}
+
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+	if !p.IsSetRateLimitingSampling() {
+		return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+	}
+	return p.RateLimitingSampling
+}
+
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+	if !p.IsSetOperationSampling() {
+		return SamplingStrategyResponse_OperationSampling_DEFAULT
+	}
+	return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+	return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+	return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+	return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetStrategyType bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetStrategyType = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetStrategyType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		temp := SamplingStrategyType(v)
+		p.StrategyType = temp
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
+	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
+	p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+	if err := p.RateLimitingSampling.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
+	p.OperationSampling = &PerOperationSamplingStrategies{}
+	if err := p.OperationSampling.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
+	}
+	return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
+	if p.IsSetProbabilisticSampling() {
+		if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
+		}
+		if err := p.ProbabilisticSampling.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
+	if p.IsSetRateLimitingSampling() {
+		if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
+		}
+		if err := p.RateLimitingSampling.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetOperationSampling() {
+		if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
+		}
+		if err := p.OperationSampling.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *SamplingStrategyResponse) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
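
The generated accessors above are nil-safe: for an optional struct field, Get* falls back to the package-level *_DEFAULT value (nil for these pointers) when the field is unset, so callers guard with IsSet* before descending into nested data. A minimal in-package sketch of that pattern; inspectStrategy is an illustrative helper, not part of the generated code:

func inspectStrategy(resp *SamplingStrategyResponse) {
	// strategyType is required, so it can be read directly.
	_ = resp.GetStrategyType()

	// operationSampling is optional; when unset, the getter returns nil.
	if resp.IsSetOperationSampling() {
		ops := resp.GetOperationSampling()
		_ = ops.PerOperationStrategies // e.g. iterate the per-operation list
	}
}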
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..a53d46f0efe950ae85f477397d636d9fa864d246
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
@@ -0,0 +1,35 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const MESSAGE_SEND = "ms"
+const MESSAGE_RECV = "mr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+const MESSAGE_ADDR = "ma"
+
+func init() {
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
new file mode 100644
index 0000000000000000000000000000000000000000..15583e56b03a66468e62b2d4683ce097eaac5057
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
@@ -0,0 +1,1337 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type AnnotationType int64
+
+const (
+	AnnotationType_BOOL   AnnotationType = 0
+	AnnotationType_BYTES  AnnotationType = 1
+	AnnotationType_I16    AnnotationType = 2
+	AnnotationType_I32    AnnotationType = 3
+	AnnotationType_I64    AnnotationType = 4
+	AnnotationType_DOUBLE AnnotationType = 5
+	AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+	switch p {
+	case AnnotationType_BOOL:
+		return "BOOL"
+	case AnnotationType_BYTES:
+		return "BYTES"
+	case AnnotationType_I16:
+		return "I16"
+	case AnnotationType_I32:
+		return "I32"
+	case AnnotationType_I64:
+		return "I64"
+	case AnnotationType_DOUBLE:
+		return "DOUBLE"
+	case AnnotationType_STRING:
+		return "STRING"
+	}
+	return ""
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+	switch s {
+	case "BOOL":
+		return AnnotationType_BOOL, nil
+	case "BYTES":
+		return AnnotationType_BYTES, nil
+	case "I16":
+		return AnnotationType_I16, nil
+	case "I32":
+		return AnnotationType_I32, nil
+	case "I64":
+		return AnnotationType_I64, nil
+	case "DOUBLE":
+		return AnnotationType_DOUBLE, nil
+	case "STRING":
+		return AnnotationType_STRING, nil
+	}
+	return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+	q, err := AnnotationTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+//  - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// E.g., for the IP 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4.
+//  - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+//  - Ipv6: IPv6 host address packed into 16 bytes. E.g., Inet6Address.getBytes()
+type Endpoint struct {
+	Ipv4        int32  `thrift:"ipv4,1" json:"ipv4"`
+	Port        int16  `thrift:"port,2" json:"port"`
+	ServiceName string `thrift:"service_name,3" json:"service_name"`
+	Ipv6        []byte `thrift:"ipv6,4" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+	return &Endpoint{}
+}
+
+func (p *Endpoint) GetIpv4() int32 {
+	return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+	return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+	return p.ServiceName
+}
+
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+	return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+	return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ipv4 = v
+	}
+	return nil
+}
+
+func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI16(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Port = v
+	}
+	return nil
+}
+
+func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.ServiceName = v
+	}
+	return nil
+}
+
+func (p *Endpoint) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.Ipv6 = v
+	}
+	return nil
+}
+
+func (p *Endpoint) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Endpoint"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
+	}
+	if err := oprot.WriteI16(int16(p.Port)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetIpv6() {
+		if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
+		}
+		if err := oprot.WriteBinary(p.Ipv6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Endpoint) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Endpoint(%+v)", *p)
+}
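
As the Ipv4 comment above spells out, the four address octets are packed big-endian into a single int32. A small sketch under that convention; packIPv4 is an illustrative helper, not part of the generated API:

// packIPv4 packs 1.2.3.4 into (1 << 24) | (2 << 16) | (3 << 8) | 4.
// First octets >= 128 wrap to negative int32 values, which matches the
// byte-level packing (compare the "watch for negatives" note on Port).
func packIPv4(a, b, c, d byte) int32 {
	return int32(a)<<24 | int32(b)<<16 | int32(c)<<8 | int32(d)
}

For example, an endpoint for a local service might be built as ep := NewEndpoint(); ep.Ipv4 = packIPv4(127, 0, 0, 1); ep.ServiceName = "example".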
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also makes them aggregatable.
+//
+// Attributes:
+//  - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+//  - Value
+//  - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+	Timestamp int64     `thrift:"timestamp,1" json:"timestamp"`
+	Value     string    `thrift:"value,2" json:"value"`
+	Host      *Endpoint `thrift:"host,3" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+	return &Annotation{}
+}
+
+func (p *Annotation) GetTimestamp() int64 {
+	return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+	return p.Value
+}
+
+var Annotation_Host_DEFAULT *Endpoint
+
+func (p *Annotation) GetHost() *Endpoint {
+	if !p.IsSetHost() {
+		return Annotation_Host_DEFAULT
+	}
+	return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+	return p.Host != nil
+}
+
+func (p *Annotation) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Annotation) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Timestamp = v
+	}
+	return nil
+}
+
+func (p *Annotation) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Value = v
+	}
+	return nil
+}
+
+func (p *Annotation) readField3(iprot thrift.TProtocol) error {
+	p.Host = &Endpoint{}
+	if err := p.Host.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+	}
+	return nil
+}
+
+func (p *Annotation) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Annotation"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+	}
+	return err
+}
+
+func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Value)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+	}
+	return err
+}
+
+func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
+	if p.IsSetHost() {
+		if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
+		}
+		if err := p.Host.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Annotation) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in
+// an RPC call.
+//
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+//  - Key
+//  - Value
+//  - AnnotationType
+//  - Host: The host that recorded the tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+	Key            string         `thrift:"key,1" json:"key"`
+	Value          []byte         `thrift:"value,2" json:"value"`
+	AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
+	Host           *Endpoint      `thrift:"host,4" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+	return &BinaryAnnotation{}
+}
+
+func (p *BinaryAnnotation) GetKey() string {
+	return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+	return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+	return p.AnnotationType
+}
+
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+	if !p.IsSetHost() {
+		return BinaryAnnotation_Host_DEFAULT
+	}
+	return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+	return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Key = v
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Value = v
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		temp := AnnotationType(v)
+		p.AnnotationType = temp
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
+	p.Host = &Endpoint{}
+	if err := p.Host.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Key)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+	}
+	if err := oprot.WriteBinary(p.Value); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
+	}
+	if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetHost() {
+		if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
+		}
+		if err := p.Host.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+//  - TraceID
+//  - Name: Span name in lowercase, rpc method for example
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+//  - ID
+//  - ParentID
+//  - Annotations
+//  - BinaryAnnotations
+//  - Debug
+//  - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+//  - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If
+// nonzero, the trace uses 128-bit trace ids instead of 64-bit.
+type Span struct {
+	TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
+	// unused field # 2
+	Name        string        `thrift:"name,3" json:"name"`
+	ID          int64         `thrift:"id,4" json:"id"`
+	ParentID    *int64        `thrift:"parent_id,5" json:"parent_id,omitempty"`
+	Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
+	// unused field # 7
+	BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
+	Debug             bool                `thrift:"debug,9" json:"debug,omitempty"`
+	Timestamp         *int64              `thrift:"timestamp,10" json:"timestamp,omitempty"`
+	Duration          *int64              `thrift:"duration,11" json:"duration,omitempty"`
+	TraceIDHigh       *int64              `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+	return &Span{}
+}
+
+func (p *Span) GetTraceID() int64 {
+	return p.TraceID
+}
+
+func (p *Span) GetName() string {
+	return p.Name
+}
+
+func (p *Span) GetID() int64 {
+	return p.ID
+}
+
+var Span_ParentID_DEFAULT int64
+
+func (p *Span) GetParentID() int64 {
+	if !p.IsSetParentID() {
+		return Span_ParentID_DEFAULT
+	}
+	return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+	return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+	return p.BinaryAnnotations
+}
+
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+	return p.Debug
+}
+
+var Span_Timestamp_DEFAULT int64
+
+func (p *Span) GetTimestamp() int64 {
+	if !p.IsSetTimestamp() {
+		return Span_Timestamp_DEFAULT
+	}
+	return *p.Timestamp
+}
+
+var Span_Duration_DEFAULT int64
+
+func (p *Span) GetDuration() int64 {
+	if !p.IsSetDuration() {
+		return Span_Duration_DEFAULT
+	}
+	return *p.Duration
+}
+
+var Span_TraceIDHigh_DEFAULT int64
+
+func (p *Span) GetTraceIDHigh() int64 {
+	if !p.IsSetTraceIDHigh() {
+		return Span_TraceIDHigh_DEFAULT
+	}
+	return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+	return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+	return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+	return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+	return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+	return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		case 5:
+			if err := p.readField5(iprot); err != nil {
+				return err
+			}
+		case 6:
+			if err := p.readField6(iprot); err != nil {
+				return err
+			}
+		case 8:
+			if err := p.readField8(iprot); err != nil {
+				return err
+			}
+		case 9:
+			if err := p.readField9(iprot); err != nil {
+				return err
+			}
+		case 10:
+			if err := p.readField10(iprot); err != nil {
+				return err
+			}
+		case 11:
+			if err := p.readField11(iprot); err != nil {
+				return err
+			}
+		case 12:
+			if err := p.readField12(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Span) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.TraceID = v
+	}
+	return nil
+}
+
+func (p *Span) readField3(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.Name = v
+	}
+	return nil
+}
+
+func (p *Span) readField4(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.ID = v
+	}
+	return nil
+}
+
+func (p *Span) readField5(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.ParentID = &v
+	}
+	return nil
+}
+
+func (p *Span) readField6(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Annotation, 0, size)
+	p.Annotations = tSlice
+	for i := 0; i < size; i++ {
+		_elem0 := &Annotation{}
+		if err := _elem0.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+		}
+		p.Annotations = append(p.Annotations, _elem0)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) readField8(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*BinaryAnnotation, 0, size)
+	p.BinaryAnnotations = tSlice
+	for i := 0; i < size; i++ {
+		_elem1 := &BinaryAnnotation{}
+		if err := _elem1.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+		}
+		p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) readField9(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(); err != nil {
+		return thrift.PrependError("error reading field 9: ", err)
+	} else {
+		p.Debug = v
+	}
+	return nil
+}
+
+func (p *Span) readField10(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 10: ", err)
+	} else {
+		p.Timestamp = &v
+	}
+	return nil
+}
+
+func (p *Span) readField11(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 11: ", err)
+	} else {
+		p.Duration = &v
+	}
+	return nil
+}
+
+func (p *Span) readField12(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(); err != nil {
+		return thrift.PrependError("error reading field 12: ", err)
+	} else {
+		p.TraceIDHigh = &v
+	}
+	return nil
+}
+
+func (p *Span) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Span"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField3(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField4(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField5(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField6(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField8(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField9(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField10(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField11(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField12(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Name)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.ID)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
+	if p.IsSetParentID() {
+		if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Annotations {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.BinaryAnnotations {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
+	if p.IsSetDebug() {
+		if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
+		}
+		if err := oprot.WriteBool(bool(p.Debug)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
+	if p.IsSetTimestamp() {
+		if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
+	if p.IsSetDuration() {
+		if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
+	if p.IsSetTraceIDHigh() {
+		if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
+		}
+		if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Span(%+v)", *p)
+}
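
Putting the types above together: a client-side span carries its timing as annotations whose values are the constants from constants.go. The IDs, timestamp, and names below are placeholders chosen for illustration, not defaults implied by the generated code:

func exampleSpan() *Span {
	host := NewEndpoint()
	host.ServiceName = "example-client" // hypothetical service name

	cs := NewAnnotation()
	cs.Timestamp = 1500000000000000 // microseconds from epoch (placeholder)
	cs.Value = CLIENT_SEND          // "cs", from constants.go above
	cs.Host = host

	span := NewSpan()
	span.TraceID = 1 // placeholder 64-bit ids
	span.ID = 2
	span.Name = "get /myresource"
	span.Annotations = []*Annotation{cs}
	span.BinaryAnnotations = []*BinaryAnnotation{}
	return span
}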
+
+// Attributes:
+//  - Ok
+type Response struct {
+	Ok bool `thrift:"ok,1,required" json:"ok"`
+}
+
+func NewResponse() *Response {
+	return &Response{}
+}
+
+func (p *Response) GetOk() bool {
+	return p.Ok
+}
+func (p *Response) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetOk bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetOk = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetOk {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+	}
+	return nil
+}
+
+func (p *Response) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ok = v
+	}
+	return nil
+}
+
+func (p *Response) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("Response"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Response) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+	}
+	if err := oprot.WriteBool(bool(p.Ok)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+	}
+	return err
+}
+
+func (p *Response) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("Response(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
new file mode 100644
index 0000000000000000000000000000000000000000..417e883d0e31790a39d1735eb080ea376a11a90a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
@@ -0,0 +1,446 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+type ZipkinCollector interface {
+	// Parameters:
+	//  - Spans
+	SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
+}
+
+type ZipkinCollectorClient struct {
+	Transport       thrift.TTransport
+	ProtocolFactory thrift.TProtocolFactory
+	InputProtocol   thrift.TProtocol
+	OutputProtocol  thrift.TProtocol
+	SeqId           int32
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+	return &ZipkinCollectorClient{Transport: t,
+		ProtocolFactory: f,
+		InputProtocol:   f.GetProtocol(t),
+		OutputProtocol:  f.GetProtocol(t),
+		SeqId:           0,
+	}
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+	return &ZipkinCollectorClient{Transport: t,
+		ProtocolFactory: nil,
+		InputProtocol:   iprot,
+		OutputProtocol:  oprot,
+		SeqId:           0,
+	}
+}
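
Wiring a client is a matter of handing a transport and protocol factory to the generated factory function. A hedged sketch only: it assumes the bundled thrift package's TMemoryBuffer and default binary protocol factory (both present in this Thrift 0.9.3 fork), and a memory buffer would of course never reach a real collector:

func newExampleClient() *ZipkinCollectorClient {
	trans := thrift.NewTMemoryBuffer() // stand-in for a real network transport
	return NewZipkinCollectorClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault())
}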
+
+// Parameters:
+//  - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
+	if err = p.sendSubmitZipkinBatch(spans); err != nil {
+		return
+	}
+	return p.recvSubmitZipkinBatch()
+}
+
+func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
+	oprot := p.OutputProtocol
+	if oprot == nil {
+		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.OutputProtocol = oprot
+	}
+	p.SeqId++
+	if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
+		return
+	}
+	args := ZipkinCollectorSubmitZipkinBatchArgs{
+		Spans: spans,
+	}
+	if err = args.Write(oprot); err != nil {
+		return
+	}
+	if err = oprot.WriteMessageEnd(); err != nil {
+		return
+	}
+	return oprot.Flush()
+}
+
+func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
+	iprot := p.InputProtocol
+	if iprot == nil {
+		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+		p.InputProtocol = iprot
+	}
+	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return
+	}
+	if method != "submitZipkinBatch" {
+		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
+		return
+	}
+	if p.SeqId != seqId {
+		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
+		return
+	}
+	if mTypeId == thrift.EXCEPTION {
+		error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+		var error3 error
+		error3, err = error2.Read(iprot)
+		if err != nil {
+			return
+		}
+		if err = iprot.ReadMessageEnd(); err != nil {
+			return
+		}
+		err = error3
+		return
+	}
+	if mTypeId != thrift.REPLY {
+		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
+		return
+	}
+	result := ZipkinCollectorSubmitZipkinBatchResult{}
+	if err = result.Read(iprot); err != nil {
+		return
+	}
+	if err = iprot.ReadMessageEnd(); err != nil {
+		return
+	}
+	value = result.GetSuccess()
+	return
+}
+
+type ZipkinCollectorProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+	self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
+	return self4
+}
+
+func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err := iprot.ReadMessageBegin()
+	if err != nil {
+		return false, err
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(seqId, iprot, oprot)
+	}
+	iprot.Skip(thrift.STRUCT)
+	iprot.ReadMessageEnd()
+	x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+	x5.Write(oprot)
+	oprot.WriteMessageEnd()
+	oprot.Flush()
+	return false, x5
+
+}
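
On the server side, anything satisfying the ZipkinCollector interface can be handed to NewZipkinCollectorProcessor, which dispatches by method name through the processor map above. An illustrative handler that simply acknowledges every span:

type okCollector struct{}

func (okCollector) SubmitZipkinBatch(spans []*Span) ([]*Response, error) {
	resps := make([]*Response, 0, len(spans))
	for range spans {
		resps = append(resps, &Response{Ok: true})
	}
	return resps, nil
}

// processor := NewZipkinCollectorProcessor(okCollector{})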
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+	handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := ZipkinCollectorSubmitZipkinBatchArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+		oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+		x.Write(oprot)
+		oprot.WriteMessageEnd()
+		oprot.Flush()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	result := ZipkinCollectorSubmitZipkinBatchResult{}
+	var retval []*Response
+	var err2 error
+	if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
+		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
+		oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
+		x.Write(oprot)
+		oprot.WriteMessageEnd()
+		oprot.Flush()
+		return true, err2
+	} else {
+		result.Success = retval
+	}
+	if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+		err = err2
+	}
+	if err2 = result.Write(oprot); err == nil && err2 != nil {
+		err = err2
+	}
+	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+		err = err2
+	}
+	if err2 = oprot.Flush(); err == nil && err2 != nil {
+		err = err2
+	}
+	if err != nil {
+		return
+	}
+	return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+	Spans []*Span `thrift:"spans,1" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+	return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+	return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem6 := &Span{}
+		if err := _elem6.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
+		}
+		p.Spans = append(p.Spans, _elem6)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+	Success []*Response `thrift:"success,0" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+	return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+	return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+	return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 0:
+			if err := p.readField0(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Response, 0, size)
+	p.Success = tSlice
+	for i := 0; i < size; i++ {
+		_elem7 := &Response{}
+		if err := _elem7.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
+		}
+		p.Success = append(p.Success, _elem7)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField0(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
+	if p.IsSetSuccess() {
+		if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Success {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d8e642e028bbd92d29f6f0ae44c3c1e7730d1e6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -0,0 +1,7 @@
+# Apache Thrift
+
+This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c).
+
+It is vendored code to avoid compatibility issues introduced in Thrift v0.11.
+
+See https://github.com/jaegertracing/jaeger-client-go/pull/303.
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..6655cc5a97204de66ee74530d08f3a857d905343
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+const (
+	UNKNOWN_APPLICATION_EXCEPTION  = 0
+	UNKNOWN_METHOD                 = 1
+	INVALID_MESSAGE_TYPE_EXCEPTION = 2
+	WRONG_METHOD_NAME              = 3
+	BAD_SEQUENCE_ID                = 4
+	MISSING_RESULT                 = 5
+	INTERNAL_ERROR                 = 6
+	PROTOCOL_ERROR                 = 7
+)
+
+// Application level Thrift exception
+type TApplicationException interface {
+	TException
+	TypeId() int32
+	Read(iprot TProtocol) (TApplicationException, error)
+	Write(oprot TProtocol) error
+}
+
+type tApplicationException struct {
+	message string
+	type_   int32
+}
+
+func (e tApplicationException) Error() string {
+	return e.message
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+	return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+	return p.type_
+}
+
+func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
+	_, err := iprot.ReadStructBegin()
+	if err != nil {
+		return nil, err
+	}
+
+	message := ""
+	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+	for {
+		_, ttype, id, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return nil, err
+		}
+		if ttype == STOP {
+			break
+		}
+		switch id {
+		case 1:
+			if ttype == STRING {
+				if message, err = iprot.ReadString(); err != nil {
+					return nil, err
+				}
+			} else {
+				if err = SkipDefaultDepth(iprot, ttype); err != nil {
+					return nil, err
+				}
+			}
+		case 2:
+			if ttype == I32 {
+				if type_, err = iprot.ReadI32(); err != nil {
+					return nil, err
+				}
+			} else {
+				if err = SkipDefaultDepth(iprot, ttype); err != nil {
+					return nil, err
+				}
+			}
+		default:
+			if err = SkipDefaultDepth(iprot, ttype); err != nil {
+				return nil, err
+			}
+		}
+		if err = iprot.ReadFieldEnd(); err != nil {
+			return nil, err
+		}
+	}
+	return NewTApplicationException(type_, message), iprot.ReadStructEnd()
+}
+
+func (p *tApplicationException) Write(oprot TProtocol) (err error) {
+	err = oprot.WriteStructBegin("TApplicationException")
+	if err != nil {
+		return
+	}
+	if len(p.Error()) > 0 {
+		err = oprot.WriteFieldBegin("message", STRING, 1)
+		if err != nil {
+			return
+		}
+		err = oprot.WriteString(p.Error())
+		if err != nil {
+			return
+		}
+		err = oprot.WriteFieldEnd()
+		if err != nil {
+			return
+		}
+	}
+	err = oprot.WriteFieldBegin("type", I32, 2)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteI32(p.type_)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldEnd()
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldStop()
+	if err != nil {
+		return
+	}
+	err = oprot.WriteStructEnd()
+	return
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
new file mode 100644
index 0000000000000000000000000000000000000000..690d341111b5f4e6558f23de4d450f7bc20b1c02
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -0,0 +1,514 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+type TBinaryProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+	reader        io.Reader
+	writer        io.Writer
+	strictRead    bool
+	strictWrite   bool
+	buffer        [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+	strictRead  bool
+	strictWrite bool
+}
+
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+	return NewTBinaryProtocol(t, false, true)
+}
+
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+	p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+	if et, ok := t.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(t)
+	}
+	p.reader = p.trans
+	p.writer = p.trans
+	return p
+}
+
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactory(false, true)
+}
+
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+	return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+	return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+}
+
+/**
+ * Writing Methods
+ */
+
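+// In strict mode the message header is a version word (VERSION_1 OR'd with
+// the message type) followed by the name and sequence id; in non-strict
+// mode it is just the name, a raw type byte, and the sequence id.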
+func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+	if p.strictWrite {
+		version := uint32(VERSION_1) | uint32(typeId)
+		e := p.WriteI32(int32(version))
+		if e != nil {
+			return e
+		}
+		e = p.WriteString(name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(seqId)
+		return e
+	} else {
+		e := p.WriteString(name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteByte(int8(typeId))
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(seqId)
+		return e
+	}
+}
+
+func (p *TBinaryProtocol) WriteMessageEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	e := p.WriteByte(int8(typeId))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI16(id)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop() error {
+	e := p.WriteByte(STOP)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	e := p.WriteByte(int8(keyType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteByte(int8(valueType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
+	e := p.WriteByte(int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
+	e := p.WriteByte(int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(value bool) error {
+	if value {
+		return p.WriteByte(1)
+	}
+	return p.WriteByte(0)
+}
+
+func (p *TBinaryProtocol) WriteByte(value int8) error {
+	e := p.trans.WriteByte(byte(value))
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(value int16) error {
+	v := p.buffer[0:2]
+	binary.BigEndian.PutUint16(v, uint16(value))
+	_, e := p.writer.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(value int32) error {
+	v := p.buffer[0:4]
+	binary.BigEndian.PutUint32(v, uint32(value))
+	_, e := p.writer.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(value int64) error {
+	v := p.buffer[0:8]
+	binary.BigEndian.PutUint64(v, uint64(value))
+	_, err := p.writer.Write(v)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(value float64) error {
+	return p.WriteI64(int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(value string) error {
+	e := p.WriteI32(int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.WriteString(value)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(value []byte) error {
+	e := p.WriteI32(int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.writer.Write(value)
+	return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
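+// A negative leading i32 signals a strict header: its low byte carries the
+// message type and the masked high bits must equal VERSION_1. A non-negative
+// value is the method-name length of the old, non-strict format.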
+func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return "", typeId, 0, NewTProtocolException(e)
+	}
+	if size < 0 {
+		typeId = TMessageType(size & 0x0ff)
+		version := int64(int64(size) & VERSION_MASK)
+		if version != VERSION_1 {
+			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+		}
+		name, e = p.ReadString()
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		seqId, e = p.ReadI32()
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		return name, typeId, seqId, nil
+	}
+	if p.strictRead {
+		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+	}
+	name, e2 := p.readStringBody(size)
+	if e2 != nil {
+		return name, typeId, seqId, e2
+	}
+	b, e3 := p.ReadByte()
+	if e3 != nil {
+		return name, typeId, seqId, e3
+	}
+	typeId = TMessageType(b)
+	seqId, e4 := p.ReadI32()
+	if e4 != nil {
+		return name, typeId, seqId, e4
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+	return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
+	t, err := p.ReadByte()
+	typeId = TType(t)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if t != STOP {
+		seqId, err = p.ReadI16()
+	}
+	return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd() error {
+	return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
+	k, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	kType = TType(k)
+	v, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	vType = TType(v)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
+	b, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	return
+}
+
+func (p *TBinaryProtocol) ReadListEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+	b, e := p.ReadByte()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd() error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadBool() (bool, error) {
+	b, e := p.ReadByte()
+	v := true
+	if b != 1 {
+		v = false
+	}
+	return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte() (int8, error) {
+	v, err := p.trans.ReadByte()
+	return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+	buf := p.buffer[0:2]
+	err = p.readAll(buf)
+	value = int16(binary.BigEndian.Uint16(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+	buf := p.buffer[0:4]
+	err = p.readAll(buf)
+	value = int32(binary.BigEndian.Uint32(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(buf)
+	value = int64(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(buf)
+	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadString() (value string, err error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return "", e
+	}
+	if size < 0 {
+		err = invalidDataLength
+		return
+	}
+
+	return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
+	size, e := p.ReadI32()
+	if e != nil {
+		return nil, e
+	}
+	if size < 0 {
+		return nil, invalidDataLength
+	}
+	if uint64(size) > p.trans.RemainingBytes() {
+		return nil, invalidDataLength
+	}
+
+	isize := int(size)
+	buf := make([]byte, isize)
+	_, err := io.ReadFull(p.trans, buf)
+	return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush() (err error) {
+	return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(buf []byte) error {
+	_, err := io.ReadFull(p.reader, buf)
+	return NewTProtocolException(err)
+}
+
+const readLimit = 32768
+
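+// readStringBody reads size bytes in chunks of at most readLimit, reusing
+// the protocol's 64-byte scratch buffer for small strings to avoid an
+// allocation.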
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+	if size < 0 {
+		return "", nil
+	}
+	if uint64(size) > p.trans.RemainingBytes() {
+		return "", invalidDataLength
+	}
+
+	var (
+		buf bytes.Buffer
+		e   error
+		b   []byte
+	)
+
+	switch {
+	case int(size) <= len(p.buffer):
+		b = p.buffer[:size] // avoids allocation for small reads
+	case int(size) < readLimit:
+		b = make([]byte, size)
+	default:
+		b = make([]byte, readLimit)
+	}
+
+	for size > 0 {
+		_, e = io.ReadFull(p.trans, b)
+		buf.Write(b)
+		if e != nil {
+			break
+		}
+		size -= readLimit
+		if size < readLimit && size > 0 {
+			b = b[:size]
+		}
+	}
+	return buf.String(), NewTProtocolException(e)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9299f2fa13c7822cb58a6a4432836caf8d5f473
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -0,0 +1,815 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	COMPACT_PROTOCOL_ID       = 0x082
+	COMPACT_VERSION           = 1
+	COMPACT_VERSION_MASK      = 0x1f
+	COMPACT_TYPE_MASK         = 0x0E0
+	COMPACT_TYPE_BITS         = 0x07
+	COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+	COMPACT_BOOLEAN_TRUE  = 0x01
+	COMPACT_BOOLEAN_FALSE = 0x02
+	COMPACT_BYTE          = 0x03
+	COMPACT_I16           = 0x04
+	COMPACT_I32           = 0x05
+	COMPACT_I64           = 0x06
+	COMPACT_DOUBLE        = 0x07
+	COMPACT_BINARY        = 0x08
+	COMPACT_LIST          = 0x09
+	COMPACT_SET           = 0x0A
+	COMPACT_MAP           = 0x0B
+	COMPACT_STRUCT        = 0x0C
+)
+
+var (
+	ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+	ttypeToCompactType = map[TType]tCompactType{
+		STOP:   STOP,
+		BOOL:   COMPACT_BOOLEAN_TRUE,
+		BYTE:   COMPACT_BYTE,
+		I16:    COMPACT_I16,
+		I32:    COMPACT_I32,
+		I64:    COMPACT_I64,
+		DOUBLE: COMPACT_DOUBLE,
+		STRING: COMPACT_BINARY,
+		LIST:   COMPACT_LIST,
+		SET:    COMPACT_SET,
+		MAP:    COMPACT_MAP,
+		STRUCT: COMPACT_STRUCT,
+	}
+}
+
+type TCompactProtocolFactory struct{}
+
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+	return &TCompactProtocolFactory{}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTCompactProtocol(trans)
+}
+
+type TCompactProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+
+	// Used to keep track of the last field for the current and previous structs,
+	// so we can do the delta stuff.
+	lastField   []int
+	lastFieldId int
+
+	// If we encounter a boolean field begin, save the TField here so it can
+	// have the value incorporated.
+	booleanFieldName    string
+	booleanFieldId      int16
+	booleanFieldPending bool
+
+	// If we read a field header, and it's a boolean field, save the boolean
+	// value here so that readBool can use it.
+	boolValue          bool
+	boolValueIsNotNull bool
+	buffer             [64]byte
+}
+
+// Create a TCompactProtocol given a TTransport
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+	p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+	if et, ok := trans.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(trans)
+	}
+
+	return p
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
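+// The layout is: the protocol id byte (0x82), one byte packing the version
+// (low 5 bits) and message type (3 bits above them), the sequence id as a
+// varint, and finally the name as a length-prefixed string.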
+func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	_, err = p.writeVarint32(seqid)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	return p.WriteString(name)
+}
+
+func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(name string) error {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd() error {
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	if typeId == BOOL {
+		// we want to possibly include the value, so we'll wait.
+		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+		return nil
+	}
+	_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+	return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
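+// For example, with lastFieldId == 1 and id == 2 the field id delta (1) and
+// the type nibble share a single byte: (1 << 4) | type. Deltas outside 1-15
+// fall back to a full type byte plus a zigzag-varint field id.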
+func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+	// if there's a type override, use that.
+	var typeToWrite byte
+	if typeOverride == 0xFF {
+		typeToWrite = byte(p.getCompactType(typeId))
+	} else {
+		typeToWrite = typeOverride
+	}
+	// check if we can use delta encoding for the field id
+	fieldId := int(id)
+	written := 0
+	if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+		// write them together
+		err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+	} else {
+		// write them separate
+		err := p.writeByteDirect(typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+		err = p.WriteI16(id)
+		written = 1 + 2
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	p.lastFieldId = fieldId
+	return written, nil
+}
+
+func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop() error {
+	err := p.writeByteDirect(STOP)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	if size == 0 {
+		err := p.writeByteDirect(0)
+		return NewTProtocolException(err)
+	}
+	_, err := p.writeVarint32(int32(size))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd() error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+
+func (p *TCompactProtocol) WriteBool(value bool) error {
+	v := byte(COMPACT_BOOLEAN_FALSE)
+	if value {
+		v = byte(COMPACT_BOOLEAN_TRUE)
+	}
+	if p.booleanFieldPending {
+		// we haven't written the field header yet
+		_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+		p.booleanFieldPending = false
+		return NewTProtocolException(err)
+	}
+	// we're not part of a field, so just write the value.
+	err := p.writeByteDirect(v)
+	return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(value int8) error {
+	err := p.writeByteDirect(byte(value))
+	return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(value int16) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+	return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(value int32) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(value int64) error {
+	_, err := p.writeVarint64(p.int64ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(value float64) error {
+	buf := p.buffer[0:8]
+	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+	_, err := p.trans.Write(buf)
+	return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(value string) error {
+	_, e := p.writeVarint32(int32(len(value)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	_, e = p.trans.WriteString(value)
+	return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+	_, e := p.writeVarint32(int32(len(bin)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	if len(bin) > 0 {
+		_, e = p.trans.Write(bin)
+		return NewTProtocolException(e)
+	}
+	return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	protocolId, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	if protocolId != COMPACT_PROTOCOL_ID {
+		e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+		return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+	}
+
+	versionAndType, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	version := versionAndType & COMPACT_VERSION_MASK
+	typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+	if version != COMPACT_VERSION {
+		e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+		err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+		return
+	}
+	seqId, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	name, err = p.ReadString()
+	return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd() error {
+	// consume the last field we read off the wire.
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+	t, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	// if it's a stop, then we can return immediately, as the struct is over.
+	if (t & 0x0f) == STOP {
+		return "", STOP, 0, nil
+	}
+
+	// mask off the 4 MSB of the type header. it could contain a field id delta.
+	modifier := int16((t & 0xf0) >> 4)
+	if modifier == 0 {
+		// not a delta. look ahead for the zigzag varint field id.
+		id, err = p.ReadI16()
+		if err != nil {
+			return
+		}
+	} else {
+		// has a delta. add the delta to the last read field id.
+		id = int16(p.lastFieldId) + modifier
+	}
+	typeId, e := p.getTType(tCompactType(t & 0x0f))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+
+	// if this happens to be a boolean field, the value is encoded in the type
+	if p.isBoolType(t) {
+		// save the boolean value in a special instance variable.
+		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+		p.boolValueIsNotNull = true
+	}
+
+	// push the new field onto the field stack so we can keep the deltas going.
+	p.lastFieldId = int(id)
+	return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+	size32, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	keyAndValueType := byte(STOP)
+	if size != 0 {
+		keyAndValueType, err = p.readByteDirect()
+		if err != nil {
+			return
+		}
+	}
+	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+	return
+}
+
+func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
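+// For example, a 3-element list of i32 values arrives as the single header
+// byte (3 << 4) | COMPACT_I32 == 0x35.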
+func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+	size_and_type, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+	size = int((size_and_type >> 4) & 0x0f)
+	if size == 15 {
+		size2, e := p.readVarint32()
+		if e != nil {
+			err = NewTProtocolException(e)
+			return
+		}
+		if size2 < 0 {
+			err = invalidDataLength
+			return
+		}
+		size = int(size2)
+	}
+	elemType, e := p.getTType(tCompactType(size_and_type))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	return
+}
+
+func (p *TCompactProtocol) ReadListEnd() error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+	return p.ReadListBegin()
+}
+
+func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+	if p.boolValueIsNotNull {
+		p.boolValueIsNotNull = false
+		return p.boolValue, nil
+	}
+	v, err := p.readByteDirect()
+	return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte() (int8, error) {
+	v, err := p.readByteDirect()
+	if err != nil {
+		return 0, NewTProtocolException(err)
+	}
+	return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16() (value int16, err error) {
+	v, err := p.ReadI32()
+	return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+	v, e := p.readVarint32()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt32(v)
+	return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+	v, e := p.readVarint64()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt64(v)
+	return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+	longBits := p.buffer[0:8]
+	_, e := io.ReadFull(p.trans, longBits)
+	if e != nil {
+		return 0.0, NewTProtocolException(e)
+	}
+	return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Reads a length-prefixed string off the wire, reusing the scratch buffer
+// for short strings.
+func (p *TCompactProtocol) ReadString() (value string, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return "", NewTProtocolException(e)
+	}
+	if length < 0 {
+		return "", invalidDataLength
+	}
+	if uint64(length) > p.trans.RemainingBytes() {
+		return "", invalidDataLength
+	}
+
+	if length == 0 {
+		return "", nil
+	}
+	var buf []byte
+	if length <= int32(len(p.buffer)) {
+		buf = p.buffer[0:length]
+	} else {
+		buf = make([]byte, length)
+	}
+	_, e = io.ReadFull(p.trans, buf)
+	return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return nil, NewTProtocolException(e)
+	}
+	if length == 0 {
+		return []byte{}, nil
+	}
+	if length < 0 {
+		return nil, invalidDataLength
+	}
+	if uint64(length) > p.trans.RemainingBytes() {
+		return nil, invalidDataLength
+	}
+
+	buf := make([]byte, length)
+	_, e = io.ReadFull(p.trans, buf)
+	return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush() (err error) {
+	return NewTProtocolException(p.trans.Flush())
+}
+
+func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+	if size <= 14 {
+		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+	}
+	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+	if err != nil {
+		return 0, err
+	}
+	m, err := p.writeVarint32(int32(size))
+	return 1 + m, err
+}
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
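+// For example, 300 (binary 100101100) encodes as 0xAC 0x02: each byte holds
+// 7 payload bits, least-significant group first, with the MSB set when more
+// bytes follow.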
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+	i32buf := p.buffer[0:5]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			i32buf[idx] = byte(n)
+			idx++
+			break
+		} else {
+			i32buf[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint32(n)
+			n = int32(u >> 7)
+		}
+	}
+	return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+	varint64out := p.buffer[0:10]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			varint64out[idx] = byte(n)
+			idx++
+			break
+		} else {
+			varint64out[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint64(n)
+			n = int64(u >> 7)
+		}
+	}
+	return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
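+// For example: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3; the sign bit moves into
+// the least-significant bit so small magnitudes stay small.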
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+	return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+	return (n << 1) ^ (n >> 31)
+}
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+	return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+	return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+	// if the wire contains the right stuff, this will just truncate the i64 we
+	// read and get us the right sign.
+	v, err := p.readVarint64()
+	return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+	shift := uint(0)
+	result := int64(0)
+	for {
+		b, err := p.readByteDirect()
+		if err != nil {
+			return 0, err
+		}
+		result |= int64(b&0x7f) << shift
+		if (b & 0x80) != 0x80 {
+			break
+		}
+		shift += 7
+	}
+	return result, nil
+}
+
+// readByteDirect reads a raw byte, unlike ReadByte, which returns the
+// Thrift byte type (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+	return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
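+// For example: 1 -> -1, 2 -> 1, 3 -> -2 (the inverse of int32ToZigzag).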
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+	u := uint32(n)
+	return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+	u := uint64(n)
+	return int64(u>>1) ^ -(n & 1)
+}
+
+// bytesToInt64 converts an 8-byte little-endian slice to an int64.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+	return int64(binary.LittleEndian.Uint64(b))
+}
+
+// bytesToUint64 converts an 8-byte little-endian slice to a uint64.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+	return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+	return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+	switch byte(t) & 0x0f {
+	case STOP:
+		return STOP, nil
+	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+		return BOOL, nil
+	case COMPACT_BYTE:
+		return BYTE, nil
+	case COMPACT_I16:
+		return I16, nil
+	case COMPACT_I32:
+		return I32, nil
+	case COMPACT_I64:
+		return I64, nil
+	case COMPACT_DOUBLE:
+		return DOUBLE, nil
+	case COMPACT_BINARY:
+		return STRING, nil
+	case COMPACT_LIST:
+		return LIST, nil
+	case COMPACT_SET:
+		return SET, nil
+	case COMPACT_MAP:
+		return MAP, nil
+	case COMPACT_STRUCT:
+		return STRUCT, nil
+	}
+	return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+}
+
+// Given a TType value, find the corresponding tCompactType constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+	return ttypeToCompactType[t]
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea8d6f66114ce3c9401b59816b54120064153af7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+	error
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
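+// The concrete exception kind (transport, protocol, or application) is
+// preserved so callers can still switch on the type and read its TypeId.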
+func PrependError(prepend string, err error) error {
+	if t, ok := err.(TTransportException); ok {
+		return NewTTransportException(t.TypeId(), prepend+t.Error())
+	}
+	if t, ok := err.(TProtocolException); ok {
+		return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
+	}
+	if t, ok := err.(TApplicationException); ok {
+		return NewTApplicationException(t.TypeId(), prepend+t.Error())
+	}
+
+	return errors.New(prepend + err.Error())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
new file mode 100644
index 0000000000000000000000000000000000000000..b62fd56f063461bff9ba28f1a5b0aa9242a72f7d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
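+// It embeds bytes.Buffer, so Read, Write, and the other io methods are
+// inherited directly; only the TTransport lifecycle methods are added here.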
+type TMemoryBuffer struct {
+	*bytes.Buffer
+	size int
+}
+
+type TMemoryBufferTransportFactory struct {
+	size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
+	if trans != nil {
+		t, ok := trans.(*TMemoryBuffer)
+		if ok && t.size > 0 {
+			return NewTMemoryBufferLen(t.size)
+		}
+	}
+	return NewTMemoryBufferLen(p.size)
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+	return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+	buf := make([]byte, 0, size)
+	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
+
+func (p *TMemoryBuffer) IsOpen() bool {
+	return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+	p.Buffer.Reset()
+	return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.Buffer.Len())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
new file mode 100644
index 0000000000000000000000000000000000000000..25ab2e98a256e84b175b2d894a795a1f6cc7f5c4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+	INVALID_TMESSAGE_TYPE TMessageType = 0
+	CALL                  TMessageType = 1
+	REPLY                 TMessageType = 2
+	EXCEPTION             TMessageType = 3
+	ONEWAY                TMessageType = 4
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa8daa9b54f9263497cf70145dfbdc46a3dd2e60
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"math"
+	"strconv"
+)
+
+type Numeric interface {
+	Int64() int64
+	Int32() int32
+	Int16() int16
+	Byte() byte
+	Int() int
+	Float64() float64
+	Float32() float32
+	String() string
+	isNull() bool
+}
+
+type numeric struct {
+	iValue int64
+	dValue float64
+	sValue string
+	isNil  bool
+}
+
+var (
+	INFINITY          Numeric
+	NEGATIVE_INFINITY Numeric
+	NAN               Numeric
+	ZERO              Numeric
+	NUMERIC_NULL      Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+	if math.IsInf(dValue, 1) {
+		return INFINITY
+	}
+	if math.IsInf(dValue, -1) {
+		return NEGATIVE_INFINITY
+	}
+	if math.IsNaN(dValue) {
+		return NAN
+	}
+	iValue := int64(dValue)
+	sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(iValue, 10) // string(iValue) would yield a rune, not decimal digits
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10) // string(iValue) would yield a rune, not decimal digits
+	isNil := false
+	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+	if sValue == INFINITY.String() {
+		return INFINITY
+	}
+	if sValue == NEGATIVE_INFINITY.String() {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == NAN.String() {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	isNil := len(sValue) == 0
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+	if isNull {
+		return NewNullNumeric()
+	}
+	if sValue == JSON_INFINITY {
+		return INFINITY
+	}
+	if sValue == JSON_NEGATIVE_INFINITY {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == JSON_NAN {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+	return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+	return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+	return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+	return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+	return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+	return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+	return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+	return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+	return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+	return p.isNil
+}
+
+func init() {
+	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca0d3faf20ee2a23a15dbcb5b1f3c7282da58076
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+	Process(in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+	Process(seqId int32, in, out TProtocol) (bool, TException)
+}
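
TProcessor is the server-side contract: read one request from an input protocol, write one response to an output protocol, and report whether more messages should be processed. A hypothetical driver over that contract (serveLoop is illustrative, not part of the package) might look like:

```go
package thrift

// serveLoop is a hypothetical helper illustrating how a server would drive a
// TProcessor: keep processing messages until the processor reports it is done
// or raises an exception.
func serveLoop(p TProcessor, in, out TProtocol) TException {
	for {
		ok, exc := p.Process(in, out)
		if exc != nil {
			return exc
		}
		if !ok {
			return nil
		}
	}
}
```
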
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
new file mode 100644
index 0000000000000000000000000000000000000000..45fa202e741c2d8c495134bf64de322481fff099
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+)
+
+const (
+	VERSION_MASK = 0xffff0000
+	VERSION_1    = 0x80010000
+)
+
+type TProtocol interface {
+	WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
+	WriteMessageEnd() error
+	WriteStructBegin(name string) error
+	WriteStructEnd() error
+	WriteFieldBegin(name string, typeId TType, id int16) error
+	WriteFieldEnd() error
+	WriteFieldStop() error
+	WriteMapBegin(keyType TType, valueType TType, size int) error
+	WriteMapEnd() error
+	WriteListBegin(elemType TType, size int) error
+	WriteListEnd() error
+	WriteSetBegin(elemType TType, size int) error
+	WriteSetEnd() error
+	WriteBool(value bool) error
+	WriteByte(value int8) error
+	WriteI16(value int16) error
+	WriteI32(value int32) error
+	WriteI64(value int64) error
+	WriteDouble(value float64) error
+	WriteString(value string) error
+	WriteBinary(value []byte) error
+
+	ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
+	ReadMessageEnd() error
+	ReadStructBegin() (name string, err error)
+	ReadStructEnd() error
+	ReadFieldBegin() (name string, typeId TType, id int16, err error)
+	ReadFieldEnd() error
+	ReadMapBegin() (keyType TType, valueType TType, size int, err error)
+	ReadMapEnd() error
+	ReadListBegin() (elemType TType, size int, err error)
+	ReadListEnd() error
+	ReadSetBegin() (elemType TType, size int, err error)
+	ReadSetEnd() error
+	ReadBool() (value bool, err error)
+	ReadByte() (value int8, err error)
+	ReadI16() (value int16, err error)
+	ReadI32() (value int32, err error)
+	ReadI64() (value int64, err error)
+	ReadDouble() (value float64, err error)
+	ReadString() (value string, err error)
+	ReadBinary() (value []byte, err error)
+
+	Skip(fieldType TType) (err error)
+	Flush() (err error)
+
+	Transport() TTransport
+}
+
+// The maximum recursive depth the skip() function will traverse
+const DEFAULT_RECURSION_DEPTH = 64
+
+// Skips over the next data element from the provided input TProtocol object.
+func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
+	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
+
+// Skips over the next data element from the provided input TProtocol object.
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+	}
+
+	switch fieldType {
+	case STOP:
+		return
+	case BOOL:
+		_, err = self.ReadBool()
+		return
+	case BYTE:
+		_, err = self.ReadByte()
+		return
+	case I16:
+		_, err = self.ReadI16()
+		return
+	case I32:
+		_, err = self.ReadI32()
+		return
+	case I64:
+		_, err = self.ReadI64()
+		return
+	case DOUBLE:
+		_, err = self.ReadDouble()
+		return
+	case STRING:
+		_, err = self.ReadString()
+		return
+	case STRUCT:
+		if _, err = self.ReadStructBegin(); err != nil {
+			return err
+		}
+		for {
+			_, typeId, _, err := self.ReadFieldBegin()
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			self.ReadFieldEnd()
+		}
+		return self.ReadStructEnd()
+	case MAP:
+		keyType, valueType, size, err := self.ReadMapBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, keyType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+			if err := Skip(self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+		}
+		return self.ReadMapEnd()
+	case SET:
+		elemType, size, err := self.ReadSetBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadSetEnd()
+	case LIST:
+		elemType, size, err := self.ReadListBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadListEnd()
+	}
+	return nil
+}
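
Skip is what makes forward-compatible readers possible: a decoder calls it for any field type it does not recognize, discarding the value without needing its schema. A sketch of a tolerant struct reader built on SkipDefaultDepth (readUnknownStruct is illustrative):

```go
package thrift

// readUnknownStruct is a hypothetical reader that discards an entire struct,
// whatever fields it contains, using the bounded-depth Skip above.
func readUnknownStruct(p TProtocol) error {
	if _, err := p.ReadStructBegin(); err != nil {
		return err
	}
	for {
		_, typeId, _, err := p.ReadFieldBegin()
		if err != nil {
			return err
		}
		if typeId == STOP {
			break
		}
		// Discard the value; recursion is capped at DEFAULT_RECURSION_DEPTH.
		if err := SkipDefaultDepth(p, typeId); err != nil {
			return err
		}
		if err := p.ReadFieldEnd(); err != nil {
			return err
		}
	}
	return p.ReadStructEnd()
}
```
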
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..6e357ee890dfbae7a5c4191dcb7921959d69ad49
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/base64"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+	TException
+	TypeId() int
+}
+
+const (
+	UNKNOWN_PROTOCOL_EXCEPTION = 0
+	INVALID_DATA               = 1
+	NEGATIVE_SIZE              = 2
+	SIZE_LIMIT                 = 3
+	BAD_VERSION                = 4
+	NOT_IMPLEMENTED            = 5
+	DEPTH_LIMIT                = 6
+)
+
+type tProtocolException struct {
+	typeId  int
+	message string
+}
+
+func (p *tProtocolException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+	return p.message
+}
+
+func (p *tProtocolException) Error() string {
+	return p.message
+}
+
+func NewTProtocolException(err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	if e, ok := err.(TProtocolException); ok {
+		return e
+	}
+	if _, ok := err.(base64.CorruptInputError); ok {
+		return &tProtocolException{INVALID_DATA, err.Error()}
+	}
+	return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	return &tProtocolException{errType, err.Error()}
+}
+
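
NewTProtocolException does light classification: existing protocol exceptions pass through, base64.CorruptInputError becomes INVALID_DATA, and everything else is UNKNOWN_PROTOCOL_EXCEPTION. A small sketch of that behavior:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	// A corrupt-base64 error is recognized and tagged as INVALID_DATA.
	_, err := base64.StdEncoding.DecodeString("!!! not base64 !!!")
	pe := thrift.NewTProtocolException(err)
	fmt.Println(pe.TypeId() == thrift.INVALID_DATA) // true

	// nil in, nil out: safe to wrap unconditionally.
	fmt.Println(thrift.NewTProtocolException(nil) == nil) // true
}
```
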
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
new file mode 100644
index 0000000000000000000000000000000000000000..c40f796d886ac91f1bff9bad58b79e6e69120a6f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+	GetProtocol(trans TTransport) TProtocol
+}
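
The factory exists so transport-agnostic code can be handed "a way to make protocols" rather than a concrete protocol type. A tiny sketch of that indirection (buildProtocol is a hypothetical helper; TMemoryBuffer and the simple-JSON factory are defined elsewhere in this package):

```go
package thrift

// buildProtocol shows the factory indirection: callers pick the encoding by
// passing a factory, not a concrete protocol type.
// e.g. buildProtocol(NewTSimpleJSONProtocolFactory())
func buildProtocol(f TProtocolFactory) TProtocol {
	trans := NewTMemoryBufferLen(64)
	return f.GetProtocol(trans)
}
```
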
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..8e296a99b5f919638863654abf37b3da89c1b854
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "io"
+
+type RichTransport struct {
+	TTransport
+}
+
+// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
+func NewTRichTransport(trans TTransport) *RichTransport {
+	return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+	return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+	return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+	return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
+	return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+	v := [1]byte{0}
+	n, err := r.Read(v[0:1])
+	if n > 0 && (err == nil || err == io.EOF) {
+		return v[0], nil
+	}
+	if n > 0 && err != nil {
+		return v[0], err
+	}
+	if err != nil {
+		return 0, err
+	}
+	return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+	v := [1]byte{c}
+	_, err := w.Write(v[0:1])
+	return err
+}
+
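
RichTransport simply layers byte- and string-level helpers over any TTransport. A quick round trip through an in-memory buffer (assuming TMemoryBuffer from this package):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBufferLen(16)
	rich := thrift.NewTRichTransport(buf)
	_ = rich.WriteByte('x')      // single-byte write helper
	_, _ = rich.WriteString("y") // string write helper
	c, _ := rich.ReadByte()
	fmt.Println(string(c)) // x
}
```
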
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
new file mode 100644
index 0000000000000000000000000000000000000000..771222999091601a903a9da198c7bb034fa27e65
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TSerializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+type TStruct interface {
+	Write(p TProtocol) error
+	Read(p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+
+	return &TSerializer{
+		transport,
+		protocol}
+}
+
+func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(); err != nil {
+		return
+	}
+	if err = t.Transport.Flush(); err != nil {
+		return
+	}
+
+	return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(); err != nil {
+		return
+	}
+
+	if err = t.Transport.Flush(); err != nil {
+		return
+	}
+
+	b = append(b, t.Transport.Bytes()...)
+	return
+}
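
TSerializer pairs a TMemoryBuffer with the default binary protocol so any TStruct can be flattened to bytes in one call. A sketch with a hand-written TStruct (pair is illustrative; in practice the Thrift compiler generates Write/Read):

```go
package thrift

// pair is a hypothetical TStruct; real ones come from generated code.
type pair struct {
	Key   string
	Value int64
}

func (p *pair) Write(prot TProtocol) error {
	// Error handling elided for brevity.
	prot.WriteStructBegin("pair")
	prot.WriteFieldBegin("key", STRING, 1)
	prot.WriteString(p.Key)
	prot.WriteFieldEnd()
	prot.WriteFieldBegin("value", I64, 2)
	prot.WriteI64(p.Value)
	prot.WriteFieldEnd()
	prot.WriteFieldStop()
	return prot.WriteStructEnd()
}

func (p *pair) Read(prot TProtocol) error {
	// A real implementation would decode each field; skipping suffices here.
	return SkipDefaultDepth(prot, STRUCT)
}

// exampleSerialize flattens one struct to its binary wire form.
func exampleSerialize() ([]byte, error) {
	return NewTSerializer().Write(&pair{Key: "op", Value: 1})
}
```
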
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
new file mode 100644
index 0000000000000000000000000000000000000000..412a482d055ab472b533cc57a8e7583189918c00
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -0,0 +1,1337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+)
+
+type _ParseContext int
+
+const (
+	_CONTEXT_IN_TOPLEVEL          _ParseContext = 1
+	_CONTEXT_IN_LIST_FIRST        _ParseContext = 2
+	_CONTEXT_IN_LIST              _ParseContext = 3
+	_CONTEXT_IN_OBJECT_FIRST      _ParseContext = 4
+	_CONTEXT_IN_OBJECT_NEXT_KEY   _ParseContext = 5
+	_CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+)
+
+func (p _ParseContext) String() string {
+	switch p {
+	case _CONTEXT_IN_TOPLEVEL:
+		return "TOPLEVEL"
+	case _CONTEXT_IN_LIST_FIRST:
+		return "LIST-FIRST"
+	case _CONTEXT_IN_LIST:
+		return "LIST"
+	case _CONTEXT_IN_OBJECT_FIRST:
+		return "OBJECT-FIRST"
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		return "OBJECT-NEXT-KEY"
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		return "OBJECT-NEXT-VALUE"
+	}
+	return "UNKNOWN-PARSE-CONTEXT"
+}
+
+// JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages.  It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+	trans TTransport
+
+	parseContextStack []int
+	dumpContext       []int
+
+	writer *bufio.Writer
+	reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+	v := &TSimpleJSONProtocol{trans: t,
+		writer: bufio.NewWriter(t),
+		reader: bufio.NewReader(t),
+	}
+	v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
+	v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+	return v
+}
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+	return &TSimpleJSONProtocolFactory{}
+}
+
+var (
+	JSON_COMMA                   []byte
+	JSON_COLON                   []byte
+	JSON_LBRACE                  []byte
+	JSON_RBRACE                  []byte
+	JSON_LBRACKET                []byte
+	JSON_RBRACKET                []byte
+	JSON_QUOTE                   byte
+	JSON_QUOTE_BYTES             []byte
+	JSON_NULL                    []byte
+	JSON_TRUE                    []byte
+	JSON_FALSE                   []byte
+	JSON_INFINITY                string
+	JSON_NEGATIVE_INFINITY       string
+	JSON_NAN                     string
+	JSON_INFINITY_BYTES          []byte
+	JSON_NEGATIVE_INFINITY_BYTES []byte
+	JSON_NAN_BYTES               []byte
+	json_nonbase_map_elem_bytes  []byte
+)
+
+func init() {
+	JSON_COMMA = []byte{','}
+	JSON_COLON = []byte{':'}
+	JSON_LBRACE = []byte{'{'}
+	JSON_RBRACE = []byte{'}'}
+	JSON_LBRACKET = []byte{'['}
+	JSON_RBRACKET = []byte{']'}
+	JSON_QUOTE = '"'
+	JSON_QUOTE_BYTES = []byte{'"'}
+	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+	JSON_INFINITY = "Infinity"
+	JSON_NEGATIVE_INFINITY = "-Infinity"
+	JSON_NAN = "NaN"
+	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+	b, _ := json.Marshal(s)
+	s1 := string(b)
+	return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+	s1 := new(string)
+	err := json.Unmarshal([]byte(s), s1)
+	return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteString(name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+	if e := p.WriteString(name); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
+	//return p.OutputListEnd()
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(keyType)); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(valueType)); e != nil {
+		return e
+	}
+	return p.WriteI32(int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+	return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
+	return p.WriteI32(int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
+	return p.WriteI32(int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+	// The JSON library only takes in a string, not an arbitrary byte array. To
+	// transmit the bytes efficiently we must convert them into a valid JSON
+	// string, so we use base64 encoding to avoid excessive escaping/quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	if name, err = p.ReadString(); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte()
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return "", STOP, 0, err
+	}
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 {
+		switch b[0] {
+		case JSON_RBRACE[0]:
+			return "", STOP, 0, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			name, err := p.ParseStringBody()
+			// simplejson is not meant to be read back into thrift
+			// - see http://wiki.apache.org/thrift/ThriftUsageJava
+			// - use JSON instead
+			if err != nil {
+				return name, STOP, 0, err
+			}
+			return name, STOP, -1, p.ParsePostValue()
+			/*
+			   if err = p.ParsePostValue(); err != nil {
+			     return name, STOP, 0, err
+			   }
+			   if isNull, err := p.ParseListBegin(); isNull || err != nil {
+			     return name, STOP, 0, err
+			   }
+			   bType, err := p.ReadByte()
+			   thetype := TType(bType)
+			   if err != nil {
+			     return name, thetype, 0, err
+			   }
+			   id, err := p.ReadI16()
+			   return name, thetype, id, err
+			*/
+		}
+		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+	return nil
+	//return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	bKeyType, e := p.ReadByte()
+	keyType = TType(bKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	bValueType, e := p.ReadByte()
+	valueType = TType(bValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, err := p.ReadI64()
+	size = int(iSize)
+	return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+	var value bool
+
+	if err := p.ParsePreValue(); err != nil {
+		return value, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 {
+		switch f[0] {
+		case JSON_TRUE[0]:
+			b := make([]byte, len(JSON_TRUE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_TRUE) {
+				value = true
+			} else {
+				e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			break
+		case JSON_FALSE[0]:
+			b := make([]byte, len(JSON_FALSE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_FALSE) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			break
+		case JSON_NULL[0]:
+			b := make([]byte, len(JSON_NULL))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_NULL) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		default:
+			e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+			return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
+	v, err := p.ReadI64()
+	return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
+	v, err := p.ReadI64()
+	return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
+	v, err := p.ReadI64()
+	return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush() (err error) {
+	return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
+	return SkipDefaultDepth(p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if _, e := p.write(JSON_COMMA); e != nil {
+			return NewTProtocolException(e)
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if _, e := p.write(JSON_COLON); e != nil {
+			return NewTProtocolException(e)
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
+		break
+	case _CONTEXT_IN_OBJECT_FIRST:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if value {
+		v = string(JSON_TRUE)
+	} else {
+		v = string(JSON_FALSE)
+	}
+	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	default:
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_NULL); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if math.IsNaN(value) {
+		v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+	} else if math.IsInf(value, 1) {
+		v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+	} else if math.IsInf(value, -1) {
+		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+	} else {
+		v = strconv.FormatFloat(value, 'g', -1, 64)
+		switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+		default:
+		}
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	v := strconv.FormatInt(value, 10)
+	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	default:
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if e := p.OutputStringData(jsonQuote(s)); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+	_, e := p.write([]byte(s))
+	return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+	if _, e := p.write(JSON_RBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+	if _, e := p.write(JSON_RBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(elemType)); e != nil {
+		return e
+	}
+	if e := p.WriteI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	b, _ := p.reader.Peek(1)
+	switch cxt {
+	case _CONTEXT_IN_LIST:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACKET[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACE[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_COLON[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
+		break
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+	for {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return nil
+		}
+		switch b[0] {
+		case ' ', '\r', '\n', '\t':
+			p.reader.ReadByte()
+			continue
+		default:
+			break
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+		if !ok {
+			return "", NewTProtocolException(err)
+		}
+		return v, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	str := string(JSON_QUOTE) + line + s
+	v, ok := jsonUnquote(str)
+	if !ok {
+		e := fmt.Errorf("Unable to parse as JSON string %s", str)
+		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		return line, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	v := line + s
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+	line, err := p.reader.ReadBytes(JSON_QUOTE)
+	if err != nil {
+		return line, NewTProtocolException(err)
+	}
+	line2 := line[0 : len(line)-1]
+	l := len(line2)
+	if (l % 4) != 0 {
+		pad := 4 - (l % 4)
+		fill := [...]byte{'=', '=', '='}
+		line2 = append(line2, fill[:pad]...)
+		l = len(line2)
+	}
+	output := make([]byte, base64.StdEncoding.DecodedLen(l))
+	n, err := base64.StdEncoding.Decode(output, line2)
+	return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value int64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Int64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value float64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Float64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return false, err
+	}
+	var b []byte
+	b, err := p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+		p.reader.ReadByte()
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+		return false, nil
+	} else if p.safePeekContains(JSON_NULL) {
+		return true, nil
+	}
+	e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+	return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+		e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACE[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', '}':
+			break
+		}
+	}
+	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+	if e := p.ParsePreValue(); e != nil {
+		return false, e
+	}
+	var b []byte
+	b, err = p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+		p.reader.ReadByte()
+		isNull = false
+	} else if p.safePeekContains(JSON_NULL) {
+		isNull = true
+	} else {
+		err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+	}
+	return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	bElemType, err := p.ReadByte()
+	elemType = TType(bElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, err2 := p.ReadI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	if cxt != _CONTEXT_IN_LIST {
+		e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACKET[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+			break
+		}
+	}
+	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+		return nil
+	}
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+	e := p.readNonSignificantWhitespace()
+	if e != nil {
+		return nil, VOID, NewTProtocolException(e)
+	}
+	b, e := p.reader.Peek(1)
+	if len(b) > 0 {
+		c := b[0]
+		switch c {
+		case JSON_NULL[0]:
+			buf := make([]byte, len(JSON_NULL))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return nil, VOID, NewTProtocolException(e)
+			}
+			if string(JSON_NULL) != string(buf) {
+				e = mismatch(string(JSON_NULL), string(buf))
+				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return nil, VOID, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			v, e := p.ParseStringBody()
+			if e != nil {
+				return v, UTF8, NewTProtocolException(e)
+			}
+			if v == JSON_INFINITY {
+				return INFINITY, DOUBLE, nil
+			} else if v == JSON_NEGATIVE_INFINITY {
+				return NEGATIVE_INFINITY, DOUBLE, nil
+			} else if v == JSON_NAN {
+				return NAN, DOUBLE, nil
+			}
+			return v, UTF8, nil
+		case JSON_TRUE[0]:
+			buf := make([]byte, len(JSON_TRUE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return true, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_TRUE) != string(buf) {
+				e := mismatch(string(JSON_TRUE), string(buf))
+				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return true, BOOL, nil
+		case JSON_FALSE[0]:
+			buf := make([]byte, len(JSON_FALSE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return false, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_FALSE) != string(buf) {
+				e := mismatch(string(JSON_FALSE), string(buf))
+				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return false, BOOL, nil
+		case JSON_LBRACKET[0]:
+			_, e := p.reader.ReadByte()
+			return make([]interface{}, 0), LIST, NewTProtocolException(e)
+		case JSON_LBRACE[0]:
+			_, e := p.reader.ReadByte()
+			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+			// assume numeric
+			v, e := p.readNumeric()
+			return v, DOUBLE, e
+		default:
+			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+	cont := true
+	for cont {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return false, nil
+		}
+		switch b[0] {
+		default:
+			return false, nil
+		case JSON_NULL[0]:
+			cont = false
+			break
+		case ' ', '\n', '\r', '\t':
+			p.reader.ReadByte()
+			break
+		}
+	}
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		return true, nil
+	}
+	return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 && b[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+	}
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+	isNull, err := p.readIfNull()
+	if isNull || err != nil {
+		return NUMERIC_NULL, err
+	}
+	hasDecimalPoint := false
+	nextCanBeSign := true
+	hasE := false
+	MAX_LEN := 40
+	buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+	continueFor := true
+	inQuotes := false
+	for continueFor {
+		c, err := p.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return NUMERIC_NULL, NewTProtocolException(err)
+		}
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case '.':
+			if hasDecimalPoint {
+				e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasDecimalPoint, nextCanBeSign = true, false
+		case 'e', 'E':
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasE, nextCanBeSign = true, true
+		case '-', '+':
+			if !nextCanBeSign {
+				e := fmt.Errorf("Negative sign within number")
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+			p.reader.UnreadByte()
+			continueFor = false
+		case JSON_NAN[0]:
+			if buf.Len() == 0 {
+				buffer := make([]byte, len(JSON_NAN))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NAN != string(buffer) {
+					e := mismatch(JSON_NAN, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NAN, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_INFINITY[0]:
+			if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+				buffer := make([]byte, len(JSON_INFINITY))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_INFINITY != string(buffer) {
+					e := mismatch(JSON_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return INFINITY, nil
+			} else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+				buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+				buffer[0] = JSON_NEGATIVE_INFINITY[0]
+				buffer[1] = c
+				_, e := p.reader.Read(buffer[2:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NEGATIVE_INFINITY != string(buffer) {
+					e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NEGATIVE_INFINITY, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_QUOTE:
+			if !inQuotes {
+				inQuotes = true
+			} else {
+				break
+			}
+		default:
+			e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+			return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	if buf.Len() == 0 {
+		e := fmt.Errorf("Unable to parse number from empty string ''")
+		return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+	for i := 0; i < len(b); i++ {
+		a, _ := p.reader.Peek(i + 1)
+		if len(a) == 0 || a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+	p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
+	p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+	n, err := p.writer.Write(b)
+	if err != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+	}
+	return n, err
+}
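
Putting the write half together: struct fields become object keys, and the dump-context stack decides where commas and colons go. A minimal sketch writing one object to an in-memory transport (error handling elided):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBufferLen(256)
	p := thrift.NewTSimpleJSONProtocol(buf)
	p.WriteStructBegin("span")
	p.WriteFieldBegin("name", thrift.STRING, 1) // field names become keys
	p.WriteString("op")
	p.WriteFieldEnd()
	p.WriteFieldBegin("count", thrift.I64, 2)
	p.WriteI64(2)
	p.WriteFieldEnd()
	p.WriteFieldStop()
	p.WriteStructEnd()
	p.Flush()
	fmt.Println(buf.String()) // {"name":"op","count":2}
}
```
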
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..453899651fc21af73421b6d2f6c8b3654e2426c8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+	Flush() (err error)
+}
+
+type ReadSizeProvider interface {
+	RemainingBytes() (num_bytes uint64)
+}
+
+
+// Encapsulates the I/O layer
+type TTransport interface {
+	io.ReadWriteCloser
+	Flusher
+	ReadSizeProvider
+
+	// Opens the transport for communication
+	Open() error
+
+	// Returns true if the transport is open
+	IsOpen() bool
+}
+
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+
+// TRichTransport is an "enhanced" transport with extra capabilities. You need
+// to use one of these to construct a protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake
+// to use TSocket directly in a protocol.
+type TRichTransport interface {
+	io.ReadWriter
+	io.ByteReader
+	io.ByteWriter
+	stringWriter
+	Flusher
+	ReadSizeProvider
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
new file mode 100644
index 0000000000000000000000000000000000000000..9505b44612d051938f4dc41af54eea42e03bfe07
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type timeoutable interface {
+	Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+	TException
+	TypeId() int
+	Err() error
+}
+
+const (
+	UNKNOWN_TRANSPORT_EXCEPTION = 0
+	NOT_OPEN                    = 1
+	ALREADY_OPEN                = 2
+	TIMED_OUT                   = 3
+	END_OF_FILE                 = 4
+)
+
+type tTransportException struct {
+	typeId int
+	err    error
+}
+
+func (p *tTransportException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+	return p.err.Error()
+}
+
+func (p *tTransportException) Err() error {
+	return p.err
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+	return &tTransportException{typeId: t, err: errors.New(e)}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+	if e == nil {
+		return nil
+	}
+
+	if t, ok := e.(TTransportException); ok {
+		return t
+	}
+
+	switch v := e.(type) {
+	case TTransportException:
+		return v
+	case timeoutable:
+		if v.Timeout() {
+			return &tTransportException{typeId: TIMED_OUT, err: e}
+		}
+	}
+
+	if e == io.EOF {
+		return &tTransportException{typeId: END_OF_FILE, err: e}
+	}
+
+	return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+}
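
The classification here is mostly about separating recoverable conditions (timeouts, end of stream) from everything else. A small sketch:

```go
package main

import (
	"fmt"
	"io"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	te := thrift.NewTTransportExceptionFromError(io.EOF)
	fmt.Println(te.TypeId() == thrift.END_OF_FILE) // true
	// The original error stays reachable for callers that need it.
	fmt.Println(te.Err() == io.EOF) // true
}
```
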
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
new file mode 100644
index 0000000000000000000000000000000000000000..533d1b437533ba9caa50763de0870f2a8a967ed3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TTransportFactory is a factory used to create wrapped instances of Transports.
+// It is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to mutate them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+	GetTransport(trans TTransport) TTransport
+}
+
+type tTransportFactory struct{}
+
+// GetTransport returns the base Transport unchanged; the default factory performs no wrapping.
+func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
+	return trans
+}
+
+func NewTTransportFactory() TTransportFactory {
+	return &tTransportFactory{}
+}
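+
+// Illustrative usage, e.g.
+//
+//	factory := NewTTransportFactory()
+//	wrapped := factory.GetTransport(trans) // with this factory, wrapped == trans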
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
new file mode 100644
index 0000000000000000000000000000000000000000..4292ffcadb130e9fd17776fd9d5a5d509d4bb73e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+	STOP   = 0
+	VOID   = 1
+	BOOL   = 2
+	BYTE   = 3
+	I08    = 3
+	DOUBLE = 4
+	I16    = 6
+	I32    = 8
+	I64    = 10
+	STRING = 11
+	UTF7   = 11
+	STRUCT = 12
+	MAP    = 13
+	SET    = 14
+	LIST   = 15
+	UTF8   = 16
+	UTF16  = 17
+	// BINARY = 18   wrong and unused
+)
+
+var typeNames = map[int]string{
+	STOP:   "STOP",
+	VOID:   "VOID",
+	BOOL:   "BOOL",
+	BYTE:   "BYTE",
+	DOUBLE: "DOUBLE",
+	I16:    "I16",
+	I32:    "I32",
+	I64:    "I64",
+	STRING: "STRING",
+	STRUCT: "STRUCT",
+	MAP:    "MAP",
+	SET:    "SET",
+	LIST:   "LIST",
+	UTF8:   "UTF8",
+	UTF16:  "UTF16",
+}
+
+func (p TType) String() string {
+	if s, ok := typeNames[int(p)]; ok {
+		return s
+	}
+	return "Unknown"
+}
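+
+// Illustrative: TType(STRUCT).String() returns "STRUCT", while an unmapped
+// value such as TType(42).String() returns "Unknown".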
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a3fc97abbcc879084bf750a2364ed88ba41f2ff
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -0,0 +1,488 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+	"github.com/uber/jaeger-client-go/internal/throttler"
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// Tracer implements opentracing.Tracer.
+type Tracer struct {
+	serviceName string
+	hostIPv4    uint32 // this is for zipkin endpoint conversion
+
+	sampler  SamplerV2
+	reporter Reporter
+	metrics  Metrics
+	logger   log.DebugLogger
+
+	timeNow      func() time.Time
+	randomNumber func() uint64
+
+	options struct {
+		gen128Bit                   bool // whether to generate 128bit trace IDs
+		zipkinSharedRPCSpan         bool
+		highTraceIDGenerator        func() uint64 // custom high trace ID generator
+		maxTagValueLength           int
+		noDebugFlagOnForcedSampling bool
+		maxLogsPerSpan              int
+		// more options to come
+	}
+	// allocator of Span objects
+	spanAllocator SpanAllocator
+
+	injectors  map[interface{}]Injector
+	extractors map[interface{}]Extractor
+
+	observer compositeObserver
+
+	tags    []Tag
+	process Process
+
+	baggageRestrictionManager baggage.RestrictionManager
+	baggageSetter             *baggageSetter
+
+	debugThrottler throttler.Throttler
+}
+
+// NewTracer creates Tracer implementation that reports tracing to Jaeger.
+// The returned io.Closer can be used in shutdown hooks to ensure that the internal
+// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+// TODO (breaking change) return *Tracer only, without closer.
+func NewTracer(
+	serviceName string,
+	sampler Sampler,
+	reporter Reporter,
+	options ...TracerOption,
+) (opentracing.Tracer, io.Closer) {
+	t := &Tracer{
+		serviceName:   serviceName,
+		sampler:       samplerV1toV2(sampler),
+		reporter:      reporter,
+		injectors:     make(map[interface{}]Injector),
+		extractors:    make(map[interface{}]Extractor),
+		metrics:       *NewNullMetrics(),
+		spanAllocator: simpleSpanAllocator{},
+	}
+
+	for _, option := range options {
+		option(t)
+	}
+
+	// register default injectors/extractors unless they are already provided via options
+	textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+	t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+	httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+	t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+
+	binaryPropagator := NewBinaryPropagator(t)
+	t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
+
+	// TODO remove after TChannel supports OpenTracing
+	interopPropagator := &jaegerTraceContextPropagator{tracer: t}
+	t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
+
+	zipkinPropagator := &zipkinPropagator{tracer: t}
+	t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
+
+	if t.baggageRestrictionManager != nil {
+		t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
+	} else {
+		t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
+	}
+	if t.debugThrottler == nil {
+		t.debugThrottler = throttler.DefaultThrottler{}
+	}
+
+	if t.randomNumber == nil {
+		seedGenerator := utils.NewRand(time.Now().UnixNano())
+		pool := sync.Pool{
+			New: func() interface{} {
+				return rand.NewSource(seedGenerator.Int63())
+			},
+		}
+
+		t.randomNumber = func() uint64 {
+			generator := pool.Get().(rand.Source)
+			number := uint64(generator.Int63())
+			pool.Put(generator)
+			return number
+		}
+	}
+	if t.timeNow == nil {
+		t.timeNow = time.Now
+	}
+	if t.logger == nil {
+		t.logger = log.NullLogger
+	}
+	// Set tracer-level tags
+	t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
+	if hostname, err := os.Hostname(); err == nil {
+		t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
+	}
+	if ipval, ok := t.getTag(TracerIPTagKey); ok {
+		ipv4, err := utils.ParseIPToUint32(ipval.(string))
+		if err != nil {
+			t.hostIPv4 = 0
+			t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
+		} else {
+			t.hostIPv4 = ipv4
+		}
+	} else if ip, err := utils.HostIP(); err == nil {
+		t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
+		t.hostIPv4 = utils.PackIPAsUint32(ip)
+	} else {
+		t.logger.Error("Unable to determine this host's IP address: " + err.Error())
+	}
+
+	if t.options.gen128Bit {
+		if t.options.highTraceIDGenerator == nil {
+			t.options.highTraceIDGenerator = t.randomNumber
+		}
+	} else if t.options.highTraceIDGenerator != nil {
+		t.logger.Error("Overriding high trace ID generator but not generating " +
+			"128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
+	}
+	if t.options.maxTagValueLength == 0 {
+		t.options.maxTagValueLength = DefaultMaxTagValueLength
+	}
+	t.process = Process{
+		Service: serviceName,
+		UUID:    strconv.FormatUint(t.randomNumber(), 16),
+		Tags:    t.tags,
+	}
+	if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
+		throttler.SetProcess(t.process)
+	}
+
+	return t, t
+}
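+
+// Illustrative usage (a sketch; NewConstSampler and NewNullReporter are
+// assumed from elsewhere in this package):
+//
+//	tracer, closer := NewTracer("my-service", NewConstSampler(true), NewNullReporter())
+//	defer closer.Close()
+//	span := tracer.StartSpan("operation")
+//	span.Finish()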
+
+// addCodec registers an injector and an extractor for the given propagation format, unless they are already defined.
+func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
+	if _, ok := t.injectors[format]; !ok {
+		t.injectors[format] = injector
+	}
+	if _, ok := t.extractors[format]; !ok {
+		t.extractors[format] = extractor
+	}
+}
+
+// StartSpan implements StartSpan() method of opentracing.Tracer.
+func (t *Tracer) StartSpan(
+	operationName string,
+	options ...opentracing.StartSpanOption,
+) opentracing.Span {
+	sso := opentracing.StartSpanOptions{}
+	for _, o := range options {
+		o.Apply(&sso)
+	}
+	return t.startSpanWithOptions(operationName, sso)
+}
+
+func (t *Tracer) startSpanWithOptions(
+	operationName string,
+	options opentracing.StartSpanOptions,
+) opentracing.Span {
+	if options.StartTime.IsZero() {
+		options.StartTime = t.timeNow()
+	}
+
+	// Predicate whether the given span context is a valid reference
+	// which may be used as parent / debug ID / baggage items source
+	isValidReference := func(ctx SpanContext) bool {
+		return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0
+	}
+
+	var references []Reference
+	var parent SpanContext
+	var hasParent bool // need this because `parent` is a value, not reference
+	var ctx SpanContext
+	var isSelfRef bool
+	for _, ref := range options.References {
+		ctxRef, ok := ref.ReferencedContext.(SpanContext)
+		if !ok {
+			t.logger.Error(fmt.Sprintf(
+				"Reference contains invalid type of SpanReference: %s",
+				reflect.ValueOf(ref.ReferencedContext)))
+			continue
+		}
+		if !isValidReference(ctxRef) {
+			continue
+		}
+
+		if ref.Type == selfRefType {
+			isSelfRef = true
+			ctx = ctxRef
+			continue
+		}
+
+		references = append(references, Reference{Type: ref.Type, Context: ctxRef})
+
+		if !hasParent {
+			parent = ctxRef
+			hasParent = ref.Type == opentracing.ChildOfRef
+		}
+	}
+	if !hasParent && isValidReference(parent) {
+		// If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
+		// the FollowFromRef as the parent
+		hasParent = true
+	}
+
+	rpcServer := false
+	if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
+		rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
+	}
+
+	var internalTags []Tag
+	newTrace := false
+	if !isSelfRef {
+		if !hasParent || !parent.IsValid() {
+			newTrace = true
+			ctx.traceID.Low = t.randomID()
+			if t.options.gen128Bit {
+				ctx.traceID.High = t.options.highTraceIDGenerator()
+			}
+			ctx.spanID = SpanID(ctx.traceID.Low)
+			ctx.parentID = 0
+			ctx.samplingState = &samplingState{
+				localRootSpan: ctx.spanID,
+			}
+			if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
+				ctx.samplingState.setDebugAndSampled()
+				internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
+			}
+		} else {
+			ctx.traceID = parent.traceID
+			if rpcServer && t.options.zipkinSharedRPCSpan {
+				// Support Zipkin's one-span-per-RPC model
+				ctx.spanID = parent.spanID
+				ctx.parentID = parent.parentID
+			} else {
+				ctx.spanID = SpanID(t.randomID())
+				ctx.parentID = parent.spanID
+			}
+			ctx.samplingState = parent.samplingState
+			if parent.remote {
+				ctx.samplingState.setFinal()
+				ctx.samplingState.localRootSpan = ctx.spanID
+			}
+		}
+		if hasParent {
+			// copy baggage items
+			if l := len(parent.baggage); l > 0 {
+				ctx.baggage = make(map[string]string, len(parent.baggage))
+				for k, v := range parent.baggage {
+					ctx.baggage[k] = v
+				}
+			}
+		}
+	}
+
+	sp := t.newSpan()
+	sp.context = ctx
+	sp.tracer = t
+	sp.operationName = operationName
+	sp.startTime = options.StartTime
+	sp.duration = 0
+	sp.references = references
+	sp.firstInProcess = rpcServer || sp.context.parentID == 0
+
+	if !sp.isSamplingFinalized() {
+		decision := t.sampler.OnCreateSpan(sp)
+		sp.applySamplingDecision(decision, false)
+	}
+	sp.observer = t.observer.OnStartSpan(sp, operationName, options)
+
+	if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
+		if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
+			sp.tags = make([]Tag, 0, tagsTotalLength)
+		}
+		sp.tags = append(sp.tags, internalTags...)
+		for k, v := range options.Tags {
+			sp.setTagInternal(k, v, false)
+		}
+	}
+	t.emitNewSpanMetrics(sp, newTrace)
+	return sp
+}
+
+// Inject implements Inject() method of opentracing.Tracer
+func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	c, ok := ctx.(SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidSpanContext
+	}
+	if injector, ok := t.injectors[format]; ok {
+		return injector.Inject(c, carrier)
+	}
+	return opentracing.ErrUnsupportedFormat
+}
+
+// Extract implements Extract() method of opentracing.Tracer
+func (t *Tracer) Extract(
+	format interface{},
+	carrier interface{},
+) (opentracing.SpanContext, error) {
+	if extractor, ok := t.extractors[format]; ok {
+		spanCtx, err := extractor.Extract(carrier)
+		if err != nil {
+			return nil, err // ensure returned spanCtx is nil
+		}
+		spanCtx.remote = true
+		return spanCtx, nil
+	}
+	return nil, opentracing.ErrUnsupportedFormat
+}
+
+// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
+func (t *Tracer) Close() error {
+	t.logger.Debugf("closing tracer")
+	t.reporter.Close()
+	t.sampler.Close()
+	if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
+		_ = mgr.Close()
+	}
+	if throttler, ok := t.debugThrottler.(io.Closer); ok {
+		_ = throttler.Close()
+	}
+	return nil
+}
+
+// Tags returns a slice of tracer-level tags.
+func (t *Tracer) Tags() []opentracing.Tag {
+	tags := make([]opentracing.Tag, len(t.tags))
+	for i, tag := range t.tags {
+		tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
+	}
+	return tags
+}
+
+// getTag returns the value of specific tag, if not exists, return nil.
+// TODO only used by tests, move there.
+func (t *Tracer) getTag(key string) (interface{}, bool) {
+	for _, tag := range t.tags {
+		if tag.key == key {
+			return tag.value, true
+		}
+	}
+	return nil, false
+}
+
+// newSpan returns an instance of a clean Span object.
+// If options.PoolSpans is true, the spans are retrieved from an object pool.
+func (t *Tracer) newSpan() *Span {
+	return t.spanAllocator.Get()
+}
+
+// emitNewSpanMetrics generates metrics on the number of started spans and traces.
+// newTrace param: we cannot simply check for parentID==0 because in the Zipkin model the
+// server-side RPC span has the exact same trace/span/parent IDs as the
+// calling client-side span, but obviously the server-side span is
+// no longer a root span of the trace.
+func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
+	if !sp.isSamplingFinalized() {
+		t.metrics.SpansStartedDelayedSampling.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedDelayedSampling.Inc(1)
+		}
+		// joining a trace is not possible, because sampling decision inherited from upstream is final
+	} else if sp.context.IsSampled() {
+		t.metrics.SpansStartedSampled.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedSampled.Inc(1)
+		} else if sp.firstInProcess {
+			t.metrics.TracesJoinedSampled.Inc(1)
+		}
+	} else {
+		t.metrics.SpansStartedNotSampled.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedNotSampled.Inc(1)
+		} else if sp.firstInProcess {
+			t.metrics.TracesJoinedNotSampled.Inc(1)
+		}
+	}
+}
+
+func (t *Tracer) reportSpan(sp *Span) {
+	if !sp.isSamplingFinalized() {
+		t.metrics.SpansFinishedDelayedSampling.Inc(1)
+	} else if sp.context.IsSampled() {
+		t.metrics.SpansFinishedSampled.Inc(1)
+	} else {
+		t.metrics.SpansFinishedNotSampled.Inc(1)
+	}
+
+	// Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
+	// and then Release() it when no longer needed.
+	// Otherwise, the span may be reused for another trace and its data may be overwritten.
+	if sp.context.IsSampled() {
+		t.reporter.Report(sp)
+	}
+
+	sp.Release()
+}
+
+// randomID generates a random trace/span ID, using the tracer's randomNumber() generator.
+// It never returns 0.
+func (t *Tracer) randomID() uint64 {
+	val := t.randomNumber()
+	for val == 0 {
+		val = t.randomNumber()
+	}
+	return val
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) setBaggage(sp *Span, key, value string) {
+	t.baggageSetter.setBaggage(sp, key, value)
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) isDebugAllowed(operation string) bool {
+	return t.debugThrottler.IsAllowed(operation)
+}
+
+// Sampler returns the sampler given to the tracer at creation.
+func (t *Tracer) Sampler() SamplerV2 {
+	return t.sampler
+}
+
+// SelfRef creates an opentracing compliant SpanReference from a jaeger
+// SpanContext. This is a factory function in order to encapsulate jaeger specific
+// types.
+func SelfRef(ctx SpanContext) opentracing.SpanReference {
+	return opentracing.SpanReference{
+		Type:              selfRefType,
+		ReferencedContext: ctx,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0734b772f0968c9b405d8249db824df354b5aea
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -0,0 +1,182 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+	"github.com/uber/jaeger-client-go/internal/throttler"
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// TracerOption is a function that sets some option on the tracer
+type TracerOption func(tracer *Tracer)
+
+// TracerOptions is a factory for all available TracerOption's
+var TracerOptions tracerOptions
+
+type tracerOptions struct{}
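+
+// Illustrative usage (a sketch; log.StdLogger is assumed from this package's
+// log subpackage):
+//
+//	tracer, closer := NewTracer("svc", sampler, reporter,
+//		TracerOptions.Logger(log.StdLogger),
+//		TracerOptions.Gen128Bit(true))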
+
+// Metrics creates a TracerOption that initializes Metrics on the tracer,
+// which is used to emit statistics.
+func (tracerOptions) Metrics(m *Metrics) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.metrics = *m
+	}
+}
+
+// Logger creates a TracerOption that gives the tracer a Logger.
+func (tracerOptions) Logger(logger Logger) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.logger = log.DebugLogAdapter(logger)
+	}
+}
+
+func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+	return func(tracer *Tracer) {
+		if headerKeys == nil {
+			return
+		}
+		textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+		tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+		httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+		tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+	}
+}
+
+// TimeNow creates a TracerOption that gives the tracer a function
+// used to generate timestamps for spans.
+func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.timeNow = timeNow
+	}
+}
+
+// RandomNumber creates a TracerOption that gives the tracer
+// a thread-safe random number generator function for generating trace IDs.
+func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.randomNumber = randomNumber
+	}
+}
+
+// PoolSpans creates a TracerOption that tells the tracer whether it should use
+// an object pool to minimize span allocations.
+// This should be used with care, only if the service is not running any async tasks
+// that can access parent spans after those spans have been finished.
+func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+	return func(tracer *Tracer) {
+		if poolSpans {
+			tracer.spanAllocator = newSyncPollSpanAllocator()
+		} else {
+			tracer.spanAllocator = simpleSpanAllocator{}
+		}
+	}
+}
+
+// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// If not set, the factory method will obtain the current IP address.
+// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
+func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.hostIPv4 = hostIPv4
+	}
+}
+
+func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.injectors[format] = injector
+	}
+}
+
+func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.extractors[format] = extractor
+	}
+}
+
+func (t tracerOptions) Observer(observer Observer) TracerOption {
+	return t.ContribObserver(&oldObserver{obs: observer})
+}
+
+func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.observer.append(observer)
+	}
+}
+
+func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.gen128Bit = gen128Bit
+	}
+}
+
+func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+	}
+}
+
+func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.highTraceIDGenerator = highTraceIDGenerator
+	}
+}
+
+func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxTagValueLength = maxTagValueLength
+	}
+}
+
+// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+// value). If a span has more logs than this value, logs are dropped as
+// necessary (and replaced with a log describing how many were dropped).
+//
+// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+// half are the newest logs.
+func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxLogsPerSpan = maxLogsPerSpan
+	}
+}
+
+func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+	}
+}
+
+func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.tags = append(tracer.tags, Tag{key: key, value: value})
+	}
+}
+
+func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.baggageRestrictionManager = mgr
+	}
+}
+
+func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.debugThrottler = throttler
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5f5b19551fe33956397e3a691302f9f7f56cbf2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"io"
+)
+
+// Transport abstracts the method of sending spans out of process.
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same goroutine.
+type Transport interface {
+	// Append converts the span to the wire representation and adds it
+	// to the sender's internal buffer. If the buffer exceeds its designated
+	// size, the transport should call Flush() and return the number of spans
+	// flushed; otherwise it returns 0. If an error is returned, the returned
+	// number of spans is treated as the number of failed spans and reported
+	// to metrics accordingly.
+	Append(span *Span) (int, error)
+
+	// Flush submits the internal buffer to the remote server. It returns the
+	// number of spans flushed. If an error is returned, the returned number
+	// of spans is treated as the number of failed spans and reported to
+	// metrics accordingly.
+	Flush() (int, error)
+
+	io.Closer
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
new file mode 100644
index 0000000000000000000000000000000000000000..7370d8007518d1d598bd50a70d22702168fdbdf0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/uber/jaeger-client-go/internal/reporterstats"
+	"github.com/uber/jaeger-client-go/thrift"
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// Empirically obtained constant for how many bytes in the message are used for the envelope.
+// The total datagram size is:
+// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
+//
+// Note that due to the use of the Compact Thrift protocol, overhead grows with the number of spans
+// in the batch, because the length of the list is encoded as varint32, as is the SeqId.
+//
+// There is a unit test `TestEmitBatchOverhead` that validates this number; it fails at <68.
+const emitBatchOverhead = 70
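+
+// For example (illustrative arithmetic): with the default maxPacketSize of
+// utils.UDPPacketMaxLength (65000 bytes), maxSpanBytes = 65000 - 70 = 64930.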
+
+var errSpanTooLarge = errors.New("span is too large")
+
+type udpSender struct {
+	client          *utils.AgentClientUDP
+	maxPacketSize   int                   // max size of datagram in bytes
+	maxSpanBytes    int                   // max number of bytes to record spans (excluding envelope) in the datagram
+	byteBufferSize  int                   // current number of span bytes accumulated in the buffer
+	spanBuffer      []*j.Span             // spans buffered before a flush
+	thriftBuffer    *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+	thriftProtocol  thrift.TProtocol
+	process         *j.Process
+	processByteSize int
+
+	// reporterStats provides access to stats that are only known to Reporter
+	reporterStats reporterstats.ReporterStats
+
+	// The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
+	// so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
+	batchSeqNo           int64
+	tooLargeDroppedSpans int64
+	failedToEmitSpans    int64
+}
+
+// NewUDPTransport creates a Transport that submits spans to jaeger-agent over UDP.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+	if len(hostPort) == 0 {
+		hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+	}
+	if maxPacketSize == 0 {
+		maxPacketSize = utils.UDPPacketMaxLength
+	}
+
+	protocolFactory := thrift.NewTCompactProtocolFactory()
+
+	// Each span is first written to thriftBuffer to determine its size in bytes.
+	thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+	thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+
+	client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize)
+	if err != nil {
+		return nil, err
+	}
+
+	return &udpSender{
+		client:         client,
+		maxSpanBytes:   maxPacketSize - emitBatchOverhead,
+		thriftBuffer:   thriftBuffer,
+		thriftProtocol: thriftProtocol,
+	}, nil
+}
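+
+// Illustrative usage (a sketch; NewRemoteReporter is assumed from elsewhere in
+// this package). Passing an empty hostPort and a zero maxPacketSize selects
+// the defaults handled above:
+//
+//	sender, err := NewUDPTransport("", 0)
+//	if err != nil {
+//		// handle error
+//	}
+//	reporter := NewRemoteReporter(sender)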
+
+// SetReporterStats implements reporterstats.Receiver.
+func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
+	s.reporterStats = rs
+}
+
+func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
+	s.thriftBuffer.Reset()
+	_ = thriftStruct.Write(s.thriftProtocol)
+	return s.thriftBuffer.Len()
+}
+
+func (s *udpSender) Append(span *Span) (int, error) {
+	if s.process == nil {
+		s.process = BuildJaegerProcessThrift(span)
+		s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
+		s.byteBufferSize += s.processByteSize
+	}
+	jSpan := BuildJaegerThrift(span)
+	spanSize := s.calcSizeOfSerializedThrift(jSpan)
+	if spanSize > s.maxSpanBytes {
+		s.tooLargeDroppedSpans++
+		return 1, errSpanTooLarge
+	}
+
+	s.byteBufferSize += spanSize
+	if s.byteBufferSize <= s.maxSpanBytes {
+		s.spanBuffer = append(s.spanBuffer, jSpan)
+		if s.byteBufferSize < s.maxSpanBytes {
+			return 0, nil
+		}
+		return s.Flush()
+	}
+	// the latest span did not fit in the buffer
+	n, err := s.Flush()
+	s.spanBuffer = append(s.spanBuffer, jSpan)
+	s.byteBufferSize = spanSize + s.processByteSize
+	return n, err
+}
+
+func (s *udpSender) Flush() (int, error) {
+	n := len(s.spanBuffer)
+	if n == 0 {
+		return 0, nil
+	}
+	s.batchSeqNo++
+	batchSeqNo := int64(s.batchSeqNo)
+	err := s.client.EmitBatch(&j.Batch{
+		Process: s.process,
+		Spans:   s.spanBuffer,
+		SeqNo:   &batchSeqNo,
+		Stats:   s.makeStats(),
+	})
+	s.resetBuffers()
+	if err != nil {
+		s.failedToEmitSpans += int64(n)
+	}
+	return n, err
+}
+
+func (s *udpSender) Close() error {
+	return s.client.Close()
+}
+
+func (s *udpSender) resetBuffers() {
+	for i := range s.spanBuffer {
+		s.spanBuffer[i] = nil
+	}
+	s.spanBuffer = s.spanBuffer[:0]
+	s.byteBufferSize = s.processByteSize
+}
+
+func (s *udpSender) makeStats() *j.ClientStats {
+	var dropped int64
+	if s.reporterStats != nil {
+		dropped = s.reporterStats.SpansDroppedFromQueue()
+	}
+	return &j.ClientStats{
+		FullQueueDroppedSpans: dropped,
+		TooLargeDroppedSpans:  s.tooLargeDroppedSpans,
+		FailedToEmitSpans:     s.failedToEmitSpans,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
new file mode 100644
index 0000000000000000000000000000000000000000..237211f8224b2da0720944f7b4afa9f6bb56bec8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
+func GetJSON(url string, out interface{}) error {
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	return ReadJSON(resp, out)
+}
+
+// ReadJSON reads JSON from http.Response and parses it into `out`
+func ReadJSON(resp *http.Response, out interface{}) error {
+	defer resp.Body.Close()
+
+	if resp.StatusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+
+		return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+	}
+
+	if out == nil {
+		io.Copy(ioutil.Discard, resp.Body)
+		return nil
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+	return decoder.Decode(out)
+}
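+
+// Illustrative usage (a sketch; the URL and struct are hypothetical):
+//
+//	var out struct {
+//		Strategy string `json:"strategy"`
+//	}
+//	err := GetJSON("http://localhost:5778/sampling?service=svc", &out)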
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
new file mode 100644
index 0000000000000000000000000000000000000000..b51af7713f7cfb162ff9e8dba4e11ba46ccdc44d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"errors"
+	"net"
+)
+
+// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
+
+// scoreAddr scores how likely the given addr is to be a remote address and returns the
+// IP to use when listening. Any address which receives a negative score should not be used.
+// Scores are calculated as:
+//   -1 for any unknown IP address,
+//   +300 for IPv4 addresses,
+//   +100 for non-local addresses, with an extra +100 for "up" interfaces.
+func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
+	var ip net.IP
+	if netAddr, ok := addr.(*net.IPNet); ok {
+		ip = netAddr.IP
+	} else if netIP, ok := addr.(*net.IPAddr); ok {
+		ip = netIP.IP
+	} else {
+		return -1, nil
+	}
+
+	var score int
+	if ip.To4() != nil {
+		score += 300
+	}
+	if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
+		score += 100
+		if iface.Flags&net.FlagUp != 0 {
+			score += 100
+		}
+	}
+	return score, ip
+}
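+
+// Worked example of the scoring: a non-loopback IPv4 address on an "up"
+// interface scores 300+100+100 = 500, a loopback IPv4 scores 300, and a
+// non-loopback IPv6 address on an "up" interface scores 200.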
+
+// HostIP tries to find an IP that can be used by other machines to reach this machine.
+func HostIP() (net.IP, error) {
+	interfaces, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	bestScore := -1
+	var bestIP net.IP
+	// Select the highest scoring IP as the best IP.
+	for _, iface := range interfaces {
+		addrs, err := iface.Addrs()
+		if err != nil {
+			// Skip this interface if there is an error.
+			continue
+		}
+
+		for _, addr := range addrs {
+			score, ip := scoreAddr(iface, addr)
+			if score > bestScore {
+				bestScore = score
+				bestIP = ip
+			}
+		}
+	}
+
+	if bestScore == -1 {
+		return nil, errors.New("no addresses to listen on")
+	}
+
+	return bestIP, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
new file mode 100644
index 0000000000000000000000000000000000000000..9875f7f55cbd1230a71bef97f7837745e9e228aa
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"math/rand"
+	"sync"
+)
+
+// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
+// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
+type lockedSource struct {
+	mut sync.Mutex
+	src rand.Source
+}
+
+// NewRand returns a rand.Rand that is threadsafe.
+func NewRand(seed int64) *rand.Rand {
+	return rand.New(&lockedSource{src: rand.NewSource(seed)})
+}
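+
+// Illustrative: unlike a bare rand.Rand, the returned generator is safe to
+// share across goroutines, e.g.
+//
+//	r := NewRand(time.Now().UnixNano())
+//	go func() { _ = r.Int63() }()
+//	_ = r.Int63()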
+
+func (r *lockedSource) Int63() (n int64) {
+	r.mut.Lock()
+	n = r.src.Int63()
+	r.mut.Unlock()
+	return
+}
+
+// Seed implements Seed() of Source
+func (r *lockedSource) Seed(seed int64) {
+	r.mut.Lock()
+	r.src.Seed(seed)
+	r.mut.Unlock()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf2f13165bf1fa10896e11a49e0dc89521ac7d11
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"sync"
+	"time"
+)
+
+// RateLimiter is a filter used to check whether a message that costs itemCost units is within the rate limits.
+//
+// TODO (breaking change) remove this interface in favor of the public struct below
+//
+// Deprecated: use ReconfigurableRateLimiter.
+type RateLimiter interface {
+	CheckCredit(itemCost float64) bool
+}
+
+// ReconfigurableRateLimiter is a rate limiter based on the leaky bucket algorithm, formulated in terms of a
+// credit balance that is replenished every time the CheckCredit() method is called (a tick), by an amount proportional
+// to the time elapsed since the last tick, up to a maximum of creditsPerSecond. A call to CheckCredit() takes the cost
+// of an item we want to pay for with the balance. If the balance exceeds the cost of the item, the item is "purchased"
+// and the balance is reduced, indicated by a returned value of true. Otherwise the balance is unchanged and false is returned.
+//
+// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the
+// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
+// to determine if the message is within the rate limit.
+//
+// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
+// as bytes/second, and calling CheckCredit() with the actual message size.
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+	lock sync.Mutex
+
+	creditsPerSecond float64
+	balance          float64
+	maxBalance       float64
+	lastTick         time.Time
+
+	timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+	return &ReconfigurableRateLimiter{
+		creditsPerSecond: creditsPerSecond,
+		balance:          maxBalance,
+		maxBalance:       maxBalance,
+		lastTick:         time.Now(),
+		timeNow:          time.Now,
+	}
+}
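+
+// Illustrative usage (a sketch): cap emission at 5 messages per second with a
+// burst of up to 5:
+//
+//	limiter := NewRateLimiter(5.0, 5.0)
+//	if limiter.CheckCredit(1.0) {
+//		// within the rate limit; emit the message
+//	}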
+
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
+	// if we have enough credits to pay for current item, then reduce balance and allow
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	// otherwise check if balance can be increased due to time elapsed, and try again
+	rl.updateBalance()
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	return false
+}
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+	// calculate how much time passed since the last tick, and update current tick
+	currentTime := rl.timeNow()
+	elapsedTime := currentTime.Sub(rl.lastTick)
+	rl.lastTick = currentTime
+	// calculate how much credit we have accumulated since the last tick
+	rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+	if rl.balance > rl.maxBalance {
+		rl.balance = rl.maxBalance
+	}
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
+	rl.updateBalance() // get up to date balance
+	rl.balance = rl.balance * maxBalance / rl.maxBalance
+	rl.creditsPerSecond = creditsPerSecond
+	rl.maxBalance = maxBalance
+}
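+
+// Worked example of the pro-rating: a balance of 2 with maxBalance 4, updated
+// to a maxBalance of 10, is rescaled to 2 * 10 / 4 = 5.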
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..fadd73e49aea2770c1c26565bb63e69a4a48d00f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+
+	"github.com/uber/jaeger-client-go/thrift"
+
+	"github.com/uber/jaeger-client-go/thrift-gen/agent"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent
+const UDPPacketMaxLength = 65000
+
+// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface.
+type AgentClientUDP struct {
+	agent.Agent
+	io.Closer
+
+	connUDP       *net.UDPConn
+	client        *agent.AgentClient
+	maxPacketSize int                   // max size of datagram in bytes
+	thriftBuffer  *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+	if maxPacketSize == 0 {
+		maxPacketSize = UDPPacketMaxLength
+	}
+
+	thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+	protocolFactory := thrift.NewTCompactProtocolFactory()
+	client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+	destAddr, err := net.ResolveUDPAddr("udp", hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr)
+	if err != nil {
+		return nil, err
+	}
+	if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil {
+		return nil, err
+	}
+
+	clientUDP := &AgentClientUDP{
+		connUDP:       connUDP,
+		client:        client,
+		maxPacketSize: maxPacketSize,
+		thriftBuffer:  thriftBuffer}
+	return clientUDP, nil
+}
+
+// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
+func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error {
+	return errors.New("Not implemented")
+}
+
+// EmitBatch implements EmitBatch() of Agent interface
+func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
+	a.thriftBuffer.Reset()
+	a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages
+	if err := a.client.EmitBatch(batch); err != nil {
+		return err
+	}
+	if a.thriftBuffer.Len() > a.maxPacketSize {
+		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
+			a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+	}
+	_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+	return err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *AgentClientUDP) Close() error {
+	return a.connUDP.Close()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..ac3c325d1ede79586b9759d8b26dd2fc1c1643a4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	// ErrEmptyIP an error for empty ip strings
+	ErrEmptyIP = errors.New("empty string given for ip")
+
+	// ErrNotHostColonPort an error for invalid host port string
+	ErrNotHostColonPort = errors.New("expecting host:port")
+
+	// ErrNotFourOctets an error for the wrong number of octets after splitting a string
+	ErrNotFourOctets = errors.New("Wrong number of octets")
+)
+
+// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to an uint32
+func ParseIPToUint32(ip string) (uint32, error) {
+	if ip == "" {
+		return 0, ErrEmptyIP
+	}
+
+	if ip == "localhost" {
+		return 127<<24 | 1, nil
+	}
+
+	octets := strings.Split(ip, ".")
+	if len(octets) != 4 {
+		return 0, ErrNotFourOctets
+	}
+
+	var intIP uint32
+	for i := 0; i < 4; i++ {
+		octet, err := strconv.Atoi(octets[i])
+		if err != nil {
+			return 0, err
+		}
+		intIP = (intIP << 8) | uint32(octet)
+	}
+
+	return intIP, nil
+}
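+
+// Worked example: ParseIPToUint32("1.2.3.4") returns
+// 1<<24 | 2<<16 | 3<<8 | 4 = 16909060, and "localhost" maps to
+// 127<<24 | 1 = 2130706433 (i.e. 127.0.0.1).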
+
+// ParsePort converts a port number from string to uint16.
+func ParsePort(portString string) (uint16, error) {
+	port, err := strconv.ParseUint(portString, 10, 16)
+	return uint16(port), err
+}
+
+// PackIPAsUint32 packs an IPv4 as uint32
+func PackIPAsUint32(ip net.IP) uint32 {
+	if ipv4 := ip.To4(); ipv4 != nil {
+		return binary.BigEndian.Uint32(ipv4)
+	}
+	return 0
+}
+
+// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long
+// representing time since the epoch in microseconds, which is the format
+// expected in Jaeger spans encoded as Thrift.
+func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
+	// ^^^ Passing time.Time by value is faster than passing a pointer!
+	// BenchmarkTimeByValue-8	2000000000	         1.37 ns/op
+	// BenchmarkTimeByPtr-8  	2000000000	         1.98 ns/op
+
+	return t.UnixNano() / 1000
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
new file mode 100644
index 0000000000000000000000000000000000000000..98cab4b6ef9896f9381d49ed5f480b886db5881f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go"
+)
+
+// ZipkinSpanFormat is an OpenTracing carrier format constant
+const ZipkinSpanFormat = "zipkin-span-format"
+
+// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type ExtractableZipkinSpan interface {
+	TraceID() uint64
+	SpanID() uint64
+	ParentID() uint64
+	Flags() byte
+}
+
+// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type InjectableZipkinSpan interface {
+	SetTraceID(traceID uint64)
+	SetSpanID(spanID uint64)
+	SetParentID(parentID uint64)
+	SetFlags(flags byte)
+}
+
+type zipkinPropagator struct {
+	tracer *Tracer
+}
+
+func (p *zipkinPropagator) Inject(
+	ctx SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(InjectableZipkinSpan)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
+	carrier.SetSpanID(uint64(ctx.SpanID()))
+	carrier.SetParentID(uint64(ctx.ParentID()))
+	carrier.SetFlags(ctx.samplingState.flags())
+	return nil
+}
+
+func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	if carrier.TraceID() == 0 {
+		return emptyContext, opentracing.ErrSpanContextNotFound
+	}
+	var ctx SpanContext
+	ctx.traceID.Low = carrier.TraceID()
+	ctx.spanID = SpanID(carrier.SpanID())
+	ctx.parentID = SpanID(carrier.ParentID())
+	ctx.samplingState = &samplingState{}
+	ctx.samplingState.setFlags(carrier.Flags())
+	return ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
new file mode 100644
index 0000000000000000000000000000000000000000..73aeb000f895741c206adb26533eaae6cb96f635
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -0,0 +1,329 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/binary"
+	"fmt"
+	"time"
+
+	"github.com/opentracing/opentracing-go/ext"
+
+	"github.com/uber/jaeger-client-go/internal/spanlog"
+	z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	// Zipkin UI does not work well with non-string tag values
+	allowPackedNumbers = false
+)
+
+var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
+	string(ext.SpanKind):     setSpanKind,
+	string(ext.PeerHostIPv4): setPeerIPv4,
+	string(ext.PeerPort):     setPeerPort,
+	string(ext.PeerService):  setPeerService,
+	TracerIPTagKey:           removeTag,
+}
+
+// BuildZipkinThrift builds thrift span based on internal span.
+// TODO: (breaking change) move to transport/zipkin and make private.
+func BuildZipkinThrift(s *Span) *z.Span {
+	span := &zipkinSpan{Span: s}
+	span.handleSpecialTags()
+	parentID := int64(span.context.parentID)
+	var ptrParentID *int64
+	if parentID != 0 {
+		ptrParentID = &parentID
+	}
+	traceIDHigh := int64(span.context.traceID.High)
+	var ptrTraceIDHigh *int64
+	if traceIDHigh != 0 {
+		ptrTraceIDHigh = &traceIDHigh
+	}
+	timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+	duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+	endpoint := &z.Endpoint{
+		ServiceName: span.tracer.serviceName,
+		Ipv4:        int32(span.tracer.hostIPv4)}
+	thriftSpan := &z.Span{
+		TraceID:           int64(span.context.traceID.Low),
+		TraceIDHigh:       ptrTraceIDHigh,
+		ID:                int64(span.context.spanID),
+		ParentID:          ptrParentID,
+		Name:              span.operationName,
+		Timestamp:         &timestamp,
+		Duration:          &duration,
+		Debug:             span.context.IsDebug(),
+		Annotations:       buildAnnotations(span, endpoint),
+		BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
+	return thriftSpan
+}
+
+func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
+	// automatically adding 2 Zipkin CoreAnnotations
+	annotations := make([]*z.Annotation, 0, 2+len(span.logs))
+	var startLabel, endLabel string
+	if span.spanKind == string(ext.SpanKindRPCClientEnum) {
+		startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
+	} else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
+		startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
+	}
+	if !span.startTime.IsZero() && startLabel != "" {
+		start := &z.Annotation{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
+			Value:     startLabel,
+			Host:      endpoint}
+		annotations = append(annotations, start)
+		if span.duration != 0 {
+			endTs := span.startTime.Add(span.duration)
+			end := &z.Annotation{
+				Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
+				Value:     endLabel,
+				Host:      endpoint}
+			annotations = append(annotations, end)
+		}
+	}
+	for _, log := range span.logs {
+		anno := &z.Annotation{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+			Host:      endpoint}
+		if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
+			anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
+		} else {
+			anno.Value = err.Error()
+		}
+		annotations = append(annotations, anno)
+	}
+	return annotations
+}
+
+func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
+	// automatically adding local component or server/client address tag, and client version
+	annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
+
+	if span.peerDefined() && span.isRPC() {
+		peer := z.Endpoint{
+			Ipv4:        span.peer.Ipv4,
+			Port:        span.peer.Port,
+			ServiceName: span.peer.ServiceName}
+		label := z.CLIENT_ADDR
+		if span.isRPCClient() {
+			label = z.SERVER_ADDR
+		}
+		anno := &z.BinaryAnnotation{
+			Key:            label,
+			Value:          []byte{1},
+			AnnotationType: z.AnnotationType_BOOL,
+			Host:           &peer}
+		annotations = append(annotations, anno)
+	}
+	if !span.isRPC() {
+		componentName := endpoint.ServiceName
+		for _, tag := range span.tags {
+			if tag.key == string(ext.Component) {
+				componentName = stringify(tag.value)
+				break
+			}
+		}
+		local := &z.BinaryAnnotation{
+			Key:            z.LOCAL_COMPONENT,
+			Value:          []byte(componentName),
+			AnnotationType: z.AnnotationType_STRING,
+			Host:           endpoint}
+		annotations = append(annotations, local)
+	}
+	for _, tag := range span.tags {
+		// "Special tags" are already handled by this point, we'd be double reporting the
+		// tags if we don't skip here
+		if _, ok := specialTagHandlers[tag.key]; ok {
+			continue
+		}
+		if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
+			annotations = append(annotations, anno)
+		}
+	}
+	return annotations
+}
+
+func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
+	bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
+	if value, ok := val.(string); ok {
+		bann.Value = []byte(truncateString(value, maxTagValueLength))
+		bann.AnnotationType = z.AnnotationType_STRING
+	} else if value, ok := val.([]byte); ok {
+		if len(value) > maxTagValueLength {
+			value = value[:maxTagValueLength]
+		}
+		bann.Value = value
+		bann.AnnotationType = z.AnnotationType_BYTES
+	} else if value, ok := val.(int32); ok && allowPackedNumbers {
+		bann.Value = int32ToBytes(value)
+		bann.AnnotationType = z.AnnotationType_I32
+	} else if value, ok := val.(int64); ok && allowPackedNumbers {
+		bann.Value = int64ToBytes(value)
+		bann.AnnotationType = z.AnnotationType_I64
+	} else if value, ok := val.(int); ok && allowPackedNumbers {
+		bann.Value = int64ToBytes(int64(value))
+		bann.AnnotationType = z.AnnotationType_I64
+	} else if value, ok := val.(bool); ok {
+		bann.Value = []byte{boolToByte(value)}
+		bann.AnnotationType = z.AnnotationType_BOOL
+	} else {
+		value := stringify(val)
+		bann.Value = []byte(truncateString(value, maxTagValueLength))
+		bann.AnnotationType = z.AnnotationType_STRING
+	}
+	return bann
+}
+
+func stringify(value interface{}) string {
+	if s, ok := value.(string); ok {
+		return s
+	}
+	return fmt.Sprintf("%+v", value)
+}
+
+func truncateString(value string, maxLength int) string {
+	// we ignore the problem of utf8 runes possibly being sliced in the middle,
+	// as it is rather expensive to iterate through each tag just to find rune
+	// boundaries.
+	if len(value) > maxLength {
+		return value[:maxLength]
+	}
+	return value
+}
+
+func boolToByte(b bool) byte {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// int32ToBytes converts int32 to bytes.
+func int32ToBytes(i int32) []byte {
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(i))
+	return buf
+}
+
+// int64ToBytes converts int64 to bytes.
+func int64ToBytes(i int64) []byte {
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, uint64(i))
+	return buf
+}
+
+type zipkinSpan struct {
+	*Span
+
+	// peer points to the peer service participating in this span,
+	// e.g. the Client if this span is a server span,
+	// or Server if this span is a client span
+	peer struct {
+		Ipv4        int32
+		Port        int16
+		ServiceName string
+	}
+
+	// used to distinguish local vs. RPC Server vs. RPC Client spans
+	spanKind string
+}
+
+func (s *zipkinSpan) handleSpecialTags() {
+	s.Lock()
+	defer s.Unlock()
+	if s.firstInProcess {
+		// append the process tags
+		s.tags = append(s.tags, s.tracer.tags...)
+	}
+	filteredTags := make([]Tag, 0, len(s.tags))
+	for _, tag := range s.tags {
+		if handler, ok := specialTagHandlers[tag.key]; ok {
+			handler(s, tag.value)
+		} else {
+			filteredTags = append(filteredTags, tag)
+		}
+	}
+	s.tags = filteredTags
+}
+
+func setSpanKind(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		s.spanKind = val
+		return
+	}
+	if val, ok := value.(ext.SpanKindEnum); ok {
+		s.spanKind = string(val)
+	}
+}
+
+func setPeerIPv4(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		if ip, err := utils.ParseIPToUint32(val); err == nil {
+			s.peer.Ipv4 = int32(ip)
+			return
+		}
+	}
+	if val, ok := value.(uint32); ok {
+		s.peer.Ipv4 = int32(val)
+		return
+	}
+	if val, ok := value.(int32); ok {
+		s.peer.Ipv4 = val
+	}
+}
+
+func setPeerPort(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		if port, err := utils.ParsePort(val); err == nil {
+			s.peer.Port = int16(port)
+			return
+		}
+	}
+	if val, ok := value.(uint16); ok {
+		s.peer.Port = int16(val)
+		return
+	}
+	if val, ok := value.(int); ok {
+		s.peer.Port = int16(val)
+	}
+}
+
+func setPeerService(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		s.peer.ServiceName = val
+	}
+}
+
+func removeTag(s *zipkinSpan, value interface{}) {}
+
+func (s *zipkinSpan) peerDefined() bool {
+	return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
+}
+
+func (s *zipkinSpan) isRPC() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
+}
+
+func (s *zipkinSpan) isRPCClient() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return s.spanKind == string(ext.SpanKindRPCClientEnum)
+}
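The truncateString helper above slices by byte, and its comment flags the rune-boundary trade-off explicitly. A minimal standalone sketch (not part of the vendored file) of what byte-based truncation does to a multi-byte rune:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo" // 'é' occupies 2 bytes in UTF-8
	cut := s[:2] // byte-based truncation, as truncateString does
	fmt.Println(utf8.ValidString(cut)) // false: the rune was split mid-sequence
	fmt.Printf("% x\n", cut)           // 68 c3 (dangling lead byte of 'é')
}

The library accepts this occasional invalid tail because, as the comment notes, scanning every tag value for rune boundaries would be comparatively expensive.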
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a6a43efdb458629465bbf1fbf6b7b300c466b31
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Counter tracks the number of times an event has occurred
+type Counter interface {
+	// Inc adds the given value to the counter.
+	Inc(int64)
+}
+
+// NullCounter counter that does nothing
+var NullCounter Counter = nullCounter{}
+
+type nullCounter struct{}
+
+func (nullCounter) Inc(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ead061ebd64c6a83c3cfee2f9321cb82091dab6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// NSOptions defines the name and tags map associated with a factory namespace
+type NSOptions struct {
+	Name string
+	Tags map[string]string
+}
+
+// Options defines the information associated with a metric
+type Options struct {
+	Name string
+	Tags map[string]string
+	Help string
+}
+
+// TimerOptions defines the information associated with a metric
+type TimerOptions struct {
+	Name    string
+	Tags    map[string]string
+	Help    string
+	Buckets []time.Duration
+}
+
+// HistogramOptions defines the information associated with a metric
+type HistogramOptions struct {
+	Name    string
+	Tags    map[string]string
+	Help    string
+	Buckets []float64
+}
+
+// Factory creates new metrics
+type Factory interface {
+	Counter(metric Options) Counter
+	Timer(metric TimerOptions) Timer
+	Gauge(metric Options) Gauge
+	Histogram(metric HistogramOptions) Histogram
+
+	// Namespace returns a nested metrics factory.
+	Namespace(scope NSOptions) Factory
+}
+
+// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
+var NullFactory Factory = nullFactory{}
+
+type nullFactory struct{}
+
+func (nullFactory) Counter(options Options) Counter {
+	return NullCounter
+}
+func (nullFactory) Timer(options TimerOptions) Timer {
+	return NullTimer
+}
+func (nullFactory) Gauge(options Options) Gauge {
+	return NullGauge
+}
+func (nullFactory) Histogram(options HistogramOptions) Histogram {
+	return NullHistogram
+}
+func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
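A brief usage sketch (assumed, not part of the vendored file): NullFactory lets instrumentation code run unchanged when metric reporting is disabled, since every metric it hands out is a no-op.

package main

import "github.com/uber/jaeger-lib/metrics"

func main() {
	f := metrics.NullFactory
	requests := f.Counter(metrics.Options{Name: "requests"})
	requests.Inc(1) // discarded: nullCounter.Inc does nothing
}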
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c606391a095089cbf3007a5b3de339524cc0d58
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Gauge returns instantaneous measurements of something as an int64 value
+type Gauge interface {
+	// Update the gauge to the value passed in.
+	Update(int64)
+}
+
+// NullGauge gauge that does nothing
+var NullGauge Gauge = nullGauge{}
+
+type nullGauge struct{}
+
+func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3bd6174fe84ca12bcaaea0baf6ce8ea06b6d7b2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Jaeger Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Histogram that keeps track of a distribution of values.
+type Histogram interface {
+	// Records the value passed in.
+	Record(float64)
+}
+
+// NullHistogram that does nothing
+var NullHistogram Histogram = nullHistogram{}
+
+type nullHistogram struct{}
+
+func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
new file mode 100644
index 0000000000000000000000000000000000000000..c24445a1069082005784631561ea3040e0a15044
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"sort"
+)
+
+// GetKey converts name+tags into a single string of the form
+// "name|tag1=value1|...|tagN=valueN", where tag names are
+// sorted alphabetically.
+func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
+	keys := make([]string, 0, len(tags))
+	for k := range tags {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	key := name
+	for _, k := range keys {
+		key = key + tagsSep + k + tagKVSep + tags[k]
+	}
+	return key
+}
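For illustration, a small sketch (not from the vendored file) of the key format GetKey produces; the alphabetical tag ordering makes the key deterministic regardless of map iteration order.

package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey("requests",
		map[string]string{"status": "ok", "method": "GET"}, "|", "=")
	fmt.Println(key) // requests|method=GET|status=ok
}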
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..0df0c662e367e2da8d1cd074d809a863c95bb619
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// MustInit initializes the fields of the passed-in metrics struct using the given factory.
+//
+// It uses reflection to initialize a struct containing metrics fields
+// by assigning new Counter/Gauge/Timer values with the metric name retrieved
+// from the `metric` tag and stats tags retrieved from the `tags` tag.
+//
+// Note: all fields of the struct must be exported, have a `metric` tag, and be
+// of type Counter, Gauge, Timer, or Histogram.
+//
+// Errors during Init lead to a panic.
+func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+	if err := Init(metrics, factory, globalTags); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Init does the same as MustInit, but returns an error instead of
+// panicking.
+func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+	// Allow user to opt out of reporting metrics by passing in nil.
+	if factory == nil {
+		factory = NullFactory
+	}
+
+	counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
+	gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
+	timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
+	histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
+
+	v := reflect.ValueOf(m).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		tags := make(map[string]string)
+		for k, v := range globalTags {
+			tags[k] = v
+		}
+		var buckets []float64
+		field := t.Field(i)
+		metric := field.Tag.Get("metric")
+		if metric == "" {
+			return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
+		}
+		if tagString := field.Tag.Get("tags"); tagString != "" {
+			tagPairs := strings.Split(tagString, ",")
+			for _, tagPair := range tagPairs {
+				tag := strings.Split(tagPair, "=")
+				if len(tag) != 2 {
+					return fmt.Errorf(
+						"Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
+						field.Name, tagPair, tagString)
+				}
+				tags[tag[0]] = tag[1]
+			}
+		}
+		if bucketString := field.Tag.Get("buckets"); bucketString != "" {
+			if field.Type.AssignableTo(timerPtrType) {
+				// TODO: Parse timer duration buckets
+				return fmt.Errorf(
+					"Field [%s]: Buckets are not currently initialized for timer metrics",
+					field.Name)
+			} else if field.Type.AssignableTo(histogramPtrType) {
+				bucketValues := strings.Split(bucketString, ",")
+				for _, bucket := range bucketValues {
+					b, err := strconv.ParseFloat(bucket, 64)
+					if err != nil {
+						return fmt.Errorf(
+							"Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
+							field.Name, bucket, bucketString)
+					}
+					buckets = append(buckets, b)
+				}
+			} else {
+				return fmt.Errorf(
+					"Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
+					field.Name)
+			}
+		}
+		help := field.Tag.Get("help")
+		var obj interface{}
+		if field.Type.AssignableTo(counterPtrType) {
+			obj = factory.Counter(Options{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(gaugePtrType) {
+			obj = factory.Gauge(Options{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(timerPtrType) {
+			// TODO: Add buckets once parsed (see TODO above)
+			obj = factory.Timer(TimerOptions{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(histogramPtrType) {
+			obj = factory.Histogram(HistogramOptions{
+				Name:    metric,
+				Tags:    tags,
+				Help:    help,
+				Buckets: buckets,
+			})
+		} else {
+			return fmt.Errorf(
+				"Field %s is not a pointer to timer, gauge, or counter",
+				field.Name)
+		}
+		v.Field(i).Set(reflect.ValueOf(obj))
+	}
+	return nil
+}
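A hedged usage sketch of the struct-tag convention Init and MustInit expect (the queueMetrics type and its tag values are hypothetical):

package main

import "github.com/uber/jaeger-lib/metrics"

type queueMetrics struct {
	Jobs    metrics.Counter   `metric:"jobs" tags:"queue=default" help:"jobs processed"`
	Depth   metrics.Gauge     `metric:"depth"`
	Latency metrics.Histogram `metric:"latency" buckets:"0.1,0.5,1"`
}

func main() {
	var m queueMetrics
	// NullFactory stands in here; a real factory would register each metric.
	metrics.MustInit(&m, metrics.NullFactory, map[string]string{"service": "demo"})
	m.Jobs.Inc(1)
	m.Depth.Update(3)
	m.Latency.Record(0.42)
}

Passing a nil factory to Init has the same effect as NullFactory, which is how callers opt out of reporting.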
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
new file mode 100644
index 0000000000000000000000000000000000000000..4a8abdb539f90abf1b29821c71cb6253bfe163b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// StartStopwatch begins recording the execution time of an event, returning
+// a Stopwatch that should be used to stop recording the time for
+// that event. Multiple events can be in flight simultaneously, each
+// represented by a different active Stopwatch.
+func StartStopwatch(timer Timer) Stopwatch {
+	return Stopwatch{t: timer, start: time.Now()}
+}
+
+// A Stopwatch tracks the execution time of a specific event
+type Stopwatch struct {
+	t     Timer
+	start time.Time
+}
+
+// Stop stops the stopwatch and records the elapsed time.
+func (s Stopwatch) Stop() {
+	s.t.Record(s.ElapsedTime())
+}
+
+// ElapsedTime returns the amount of elapsed time (in time.Duration)
+func (s Stopwatch) ElapsedTime() time.Duration {
+	return time.Since(s.start)
+}
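A short usage sketch (hypothetical) pairing StartStopwatch with a Timer:

package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	timer := metrics.NullTimer // a real factory would supply this
	sw := metrics.StartStopwatch(timer)
	time.Sleep(10 * time.Millisecond) // the operation being measured
	sw.Stop()                         // records time.Since(start) on the timer
}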
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e18d222abb4abb846a630afd69cfd268fcf0dff8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// Timer accumulates observations about how long some operation took,
+// and also maintains a histogram of percentiles.
+type Timer interface {
+	// Records the time passed in.
+	Record(time.Duration)
+}
+
+// NullTimer timer that does nothing
+var NullTimer Timer = nullTimer{}
+
+type nullTimer struct{}
+
+func (nullTimer) Record(time.Duration) {}
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
new file mode 100644
index 0000000000000000000000000000000000000000..9857fe53d3c91c527ee9b363df2c6d3ed8c52576
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errgroup provides synchronization, error propagation, and Context
+// cancelation for groups of goroutines working on subtasks of a common task.
+package errgroup
+
+import (
+	"context"
+	"sync"
+)
+
+// A Group is a collection of goroutines working on subtasks that are part of
+// the same overall task.
+//
+// A zero Group is valid and does not cancel on error.
+type Group struct {
+	cancel func()
+
+	wg sync.WaitGroup
+
+	errOnce sync.Once
+	err     error
+}
+
+// WithContext returns a new Group and an associated Context derived from ctx.
+//
+// The derived Context is canceled the first time a function passed to Go
+// returns a non-nil error or the first time Wait returns, whichever occurs
+// first.
+func WithContext(ctx context.Context) (*Group, context.Context) {
+	ctx, cancel := context.WithCancel(ctx)
+	return &Group{cancel: cancel}, ctx
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
+func (g *Group) Wait() error {
+	g.wg.Wait()
+	if g.cancel != nil {
+		g.cancel()
+	}
+	return g.err
+}
+
+// Go calls the given function in a new goroutine.
+//
+// The first call to return a non-nil error cancels the group; its error will be
+// returned by Wait.
+func (g *Group) Go(f func() error) {
+	g.wg.Add(1)
+
+	go func() {
+		defer g.wg.Done()
+
+		if err := f(); err != nil {
+			g.errOnce.Do(func() {
+				g.err = err
+				if g.cancel != nil {
+					g.cancel()
+				}
+			})
+		}
+	}()
+}
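A compact usage sketch of errgroup's WithContext pattern (the URLs are placeholders): the first goroutine to return a non-nil error cancels the derived context, and Wait reports that error.

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	urls := []string{"https://example.com", "https://example.org"}
	for _, u := range urls {
		u := u // capture the loop variable for the goroutine
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // cancels ctx for the remaining fetches
			}
			return resp.Body.Close()
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}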
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9278804ba5f11fd74415bcf37f105e64ea2f838f..cbc9ec0cbe6948b3008dced788868c8a1503edf4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -26,7 +26,7 @@ github.com/alecthomas/units
 github.com/andybalholm/cascadia
 # github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 => github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496
 github.com/asaskevich/govalidator
-# github.com/aws/aws-sdk-go v1.30.12 => github.com/aws/aws-sdk-go v1.30.12
+# github.com/aws/aws-sdk-go v1.33.12 => github.com/aws/aws-sdk-go v1.30.12
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/awserr
@@ -75,7 +75,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
 github.com/beevik/etree
 # github.com/beorn7/perks v1.0.1 => github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
-# github.com/blang/semver v3.5.0+incompatible => github.com/blang/semver v3.5.0+incompatible
+# github.com/blang/semver v3.5.1+incompatible => github.com/blang/semver v3.5.0+incompatible
 github.com/blang/semver
 # github.com/cespare/xxhash v1.1.0 => github.com/cespare/xxhash v1.1.0
 github.com/cespare/xxhash
@@ -87,7 +87,7 @@ github.com/container-storage-interface/spec/lib/go/csi
 github.com/containernetworking/cni/pkg/types
 github.com/containernetworking/cni/pkg/types/020
 github.com/containernetworking/cni/pkg/types/current
-# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f => github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
+# github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf => github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 github.com/coreos/go-systemd/daemon
 github.com/coreos/go-systemd/journal
 # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f => github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
@@ -133,6 +133,8 @@ github.com/docker/go-units
 # github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c => github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c
 github.com/docker/spdystream
 github.com/docker/spdystream/spdy
+# github.com/edsrzf/mmap-go v1.0.0 => github.com/edsrzf/mmap-go v1.0.0
+github.com/edsrzf/mmap-go
 # github.com/elastic/go-elasticsearch/v5 v5.6.1 => github.com/elastic/go-elasticsearch/v5 v5.6.1
 github.com/elastic/go-elasticsearch/v5
 github.com/elastic/go-elasticsearch/v5/esapi
@@ -150,9 +152,9 @@ github.com/elastic/go-elasticsearch/v7/estransport
 github.com/elastic/go-elasticsearch/v7/internal/version
 # github.com/emicklei/go-restful v2.14.3+incompatible => github.com/emicklei/go-restful v2.14.3+incompatible
 github.com/emicklei/go-restful
+github.com/emicklei/go-restful/log
 # github.com/emicklei/go-restful-openapi v1.4.1 => github.com/emicklei/go-restful-openapi v1.4.1
 github.com/emicklei/go-restful-openapi
-github.com/emicklei/go-restful/log
 # github.com/emirpasic/gods v1.12.0 => github.com/emirpasic/gods v1.12.0
 github.com/emirpasic/gods/containers
 github.com/emirpasic/gods/lists
@@ -262,7 +264,9 @@ github.com/golang/protobuf/ptypes/empty
 github.com/golang/protobuf/ptypes/struct
 github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
-# github.com/google/go-cmp v0.4.0 => github.com/google/go-cmp v0.4.0
+# github.com/golang/snappy v0.0.1 => github.com/golang/snappy v0.0.1
+github.com/golang/snappy
+# github.com/google/go-cmp v0.5.0 => github.com/google/go-cmp v0.4.0
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
@@ -367,6 +371,8 @@ github.com/nxadm/tail/ratelimiter
 github.com/nxadm/tail/util
 github.com/nxadm/tail/watch
 github.com/nxadm/tail/winfile
+# github.com/oklog/ulid v1.3.1 => github.com/oklog/ulid v1.3.1
+github.com/oklog/ulid
 # github.com/onsi/ginkgo v1.14.0 => github.com/onsi/ginkgo v1.14.0
 github.com/onsi/ginkgo
 github.com/onsi/ginkgo/config
@@ -442,6 +448,10 @@ github.com/opencontainers/go-digest
 # github.com/opencontainers/image-spec v1.0.1 => github.com/opencontainers/image-spec v1.0.1
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
+# github.com/opentracing/opentracing-go v1.2.0 => github.com/opentracing/opentracing-go v1.1.0
+github.com/opentracing/opentracing-go
+github.com/opentracing/opentracing-go/ext
+github.com/opentracing/opentracing-go/log
 # github.com/patrickmn/go-cache v2.1.0+incompatible => github.com/patrickmn/go-cache v2.1.0+incompatible
 github.com/patrickmn/go-cache
 # github.com/pelletier/go-buffruneio v0.2.0 => github.com/pelletier/go-buffruneio v0.2.0
@@ -475,7 +485,21 @@ github.com/projectcalico/libcalico-go/lib/selector/tokenizer
 github.com/projectcalico/libcalico-go/lib/set
 # github.com/prometheus-community/prom-label-proxy v0.2.0 => github.com/prometheus-community/prom-label-proxy v0.2.0
 github.com/prometheus-community/prom-label-proxy/injectproxy
-# github.com/prometheus/alertmanager v0.20.0 => github.com/prometheus/alertmanager v0.20.0
+# github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a
+github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions
+github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/internalinterfaces
+github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring
+github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1
+github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1
+github.com/prometheus-operator/prometheus-operator/pkg/client/versioned
+github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake
+github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme
+github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1
+github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/fake
+# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1
+# github.com/prometheus/alertmanager v0.21.0 => github.com/prometheus/alertmanager v0.20.0
 github.com/prometheus/alertmanager/api/v2/client
 github.com/prometheus/alertmanager/api/v2/client/alert
 github.com/prometheus/alertmanager/api/v2/client/alertgroup
@@ -494,7 +518,7 @@ github.com/prometheus/client_golang/prometheus/testutil
 github.com/prometheus/client_golang/prometheus/testutil/promlint
 # github.com/prometheus/client_model v0.2.0 => github.com/prometheus/client_model v0.2.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.10.0 => github.com/prometheus/common v0.10.0
+# github.com/prometheus/common v0.11.1 => github.com/prometheus/common v0.10.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/log
@@ -503,26 +527,41 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1 => github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1
+# github.com/prometheus/prometheus v1.8.2-0.20200907175821-8219b442c864 => github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1
+github.com/prometheus/prometheus/pkg/exemplar
 github.com/prometheus/prometheus/pkg/labels
+github.com/prometheus/prometheus/pkg/rulefmt
+github.com/prometheus/prometheus/pkg/textparse
+github.com/prometheus/prometheus/pkg/timestamp
 github.com/prometheus/prometheus/pkg/value
+github.com/prometheus/prometheus/promql
 github.com/prometheus/prometheus/promql/parser
+github.com/prometheus/prometheus/rules
 github.com/prometheus/prometheus/storage
+github.com/prometheus/prometheus/template
+github.com/prometheus/prometheus/tsdb
 github.com/prometheus/prometheus/tsdb/chunkenc
 github.com/prometheus/prometheus/tsdb/chunks
 github.com/prometheus/prometheus/tsdb/encoding
 github.com/prometheus/prometheus/tsdb/errors
 github.com/prometheus/prometheus/tsdb/fileutil
+github.com/prometheus/prometheus/tsdb/goversion
+github.com/prometheus/prometheus/tsdb/index
+github.com/prometheus/prometheus/tsdb/record
 github.com/prometheus/prometheus/tsdb/tombstones
 github.com/prometheus/prometheus/tsdb/tsdbutil
+github.com/prometheus/prometheus/tsdb/wal
+github.com/prometheus/prometheus/util/stats
 github.com/prometheus/prometheus/util/strutil
+github.com/prometheus/prometheus/util/teststorage
+github.com/prometheus/prometheus/util/testutil
 # github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a => github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
 github.com/rcrowley/go-metrics
 # github.com/sergi/go-diff v1.0.0 => github.com/sergi/go-diff v1.0.0
 github.com/sergi/go-diff/diffmatchpatch
-# github.com/sirupsen/logrus v1.4.2 => github.com/sirupsen/logrus v1.4.2
+# github.com/sirupsen/logrus v1.6.0 => github.com/sirupsen/logrus v1.4.2
 github.com/sirupsen/logrus
-# github.com/sony/sonyflake v1.0.0 => github.com/sony/sonyflake v1.0.0
+# github.com/sony/sonyflake v1.0.0 => github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009
 github.com/sony/sonyflake
 # github.com/speps/go-hashids v2.0.0+incompatible => github.com/speps/go-hashids v2.0.0+incompatible
 github.com/speps/go-hashids
@@ -531,7 +570,7 @@ github.com/spf13/afero
 github.com/spf13/afero/mem
 # github.com/spf13/cast v1.3.0 => github.com/spf13/cast v1.3.0
 github.com/spf13/cast
-# github.com/spf13/cobra v1.0.0 => github.com/spf13/cobra v1.0.0
+# github.com/spf13/cobra v1.0.0 => github.com/spf13/cobra v0.0.5
 github.com/spf13/cobra
 # github.com/spf13/jwalterweatherman v1.0.0 => github.com/spf13/jwalterweatherman v1.0.0
 github.com/spf13/jwalterweatherman
@@ -544,8 +583,23 @@ github.com/src-d/gcfg
 github.com/src-d/gcfg/scanner
 github.com/src-d/gcfg/token
 github.com/src-d/gcfg/types
-# github.com/stretchr/testify v1.6.1 => github.com/stretchr/testify v1.6.1
+# github.com/stretchr/testify v1.6.1 => github.com/stretchr/testify v1.4.0
 github.com/stretchr/testify/assert
+# github.com/uber/jaeger-client-go v2.25.0+incompatible => github.com/uber/jaeger-client-go v2.23.0+incompatible
+github.com/uber/jaeger-client-go
+github.com/uber/jaeger-client-go/internal/baggage
+github.com/uber/jaeger-client-go/internal/reporterstats
+github.com/uber/jaeger-client-go/internal/spanlog
+github.com/uber/jaeger-client-go/internal/throttler
+github.com/uber/jaeger-client-go/log
+github.com/uber/jaeger-client-go/thrift
+github.com/uber/jaeger-client-go/thrift-gen/agent
+github.com/uber/jaeger-client-go/thrift-gen/jaeger
+github.com/uber/jaeger-client-go/thrift-gen/sampling
+github.com/uber/jaeger-client-go/thrift-gen/zipkincore
+github.com/uber/jaeger-client-go/utils
+# github.com/uber/jaeger-lib v2.2.0+incompatible => github.com/uber/jaeger-lib v2.2.0+incompatible
+github.com/uber/jaeger-lib/metrics
 # github.com/xanzy/ssh-agent v0.2.1 => github.com/xanzy/ssh-agent v0.2.1
 github.com/xanzy/ssh-agent
 # github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b => github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b
@@ -581,18 +635,18 @@ go.mongodb.org/mongo-driver/bson/primitive
 go.mongodb.org/mongo-driver/x/bsonx/bsoncore
 # go.uber.org/atomic v1.6.0 => go.uber.org/atomic v1.6.0
 go.uber.org/atomic
-# go.uber.org/multierr v1.3.0 => go.uber.org/multierr v1.3.0
+# go.uber.org/multierr v1.4.0 => go.uber.org/multierr v1.3.0
 go.uber.org/multierr
 # go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee => go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee
 go.uber.org/tools/update-license
-# go.uber.org/zap v1.13.0 => go.uber.org/zap v1.13.0
+# go.uber.org/zap v1.14.1 => go.uber.org/zap v1.13.0
 go.uber.org/zap
 go.uber.org/zap/buffer
 go.uber.org/zap/internal/bufferpool
 go.uber.org/zap/internal/color
 go.uber.org/zap/internal/exit
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.0.0-20200422194213-44a606286825 => golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+# golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de => golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
@@ -619,7 +673,7 @@ golang.org/x/crypto/ssh/terminal
 # golang.org/x/lint v0.0.0-20200302205851-738671d3881b => golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f
 golang.org/x/lint
 golang.org/x/lint/golint
-# golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 => golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+# golang.org/x/net v0.0.0-20200707034311-ab3426394381 => golang.org/x/net v0.0.0-20190620200207-3b0461eec859
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
 golang.org/x/net/html
@@ -637,9 +691,10 @@ golang.org/x/net/websocket
 # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a => golang.org/x/sync v0.0.0-20190423024810-112230192c58
+# golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 => golang.org/x/sync v0.0.0-20190423024810-112230192c58
+golang.org/x/sync/errgroup
 golang.org/x/sync/singleflight
-# golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
+# golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows
@@ -665,7 +720,7 @@ golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/width
-# golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 golang.org/x/time/rate
 # golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5 => golang.org/x/tools v0.0.0-20190710153321-831012c29e42
 golang.org/x/tools/go/analysis
@@ -700,12 +755,12 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb => google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb
+# google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 => google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.29.0 => google.golang.org/grpc v1.26.0
+# google.golang.org/grpc v1.30.0 => google.golang.org/grpc v1.26.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -847,7 +902,7 @@ gopkg.in/tomb.v1
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.3.0 => gopkg.in/yaml.v2 v2.3.0
 gopkg.in/yaml.v2
-# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
 gopkg.in/yaml.v3
 # gotest.tools v2.2.0+incompatible => gotest.tools v2.2.0+incompatible
 gotest.tools/assert
@@ -1175,7 +1230,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
 # k8s.io/cli-runtime v0.18.6 => k8s.io/cli-runtime v0.18.6
 k8s.io/cli-runtime/pkg/printers
-# k8s.io/client-go v0.19.0 => k8s.io/client-go v0.18.6
+# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.18.6
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/fake
 k8s.io/client-go/dynamic