Commit e9e399de authored by 710leo

Update vendor

Parent 99bd1089
This diff is collapsed.
......@@ -8,8 +8,8 @@ import (
"github.com/didi/nightingale/src/modules/transfer/cache"
"github.com/didi/nightingale/src/toolkits/stats"
"github.com/didi/nightingale/src/toolkits/str"
- "github.com/influxdata/influxdb/client/v2"
+ client "github.com/influxdata/influxdb/client/v2"
"github.com/toolkits/pkg/concurrent/semaphore"
"github.com/toolkits/pkg/container/list"
"github.com/toolkits/pkg/logger"
......@@ -308,14 +308,14 @@ func (c *InfluxClient) Send(items []*dataobj.InfluxdbItem) error {
Precision: c.Precision,
})
if err != nil {
- logger.Errorf("create batch points error: ", err)
+ logger.Error("create batch points error: ", err)
return err
}
for _, item := range items {
pt, err := client.NewPoint(item.Measurement, item.Tags, item.Fields, time.Unix(item.Timestamp, 0))
if err != nil {
- logger.Errorf("create new points error: ", err)
+ logger.Error("create new points error: ", err)
continue
}
bp.AddPoint(pt)
......
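The two hunks above swap logger.Errorf for logger.Error because the message contains no format verbs. Assuming the toolkits logger follows fmt's Printf/Print semantics, the difference is visible with the standard library alone; a minimal sketch:

package main

import (
    "errors"
    "fmt"
)

func main() {
    err := errors.New("dial timeout")
    // Printf-style: err has no matching verb, so it surfaces as an %!(EXTRA ...) artifact.
    fmt.Println(fmt.Sprintf("create batch points error: ", err))
    // Print-style: operands are concatenated, which is what these call sites intend.
    fmt.Println(fmt.Sprint("create batch points error: ", err))
}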
The MIT License (MIT)
Copyright (c) 2013-2018 InfluxData Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package client
import (
"encoding/json"
"time"
)
type (
// Identifier is an identifier value.
Identifier string
// StringValue is a string literal.
StringValue string
// RegexValue is a regexp literal.
RegexValue string
// NumberValue is a number literal.
NumberValue float64
// IntegerValue is an integer literal.
IntegerValue int64
// BooleanValue is a boolean literal.
BooleanValue bool
// TimeValue is a time literal.
TimeValue time.Time
// DurationValue is a duration literal.
DurationValue time.Duration
)
func (v Identifier) MarshalJSON() ([]byte, error) {
m := map[string]string{"identifier": string(v)}
return json.Marshal(m)
}
func (v StringValue) MarshalJSON() ([]byte, error) {
m := map[string]string{"string": string(v)}
return json.Marshal(m)
}
func (v RegexValue) MarshalJSON() ([]byte, error) {
m := map[string]string{"regex": string(v)}
return json.Marshal(m)
}
func (v NumberValue) MarshalJSON() ([]byte, error) {
m := map[string]float64{"number": float64(v)}
return json.Marshal(m)
}
func (v IntegerValue) MarshalJSON() ([]byte, error) {
m := map[string]int64{"integer": int64(v)}
return json.Marshal(m)
}
func (v BooleanValue) MarshalJSON() ([]byte, error) {
m := map[string]bool{"boolean": bool(v)}
return json.Marshal(m)
}
func (v TimeValue) MarshalJSON() ([]byte, error) {
t := time.Time(v)
m := map[string]string{"string": t.Format(time.RFC3339Nano)}
return json.Marshal(m)
}
func (v DurationValue) MarshalJSON() ([]byte, error) {
m := map[string]int64{"duration": int64(v)}
return json.Marshal(m)
}
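For reference, a minimal sketch of marshaling these literal types; the single-key JSON objects follow directly from the MarshalJSON methods above, and the client alias matches the import in the first hunk of this diff:

package main

import (
    "encoding/json"
    "fmt"
    "time"

    client "github.com/influxdata/influxdb/client/v2"
)

func main() {
    for _, v := range []json.Marshaler{
        client.Identifier("cpu"),
        client.StringValue("host-1"),
        client.IntegerValue(42),
        client.DurationValue(5 * time.Minute),
    } {
        b, err := json.Marshal(v)
        if err != nil {
            fmt.Println("marshal error:", err)
            continue
        }
        fmt.Println(string(b)) // e.g. {"identifier":"cpu"}
    }
}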
package client
import (
"context"
"fmt"
"io"
"net"
"time"
)
const (
// UDPPayloadSize is a reasonable default payload size for UDP packets that
// could be travelling over the internet.
UDPPayloadSize = 512
)
// UDPConfig is the config data needed to create a UDP Client.
type UDPConfig struct {
// Addr should be of the form "host:port"
// or "[ipv6-host%zone]:port".
Addr string
// PayloadSize is the maximum size of a UDP client message; optional.
// Tune this based on your network. Defaults to UDPPayloadSize.
PayloadSize int
}
// NewUDPClient returns a client interface for writing to an InfluxDB UDP
// service from the given config.
func NewUDPClient(conf UDPConfig) (Client, error) {
var udpAddr *net.UDPAddr
udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, err
}
payloadSize := conf.PayloadSize
if payloadSize == 0 {
payloadSize = UDPPayloadSize
}
return &udpclient{
conn: conn,
payloadSize: payloadSize,
}, nil
}
// Close releases the udpclient's resources.
func (uc *udpclient) Close() error {
return uc.conn.Close()
}
type udpclient struct {
conn io.WriteCloser
payloadSize int
}
func (uc *udpclient) Write(bp BatchPoints) error {
var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
var d, _ = time.ParseDuration("1" + bp.Precision())
var delayedError error
var checkBuffer = func(n int) {
if len(b) > 0 && len(b)+n > uc.payloadSize {
if _, err := uc.conn.Write(b); err != nil {
delayedError = err
}
b = b[:0]
}
}
for _, p := range bp.Points() {
p.pt.Round(d)
pointSize := p.pt.StringSize() + 1 // include newline in size
//point := p.pt.RoundedString(d) + "\n"
checkBuffer(pointSize)
if p.Time().IsZero() || pointSize <= uc.payloadSize {
b = p.pt.AppendString(b)
b = append(b, '\n')
continue
}
points := p.pt.Split(uc.payloadSize - 1) // account for newline character
for _, sp := range points {
checkBuffer(sp.StringSize() + 1)
b = sp.AppendString(b)
b = append(b, '\n')
}
}
if len(b) > 0 {
if _, err := uc.conn.Write(b); err != nil {
return err
}
}
return delayedError
}
func (uc *udpclient) Query(q Query) (*Response, error) {
return nil, fmt.Errorf("querying via UDP is not supported")
}
func (uc *udpclient) QueryCtx(ctx context.Context, q Query) (*Response, error) {
return nil, fmt.Errorf("querying via UDP is not supported")
}
func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) {
return nil, fmt.Errorf("querying via UDP is not supported")
}
func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
return 0, "", nil
}
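A minimal sketch of driving this UDP client; the listener address is an assumption (8089 is a commonly configured InfluxDB UDP port), and NewBatchPoints/NewPoint come from the same client/v2 package:

package main

import (
    "log"
    "time"

    client "github.com/influxdata/influxdb/client/v2"
)

func main() {
    c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    bp, err := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})
    if err != nil {
        log.Fatal(err)
    }
    pt, err := client.NewPoint("cpu",
        map[string]string{"host": "server01"},
        map[string]interface{}{"value": 0.64},
        time.Now())
    if err != nil {
        log.Fatal(err)
    }
    bp.AddPoint(pt)

    // Write packs the points into datagrams no larger than PayloadSize.
    if err := c.Write(bp); err != nil {
        log.Fatal(err)
    }
}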
package models
import (
"errors"
"strings"
)
// ConsistencyLevel represents a required replication criterion that must be met
// before a write can be returned as successful.
//
// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
type ConsistencyLevel int
const (
// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
ConsistencyLevelAny ConsistencyLevel = iota
// ConsistencyLevelOne requires at least one data node acknowledged a write.
ConsistencyLevelOne
// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
ConsistencyLevelQuorum
// ConsistencyLevelAll requires all data nodes to acknowledge a write.
ConsistencyLevelAll
)
var (
// ErrInvalidConsistencyLevel is returned when parsing the string version
// of a consistency level.
ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)
// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
switch strings.ToLower(level) {
case "any":
return ConsistencyLevelAny, nil
case "one":
return ConsistencyLevelOne, nil
case "quorum":
return ConsistencyLevelQuorum, nil
case "all":
return ConsistencyLevelAll, nil
default:
return 0, ErrInvalidConsistencyLevel
}
}
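Parsing is case-insensitive, and anything outside the four known names yields ErrInvalidConsistencyLevel; a short sketch:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
)

func main() {
    for _, s := range []string{"any", "ONE", "Quorum", "all", "majority"} {
        level, err := models.ParseConsistencyLevel(s)
        fmt.Println(s, "->", level, err) // "majority" -> 0 invalid consistency level
    }
}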
// Code generated by "stringer -type=FieldType"; DO NOT EDIT.
package models
import "strconv"
const _FieldType_name = "IntegerFloatBooleanStringEmptyUnsigned"
var _FieldType_index = [...]uint8{0, 7, 12, 19, 25, 30, 38}
func (i FieldType) String() string {
if i < 0 || i >= FieldType(len(_FieldType_index)-1) {
return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]]
}
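Assuming the FieldType constants declared elsewhere in this package follow the order baked into _FieldType_name (Integer, Float, Boolean, String, Empty, Unsigned), the generated String method behaves like this sketch:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
)

func main() {
    fmt.Println(models.Integer)      // Integer
    fmt.Println(models.Unsigned)     // Unsigned
    fmt.Println(models.FieldType(9)) // FieldType(9): out-of-range values fall back
}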
package models
//go:generate stringer -type=FieldType
package models // import "github.com/influxdata/influxdb/models"
// from stdlib hash/fnv/fnv.go
const (
prime64 = 1099511628211
offset64 = 14695981039346656037
)
// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
type InlineFNV64a uint64
// NewInlineFNV64a returns a new instance of InlineFNV64a.
func NewInlineFNV64a() InlineFNV64a {
return offset64
}
// Write adds data to the running hash.
func (s *InlineFNV64a) Write(data []byte) (int, error) {
hash := uint64(*s)
for _, c := range data {
hash ^= uint64(c)
hash *= prime64
}
*s = InlineFNV64a(hash)
return len(data), nil
}
// Sum64 returns the uint64 of the current resulting hash.
func (s *InlineFNV64a) Sum64() uint64 {
return uint64(*s)
}
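A short usage sketch; Write never fails, it just folds the bytes into the running hash, so its error can be ignored:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
)

func main() {
    h := models.NewInlineFNV64a()
    h.Write([]byte("cpu"))
    h.Write([]byte("host=server01"))
    fmt.Printf("%#x\n", h.Sum64())
}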
package models // import "github.com/influxdata/influxdb/models"
import (
"reflect"
"strconv"
"unsafe"
)
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}
// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseUint(s, base, bitSize)
}
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
s := unsafeBytesToString(b)
return strconv.ParseFloat(s, bitSize)
}
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
return strconv.ParseBool(unsafeBytesToString(b))
}
// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
s := *(*string)(unsafe.Pointer(&dst))
return s
}
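These helpers are unexported, but the trick they rely on is easy to reproduce; a standalone sketch of the same zero-copy conversion, with the same caveat that the string must not outlive the byte slice or observe mutations to it:

package main

import (
    "fmt"
    "reflect"
    "strconv"
    "unsafe"
)

// bytesToString mirrors unsafeBytesToString above: the returned string shares
// the slice's backing array, so no allocation or copy happens.
func bytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{Data: src.Data, Len: src.Len}
    return *(*string)(unsafe.Pointer(&dst))
}

func main() {
    b := []byte("3.14159")
    f, err := strconv.ParseFloat(bytesToString(b), 64)
    fmt.Println(f, err) // 3.14159 <nil>
}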
This diff is collapsed.
package models
import (
"sort"
)
// Row represents a single row returned from the execution of a statement.
type Row struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Columns []string `json:"columns,omitempty"`
Values [][]interface{} `json:"values,omitempty"`
Partial bool `json:"partial,omitempty"`
}
// SameSeries returns true if r contains values for the same series as o.
func (r *Row) SameSeries(o *Row) bool {
return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}
// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
h := NewInlineFNV64a()
keys := r.tagsKeys()
for _, k := range keys {
h.Write([]byte(k))
h.Write([]byte(r.Tags[k]))
}
return h.Sum64()
}
// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
a := make([]string, 0, len(r.Tags))
for k := range r.Tags {
a = append(a, k)
}
sort.Strings(a)
return a
}
// Rows represents a collection of rows. Rows implements sort.Interface.
type Rows []*Row
// Len implements sort.Interface.
func (p Rows) Len() int { return len(p) }
// Less implements sort.Interface.
func (p Rows) Less(i, j int) bool {
// Sort by name first.
if p[i].Name != p[j].Name {
return p[i].Name < p[j].Name
}
// Sort by tag set hash. Tags don't have a meaningful sort order so we
// just compute a hash and sort by that instead. This allows the tests
// to receive rows in a predictable order every time.
return p[i].tagsHash() < p[j].tagsHash()
}
// Swap implements sort.Interface.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
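Because Rows implements sort.Interface, it drops straight into sort.Sort; a sketch with arbitrary tag values:

package main

import (
    "fmt"
    "sort"

    "github.com/influxdata/influxdb/models"
)

func main() {
    rows := models.Rows{
        {Name: "mem", Tags: map[string]string{"host": "b"}},
        {Name: "cpu", Tags: map[string]string{"host": "c"}},
        {Name: "cpu", Tags: map[string]string{"host": "a"}},
    }
    sort.Sort(rows) // by Name first, then by tag-set hash
    for _, r := range rows {
        fmt.Println(r.Name, r.Tags)
    }
}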
package models
// Statistic is the representation of a statistic used by the monitoring service.
type Statistic struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Values map[string]interface{} `json:"values"`
}
// NewStatistic returns an initialized Statistic.
func NewStatistic(name string) Statistic {
return Statistic{
Name: name,
Tags: make(map[string]string),
Values: make(map[string]interface{}),
}
}
// StatisticTags is a map that can be merged with others without causing
// mutations to either map.
type StatisticTags map[string]string
// Merge creates a new map containing the merged contents of tags and t.
// If both tags and the receiver map contain the same key, the value in tags
// is used in the resulting map.
//
// Merge always returns a usable map.
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
// Add everything in tags to the result.
out := make(map[string]string, len(tags))
for k, v := range tags {
out[k] = v
}
// Only add values from t that don't appear in tags.
for k, v := range t {
if _, ok := tags[k]; !ok {
out[k] = v
}
}
return out
}
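Merge never mutates either operand, and the argument wins on key conflicts; a sketch:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
)

func main() {
    base := models.StatisticTags{"engine": "tsm1", "path": "/var/a"}
    merged := base.Merge(map[string]string{"path": "/var/b", "id": "0"})
    fmt.Println(merged) // map[engine:tsm1 id:0 path:/var/b]
    fmt.Println(base)   // unchanged: map[engine:tsm1 path:/var/a]
}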
package models
import (
"bytes"
"strings"
)
// TagKeysSet provides set operations for combining Tags.
type TagKeysSet struct {
i int
keys [2][][]byte
tmp [][]byte
}
// Clear removes all the elements of TagKeysSet and ensures all internal
// buffers are reset.
func (set *TagKeysSet) Clear() {
set.clear(set.keys[0])
set.clear(set.keys[1])
set.clear(set.tmp)
set.i = 0
set.keys[0] = set.keys[0][:0]
}
func (set *TagKeysSet) clear(b [][]byte) {
b = b[:cap(b)]
for i := range b {
b[i] = nil
}
}
// KeysBytes returns the merged keys in lexicographical order.
// The slice is valid until the next call to UnionKeys, UnionBytes, or Clear.
func (set *TagKeysSet) KeysBytes() [][]byte {
return set.keys[set.i&1]
}
// Keys returns a copy of the merged keys in lexicographical order.
func (set *TagKeysSet) Keys() []string {
keys := set.KeysBytes()
s := make([]string, 0, len(keys))
for i := range keys {
s = append(s, string(keys[i]))
}
return s
}
func (set *TagKeysSet) String() string {
var s []string
for _, k := range set.KeysBytes() {
s = append(s, string(k))
}
return strings.Join(s, ",")
}
// IsSupersetKeys returns true if the TagKeysSet is a superset of all the keys
// contained in other.
func (set *TagKeysSet) IsSupersetKeys(other Tags) bool {
keys := set.keys[set.i&1]
i, j := 0, 0
for i < len(keys) && j < len(other) {
if cmp := bytes.Compare(keys[i], other[j].Key); cmp > 0 {
return false
} else if cmp == 0 {
j++
}
i++
}
return j == len(other)
}
// IsSupersetBytes returns true if the TagKeysSet is a superset of all the keys
// in other.
// Other must be lexicographically sorted or the results are undefined.
func (set *TagKeysSet) IsSupersetBytes(other [][]byte) bool {
keys := set.keys[set.i&1]
i, j := 0, 0
for i < len(keys) && j < len(other) {
if cmp := bytes.Compare(keys[i], other[j]); cmp > 0 {
return false
} else if cmp == 0 {
j++
}
i++
}
return j == len(other)
}
// UnionKeys updates the set so that it is the union of itself and all the
// keys contained in other.
func (set *TagKeysSet) UnionKeys(other Tags) {
if set.IsSupersetKeys(other) {
return
}
if l := len(other); cap(set.tmp) < l {
set.tmp = make([][]byte, l)
} else {
set.tmp = set.tmp[:l]
}
for i := range other {
set.tmp[i] = other[i].Key
}
set.merge(set.tmp)
}
// UnionBytes updates the set so that it is the union of itself and all the
// keys contained in other.
// Other must be lexicographically sorted or the results are undefined.
func (set *TagKeysSet) UnionBytes(other [][]byte) {
if set.IsSupersetBytes(other) {
return
}
set.merge(other)
}
func (set *TagKeysSet) merge(in [][]byte) {
keys := set.keys[set.i&1]
l := len(keys) + len(in)
set.i = (set.i + 1) & 1
keya := set.keys[set.i&1]
if cap(keya) < l {
keya = make([][]byte, 0, l)
} else {
keya = keya[:0]
}
i, j := 0, 0
for i < len(keys) && j < len(in) {
ki, kj := keys[i], in[j]
if cmp := bytes.Compare(ki, kj); cmp < 0 {
i++
} else if cmp > 0 {
ki = kj
j++
} else {
i++
j++
}
keya = append(keya, ki)
}
if i < len(keys) {
keya = append(keya, keys[i:]...)
} else if j < len(in) {
keya = append(keya, in[j:]...)
}
set.keys[set.i&1] = keya
}
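The zero value is ready to use; a sketch of unioning two key lists (UnionBytes requires its input to be lexicographically sorted):

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
)

func main() {
    var set models.TagKeysSet
    set.UnionBytes([][]byte{[]byte("host"), []byte("region")})
    set.UnionBytes([][]byte{[]byte("dc"), []byte("host")})
    fmt.Println(set.String()) // dc,host,region
}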
package models
// Helper time methods since parsing time can easily overflow and we only support a
// specific time range.
import (
"fmt"
"math"
"time"
)
const (
// MinNanoTime is the minimum time that can be represented.
//
// 1677-09-21 00:12:43.145224194 +0000 UTC
//
// The two lowest minimum integers are used as sentinel values. The
// minimum value needs to be used as a value lower than any other value for
// comparisons and another separate value is needed to act as a sentinel
// default value that is unusable by the user, but usable internally.
// Because these two values need to be used for a special purpose, we do
// not allow users to write points at these two times.
MinNanoTime = int64(math.MinInt64) + 2
// MaxNanoTime is the maximum time that can be represented.
//
// 2262-04-11 23:47:16.854775806 +0000 UTC
//
// The highest time represented by a nanosecond needs to be used for an
// exclusive range in the shard group, so the maximum time needs to be one
// less than the possible maximum number of nanoseconds representable by an
// int64 so that we don't lose a point at that one time.
MaxNanoTime = int64(math.MaxInt64) - 1
)
var (
minNanoTime = time.Unix(0, MinNanoTime).UTC()
maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
)
// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
// supported range.
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
mult := GetPrecisionMultiplier(precision)
if t, ok := safeSignedMult(timestamp, mult); ok {
tme := time.Unix(0, t).UTC()
return tme, CheckTime(tme)
}
return time.Time{}, ErrTimeOutOfRange
}
// CheckTime checks that a time is within the safe range.
func CheckTime(t time.Time) error {
if t.Before(minNanoTime) || t.After(maxNanoTime) {
return ErrTimeOutOfRange
}
return nil
}
// Perform the multiplication and check to make sure it didn't overflow.
func safeSignedMult(a, b int64) (int64, bool) {
if a == 0 || b == 0 || a == 1 || b == 1 {
return a * b, true
}
if a == MinNanoTime || b == MaxNanoTime {
return 0, false
}
c := a * b
return c, c/b == a
}
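A sketch of the happy path and the overflow path; GetPrecisionMultiplier is defined elsewhere in this package, and precision "s" multiplies by 1e9:

package main

import (
    "fmt"
    "math"

    "github.com/influxdata/influxdb/models"
)

func main() {
    t, err := models.SafeCalcTime(1500000000, "s")
    fmt.Println(t, err) // 2017-07-14 02:40:00 +0000 UTC <nil>

    // Multiplying MaxInt64 by 1e6 overflows, so the timestamp is rejected
    // instead of silently wrapping around.
    _, err = models.SafeCalcTime(math.MaxInt64, "ms")
    fmt.Println(err) // time outside range ...
}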
// +build uint uint64
package models
func init() {
EnableUintSupport()
}
// Package escape contains utilities for escaping parts of InfluxQL
// and InfluxDB line protocol.
package escape // import "github.com/influxdata/influxdb/pkg/escape"
import (
"bytes"
"strings"
)
// Codes is a map of bytes to be escaped.
var Codes = map[byte][]byte{
',': []byte(`\,`),
'"': []byte(`\"`),
' ': []byte(`\ `),
'=': []byte(`\=`),
}
// Bytes escapes characters on the input slice, as defined by Codes.
func Bytes(in []byte) []byte {
for b, esc := range Codes {
in = bytes.Replace(in, []byte{b}, esc, -1)
}
return in
}
const escapeChars = `," =`
// IsEscaped returns whether b has any escaped characters,
// i.e. whether b seems to have been processed by Bytes.
func IsEscaped(b []byte) bool {
for len(b) > 0 {
i := bytes.IndexByte(b, '\\')
if i < 0 {
return false
}
if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
return true
}
b = b[i+1:]
}
return false
}
// AppendUnescaped appends the unescaped version of src to dst
// and returns the resulting slice.
func AppendUnescaped(dst, src []byte) []byte {
var pos int
for len(src) > 0 {
next := bytes.IndexByte(src[pos:], '\\')
if next < 0 || pos+next+1 >= len(src) {
return append(dst, src...)
}
if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
if pos+next > 0 {
dst = append(dst, src[:pos+next]...)
}
src = src[pos+next+1:]
pos = 0
} else {
pos += next + 1
}
}
return dst
}
// Unescape returns a new slice containing the unescaped version of in.
func Unescape(in []byte) []byte {
if len(in) == 0 {
return nil
}
if bytes.IndexByte(in, '\\') == -1 {
return in
}
i := 0
inLen := len(in)
// The output size will be no more than inLen. Preallocating the
// capacity of the output is faster and uses less memory than
// letting append() do its own (over)allocation.
out := make([]byte, 0, inLen)
for {
if i >= inLen {
break
}
if in[i] == '\\' && i+1 < inLen {
switch in[i+1] {
case ',':
out = append(out, ',')
i += 2
continue
case '"':
out = append(out, '"')
i += 2
continue
case ' ':
out = append(out, ' ')
i += 2
continue
case '=':
out = append(out, '=')
i += 2
continue
}
}
out = append(out, in[i])
i += 1
}
return out
}
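A round-trip sketch over the byte-slice API:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/pkg/escape"
)

func main() {
    raw := []byte(`cpu load,host=server 01`)
    esc := escape.Bytes(raw)
    fmt.Println(string(esc))           // cpu\ load\,host\=server\ 01
    fmt.Println(escape.IsEscaped(esc)) // true

    // AppendUnescaped writes the unescaped form onto an existing buffer.
    buf := escape.AppendUnescaped(nil, esc)
    fmt.Println(string(buf)) // cpu load,host=server 01
}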
package escape
import "strings"
var (
escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
)
// UnescapeString returns unescaped version of in.
func UnescapeString(in string) string {
if strings.IndexByte(in, '\\') == -1 {
return in
}
return unescaper.Replace(in)
}
// String returns the escaped version of in.
func String(in string) string {
return escaper.Replace(in)
}
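The string variants implement the same mapping with strings.Replacer; a short round trip:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/pkg/escape"
)

func main() {
    s := escape.String(`cpu load,host=server 01`)
    fmt.Println(s)                        // cpu\ load\,host\=server\ 01
    fmt.Println(escape.UnescapeString(s)) // cpu load,host=server 01
}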
......@@ -78,8 +78,8 @@ type SpanningTransformer interface {
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
- // output produced by the Transformer. A nil error can be be returned
- // regardless of whether atEOF is true. If err is nil, then then n must
+ // output produced by the Transformer. A nil error can be returned
+ // regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
......@@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
return dstL.n, srcL.p, err
}
- // Deprecated: use runes.Remove instead.
+ // Deprecated: Use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
......
......@@ -461,6 +461,10 @@ func (rb *reorderBuffer) combineHangul(s, i, k int) {
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
// Lazily load the map used by the combine func below, but do
// it outside of the loop.
recompMapOnce.Do(buildRecompMap)
// UAX #15, section X5 , including Corrigendum #5
// "In any character sequence beginning with starter S, a character C is
// blocked from S if and only if there is some character B between S
......
......@@ -199,9 +199,14 @@ func buildRecompMap() {
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
//
// The caller is responsible for calling
// recompMapOnce.Do(buildRecompMap) sometime before this is called.
func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
recompMapOnce.Do(buildRecompMap)
if recompMap == nil {
panic("caller error") // see func comment
}
return recompMap[key]
}
......
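The change above is ordinary sync.Once lazy initialization hoisted out of the per-rune loop; an illustrative standalone sketch (the names and table contents are hypothetical, only the key packing mirrors combine):

package main

import (
    "fmt"
    "sync"
)

var (
    recomp     map[uint32]rune
    recompOnce sync.Once
)

func buildRecomp() {
    // 'A' (U+0041) followed by combining acute (U+0301) recomposes to 'Á'.
    recomp = map[uint32]rune{uint32('A')<<16 | 0x0301: 'Á'}
}

// lookup assumes recompOnce.Do(buildRecomp) already ran, mirroring the
// contract the hunk above documents for combine.
func lookup(a, b rune) rune {
    return recomp[uint32(uint16(a))<<16|uint32(uint16(b))]
}

func main() {
    recompOnce.Do(buildRecomp) // done once, outside any hot loop
    fmt.Printf("%c\n", lookup('A', 0x0301))
}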
......@@ -128,8 +128,9 @@ func (i *Iter) Next() []byte {
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
+ p0 := i.p
i.setDone()
- return i.rb.src.bytes[i.p:p]
+ return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
......
......@@ -60,8 +60,8 @@ func (w *normWriter) Close() error {
}
// Writer returns a new writer that implements Write(b)
- // by writing f(b) to w. The returned writer may use an
- // an internal buffer to maintain state across Write calls.
+ // by writing f(b) to w. The returned writer may use an
+ // internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
wr := &normWriter{rb: reorderBuffer{}, w: w}
......
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
- // +build go1.10
+ // +build go1.10,!go1.13
package norm
......
This diff is collapsed.
......@@ -18,7 +18,6 @@ func (Form) Reset() {}
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
- n := 0
// Cap the maximum number of src bytes to check.
b := src
eof := atEOF
......@@ -27,13 +26,14 @@ func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
eof = false
b = b[:ns]
}
- i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof)
- n += copy(dst[n:], b[n:i])
+ i, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), eof)
+ n := copy(dst, b[:i])
if !ok {
nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
return nDst + n, nSrc + n, err
}
- if n < len(src) && !atEOF {
+ if err == nil && n < len(src) && !atEOF {
err = transform.ErrShortSrc
}
return n, n, err
......@@ -79,7 +79,7 @@ func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
nSrc += n
nDst += n
if ok {
- if n < rb.nsrc && !atEOF {
+ if err == nil && n < rb.nsrc && !atEOF {
err = transform.ErrShortSrc
}
return nDst, nSrc, err
......
......@@ -3,6 +3,7 @@
The analysis package defines the interface between a modular static
analysis and an analysis driver program.
Background
A static analysis is a function that inspects a package of Go code and
......@@ -41,9 +42,9 @@ the go/analysis/passes/ subdirectory:
package unusedresult
var Analyzer = &analysis.Analyzer{
- Name: "unusedresult",
- Doc: "check for unused results of calls to some functions",
- Run: run,
+ Name: "unusedresult",
+ Doc:  "check for unused results of calls to some functions",
+ Run:  run,
...
}
......@@ -51,7 +52,6 @@ the go/analysis/passes/ subdirectory:
...
}
An analysis driver is a program such as vet that runs a set of
analyses and prints the diagnostics that they report.
The driver program must import the list of Analyzers it needs.
......@@ -107,14 +107,14 @@ multiple analyzers. It is based on the multichecker package
The Analyzer type has more fields besides those shown above:
type Analyzer struct {
- Name string
- Doc string
- Flags flag.FlagSet
- Run func(*Pass) (interface{}, error)
- RunDespiteErrors bool
- ResultType reflect.Type
- Requires []*Analyzer
- FactTypes []Fact
+ Name             string
+ Doc              string
+ Flags            flag.FlagSet
+ Run              func(*Pass) (interface{}, error)
+ RunDespiteErrors bool
+ ResultType       reflect.Type
+ Requires         []*Analyzer
+ FactTypes        []Fact
}
The Flags field declares a set of named (global) flag variables that
......@@ -154,13 +154,13 @@ package being analyzed, and provides operations to the Run function for
reporting diagnostics and other information back to the driver.
type Pass struct {
- Fset *token.FileSet
- Files []*ast.File
- OtherFiles []string
- Pkg *types.Package
- TypesInfo *types.Info
- ResultOf map[*Analyzer]interface{}
- Report func(Diagnostic)
+ Fset       *token.FileSet
+ Files      []*ast.File
+ OtherFiles []string
+ Pkg        *types.Package
+ TypesInfo  *types.Info
+ ResultOf   map[*Analyzer]interface{}
+ Report     func(Diagnostic)
...
}
......@@ -245,7 +245,7 @@ package.
An Analyzer that uses facts must declare their types:
var Analyzer = &analysis.Analyzer{
- Name: "printf",
+ Name:      "printf",
FactTypes: []analysis.Fact{new(isWrapper)},
...
}
......@@ -330,7 +330,5 @@ entirety as:
A tool that provides multiple analyzers can use multichecker in a
similar way, giving it the list of Analyzers.
*/
package analysis
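A minimal runnable analyzer wired up with the singlechecker driver from the same module; the analyzer is deliberately trivial and its name is illustrative:

package main

import (
    "golang.org/x/tools/go/analysis"
    "golang.org/x/tools/go/analysis/singlechecker"
)

var Analyzer = &analysis.Analyzer{
    Name: "noop",
    Doc:  "reports nothing; demonstrates driver wiring only",
    Run: func(pass *analysis.Pass) (interface{}, error) {
        // A real analyzer would walk pass.Files and call pass.Reportf.
        return nil, nil
    },
}

func main() { singlechecker.Main(Analyzer) }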
......@@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
- reported about the loaded packages, with each mode returning all the data of the
- previous mode with some extra added. See the documentation for type LoadMode
+ reported about the loaded packages. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
......
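A minimal sketch of loading a package through this API; the pattern "fmt" and the two mode bits are arbitrary choices for illustration:

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func main() {
    cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
    pkgs, err := packages.Load(cfg, "fmt")
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pkgs {
        fmt.Println(p.PkgPath, len(p.GoFiles), "files")
    }
}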
......@@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver {
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = stderr
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
- fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
- }
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
}
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
+ }
var response driverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
......
......@@ -26,7 +26,6 @@ import (
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/semver"
"golang.org/x/tools/internal/span"
)
// debug controls verbose logging.
......@@ -254,12 +253,7 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu
if len(pkgs) == 0 {
return nil
}
- drivercfg := *cfg
- if getGoInfo().env.modulesOn {
- drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly")
- }
- dr, err := driver(&drivercfg, pkgs...)
+ dr, err := driver(cfg, pkgs...)
if err != nil {
return err
}
......@@ -284,42 +278,43 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
}
dirResponse, err := driver(cfg, pattern)
if err != nil {
if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) {
// There was an error loading the package. Try to load the file as an ad-hoc package.
// Usually the error will appear in a returned package, but may not if we're in modules mode
// and the ad-hoc is located outside a module.
var queryErr error
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
return err // return the original error
dirResponse, queryErr = driver(cfg, query)
if queryErr != nil {
// Return the original error if the attempt to fall back failed.
return err
}
}
// `go list` can report errors for files that are not listed as part of a package's GoFiles.
// In the case of an invalid Go file, we should assume that it is part of package if only
// one package is in the response. The file may have valid contents in an overlay.
if len(dirResponse.Packages) == 1 {
pkg := dirResponse.Packages[0]
for i, err := range pkg.Errors {
s := errorSpan(err)
if !s.IsValid() {
break
}
if len(pkg.CompiledGoFiles) == 0 {
break
}
dir := filepath.Dir(pkg.CompiledGoFiles[0])
filename := filepath.Join(dir, filepath.Base(s.URI().Filename()))
if info, err := os.Stat(filename); err != nil || info.IsDir() {
break
}
if !contains(pkg.CompiledGoFiles, filename) {
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
pkg.GoFiles = append(pkg.GoFiles, filename)
pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...)
}
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
if len(dirResponse.Packages) == 0 && queryErr == nil {
dirResponse.Packages = append(dirResponse.Packages, &Package{
ID: "command-line-arguments",
PkgPath: query,
GoFiles: []string{query},
CompiledGoFiles: []string{query},
Imports: make(map[string]*Package),
})
dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
}
}
// A final attempt to construct an ad-hoc package.
if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 {
var queryErr error
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
return err // return the original error
// Special case to handle issue #33482:
// If this is a file= query for ad-hoc packages where the file only exists on an overlay,
// and exists outside of a module, add the file in for the package.
if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" ||
filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) {
if len(dirResponse.Packages[0].GoFiles) == 0 {
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
// TODO(matloob): check if the file is outside of a root dir?
for path := range cfg.Overlay {
if path == filename {
dirResponse.Packages[0].Errors = nil
dirResponse.Packages[0].GoFiles = []string{path}
dirResponse.Packages[0].CompiledGoFiles = []string{path}
}
}
}
}
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
......@@ -347,75 +342,6 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
return nil
}
// adHocPackage attempts to construct an ad-hoc package given a query that failed.
func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) {
// There was an error loading the package. Try to load the file as an ad-hoc package.
// Usually the error will appear in a returned package, but may not if we're in modules mode
// and the ad-hoc is located outside a module.
dirResponse, err := driver(cfg, query)
if err != nil {
return nil, err
}
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
if len(dirResponse.Packages) == 0 && err == nil {
dirResponse.Packages = append(dirResponse.Packages, &Package{
ID: "command-line-arguments",
PkgPath: query,
GoFiles: []string{query},
CompiledGoFiles: []string{query},
Imports: make(map[string]*Package),
})
dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
}
// Special case to handle issue #33482:
// If this is a file= query for ad-hoc packages where the file only exists on an overlay,
// and exists outside of a module, add the file in for the package.
if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" ||
filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) {
if len(dirResponse.Packages[0].GoFiles) == 0 {
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
// TODO(matloob): check if the file is outside of a root dir?
for path := range cfg.Overlay {
if path == filename {
dirResponse.Packages[0].Errors = nil
dirResponse.Packages[0].GoFiles = []string{path}
dirResponse.Packages[0].CompiledGoFiles = []string{path}
}
}
}
}
return dirResponse, nil
}
func contains(files []string, filename string) bool {
for _, f := range files {
if f == filename {
return true
}
}
return false
}
// errorSpan attempts to parse a standard `go list` error message
// by stripping off the trailing error message.
//
// It works only on errors whose message is prefixed by colon,
// followed by a space (": "). For example:
//
// attributes.go:13:1: expected 'package', found 'type'
//
func errorSpan(err Error) span.Span {
if err.Pos == "" {
input := strings.TrimSpace(err.Msg)
msgIndex := strings.Index(input, ": ")
if msgIndex < 0 {
return span.Parse(input)
}
return span.Parse(input[:msgIndex])
}
return span.Parse(err.Pos)
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
......@@ -747,7 +673,7 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
// Run "go list" for complete
// information on the specified packages.
- buf, err := invokeGo(cfg, golistargs(cfg, words)...)
+ buf, err := invokeGo(cfg, "list", golistargs(cfg, words)...)
if err != nil {
return nil, err
}
......@@ -879,9 +805,15 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
}
if p.Error != nil {
+ msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+ // Address golang.org/issue/35964 by appending import stack to error message.
+ if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+ msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+ }
pkg.Errors = append(pkg.Errors, Error{
- Pos: p.Error.Pos,
- Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363.
+ Pos:  p.Error.Pos,
+ Msg:  msg,
Kind: ListError,
})
}
......@@ -945,7 +877,7 @@ func absJoin(dir string, fileses ...[]string) (res []string) {
func golistargs(cfg *Config, words []string) []string {
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
fullargs := []string{
- "list", "-e", "-json",
+ "-e", "-json",
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
......@@ -961,10 +893,13 @@ func golistargs(cfg *Config, words []string) []string {
}
// invokeGo returns the stdout of a go command invocation.
- func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
+ func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
- cmd := exec.CommandContext(cfg.Context, "go", args...)
+ goArgs := []string{verb}
+ goArgs = append(goArgs, cfg.BuildFlags...)
+ goArgs = append(goArgs, args...)
+ cmd := exec.CommandContext(cfg.Context, "go", goArgs...)
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
......
......@@ -160,7 +160,7 @@ type Config struct {
Tests bool
// Overlay provides a mapping of absolute file paths to file contents.
// If the file with the given path already exists, the parser will use the
// If the file with the given path already exists, the parser will use the
// alternative file contents provided by the map.
//
// Overlays provide incomplete support for when a given file doesn't
......@@ -713,7 +713,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
// which would then require that such created packages be explicitly
// inserted back into the Import graph as a final step after export data loading.
// The Diamond test exercises this case.
- if !lpkg.needtypes {
+ if !lpkg.needtypes && !lpkg.needsrc {
return
}
if !lpkg.needsrc {
......
This diff is collapsed.