Commit e396c3f4 authored by neza2017, committed by yefu.chen

Add zap log

Signed-off-by: neza2017 <yefu.chen@zilliz.com>
Parent 4c491471
@@ -2,19 +2,29 @@ package main
 import (
     "context"
-    "log"
     "os"
     "os/signal"
     "syscall"

     distributed "github.com/zilliztech/milvus-distributed/cmd/distributed/components"
+    "github.com/zilliztech/milvus-distributed/internal/log"
+    "github.com/zilliztech/milvus-distributed/internal/masterservice"
     "github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
+    "go.uber.org/zap"
 )

 func main() {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()

+    masterservice.Params.Init()
+    log.SetupLogger(&masterservice.Params.Log)
+    defer func() {
+        if err := log.Sync(); err != nil {
+            panic(err)
+        }
+    }()
+
     msFactory := pulsarms.NewFactory()
     ms, err := distributed.NewMasterService(ctx, msFactory)
     if err != nil {
@@ -31,7 +41,7 @@ func main() {
         syscall.SIGTERM,
         syscall.SIGQUIT)
     sig := <-sc
-    log.Printf("Got %s signal to exit", sig.String())
+    log.Info("Get signal to exit", zap.String("signal", sig.String()))
     err = ms.Stop()
     if err != nil {
         panic(err)
......
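Editor's note: this commit swaps the standard library logger for the repo's zap-backed `internal/log` package — parameters are loaded first, `log.SetupLogger` installs the configured logger, and a deferred `log.Sync()` flushes buffered entries on shutdown. A minimal sketch of the same setup/teardown pattern using plain zap APIs only (the `internal/log` wrapper shown in the diff layers config parsing on top):

```go
// Minimal sketch, assuming plain zap rather than the internal/log wrapper.
// zap.NewProduction, zap.ReplaceGlobals, and Sync are stock zap APIs.
package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction() // JSON output, Info level by default
	if err != nil {
		panic(err)
	}
	zap.ReplaceGlobals(logger) // install as the process-wide logger
	defer func() {
		// Flush buffered entries before exit; zap buffers writes,
		// so skipping this can drop the last log lines.
		_ = logger.Sync()
	}()

	zap.L().Info("service starting", zap.String("component", "masterservice"))
}
```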
@@ -4,27 +4,27 @@ import (
     "context"
     "fmt"
     "io"
-    "log"
     "strconv"
     "time"

     "net"
     "sync"

-    "github.com/opentracing/opentracing-go"
-    "github.com/uber/jaeger-client-go/config"
     dsc "github.com/zilliztech/milvus-distributed/internal/distributed/dataservice/client"
     isc "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
     psc "github.com/zilliztech/milvus-distributed/internal/distributed/proxyservice/client"
     qsc "github.com/zilliztech/milvus-distributed/internal/distributed/queryservice/client"
-    "github.com/zilliztech/milvus-distributed/internal/util/funcutil"
+    "github.com/zilliztech/milvus-distributed/internal/log"
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
     cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
     "github.com/zilliztech/milvus-distributed/internal/msgstream"
     "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
     "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
+    "github.com/zilliztech/milvus-distributed/internal/util/funcutil"
+    "go.uber.org/zap"
     "google.golang.org/grpc"
 )
@@ -102,7 +102,7 @@ func (s *Server) Run() error {
 func (s *Server) init() error {
     Params.Init()
-    log.Println("init params done")
+    log.Info("init params done")
     err := s.startGrpc()
     if err != nil {
@@ -112,7 +112,7 @@ func (s *Server) init() error {
     s.core.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
     if s.connectProxyService {
-        log.Printf("proxy service address : %s", Params.ProxyServiceAddress)
+        log.Info("proxy service", zap.String("address", Params.ProxyServiceAddress))
         proxyService := psc.NewClient(Params.ProxyServiceAddress)
         if err := proxyService.Init(); err != nil {
             panic(err)
@@ -128,7 +128,7 @@ func (s *Server) init() error {
         }
     }
     if s.connectDataService {
-        log.Printf("data service address : %s", Params.DataServiceAddress)
+        log.Info("data service", zap.String("address", Params.DataServiceAddress))
         dataService := dsc.NewClient(Params.DataServiceAddress)
         if err := dataService.Init(); err != nil {
             panic(err)
@@ -146,7 +146,7 @@ func (s *Server) init() error {
         }
     }
     if s.connectIndexService {
-        log.Printf("index service address : %s", Params.IndexServiceAddress)
+        log.Info("index service", zap.String("address", Params.IndexServiceAddress))
         indexService := isc.NewClient(Params.IndexServiceAddress)
         if err := indexService.Init(); err != nil {
             panic(err)
@@ -173,7 +173,7 @@ func (s *Server) init() error {
         }
     }
     cms.Params.Init()
-    log.Println("grpc init done ...")
+    log.Info("grpc init done ...")
     if err := s.core.Init(); err != nil {
         return err
@@ -193,10 +193,10 @@ func (s *Server) startGrpcLoop(grpcPort int) {
     defer s.wg.Done()
-    log.Println("network port: ", grpcPort)
+    log.Info("start grpc ", zap.Int("port", grpcPort))
     lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
     if err != nil {
-        log.Printf("GrpcServer:failed to listen: %v", err)
+        log.Warn("GrpcServer:failed to listen", zap.String("error", err.Error()))
         s.grpcErrChan <- err
         return
     }
@@ -215,7 +215,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 }
 func (s *Server) start() error {
-    log.Println("Master Core start ...")
+    log.Info("Master Core start ...")
     if err := s.core.Start(); err != nil {
         return err
     }
......
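Editor's note: every call site in this file follows the same rewrite — variable data moves out of the format string into typed zap fields, so the output stays machine-parseable. A small sketch of the before/after shape (the address and error values here are illustrative, not taken from the diff):

```go
// Sketch of the Printf-to-structured-field rewrite applied throughout
// this commit, using stock zap. `addr` and `err` are illustrative.
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	addr := "localhost:53100"
	err := errors.New("connection refused")

	// Before: log.Printf("proxy service address : %s", addr)
	// After: the value is a typed, queryable field instead of string soup.
	logger.Info("proxy service", zap.String("address", addr))
	logger.Warn("GrpcServer:failed to listen", zap.Error(err))
}
```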
 package masterservice

 import (
-    "log"
     "sync/atomic"
     "time"

     "github.com/zilliztech/milvus-distributed/internal/errors"
     "github.com/zilliztech/milvus-distributed/internal/kv"
+    "github.com/zilliztech/milvus-distributed/internal/log"
     "github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
     "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
     "go.uber.org/zap"
@@ -77,7 +77,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
         current := (*atomicObject)(atomic.LoadPointer(&gta.tso.TSO))
         if current == nil || current.physical.Equal(typeutil.ZeroTime) {
             // If it's leader, maybe SyncTimestamp hasn't completed yet
-            log.Println("sync hasn't completed yet, wait for a while")
+            log.Debug("sync hasn't completed yet, wait for a while")
             time.Sleep(200 * time.Millisecond)
             continue
         }
@@ -85,8 +85,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
         physical = current.physical.UnixNano() / int64(time.Millisecond)
         logical = atomic.AddInt64(&current.logical, int64(count))
         if logical >= maxLogical {
-            log.Println("logical part outside of max logical interval, please check ntp time",
-                zap.Int("retry-count", i))
+            log.Debug("logical part outside of max logical interval, please check ntp time", zap.Int("retry-count", i))
             time.Sleep(UpdateTimestampStep)
             continue
         }
......
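Editor's note: for context on `GenerateTSO` — the allocator hands out hybrid timestamps whose high bits are wall-clock milliseconds and whose low bits are a logical counter, retrying when the counter would pass `maxLogical`. A sketch of the packing, assuming the PD-style 18-bit logical window this allocator follows (`logicalBits` and `composeTS` are names chosen here, not ones from the diff):

```go
// Sketch of how a hybrid TSO packs into one uint64, assuming an 18-bit
// logical counter (so maxLogical = 1 << 18) under millisecond physical time.
package main

import (
	"fmt"
	"time"
)

const logicalBits = 18 // logical counter width; maxLogical = 1 << logicalBits

func composeTS(physical time.Time, logical int64) uint64 {
	ms := physical.UnixNano() / int64(time.Millisecond)
	return uint64(ms<<logicalBits | logical)
}

func main() {
	ts := composeTS(time.Now(), 42)
	fmt.Printf("tso=%d physical_ms=%d logical=%d\n",
		ts, ts>>logicalBits, ts&((1<<logicalBits)-1))
}
```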
 package masterservice

 import (
-    "log"
     "path"
     "strconv"
     "sync"
@@ -9,11 +8,13 @@ import (
     "github.com/golang/protobuf/proto"
     "github.com/zilliztech/milvus-distributed/internal/errors"
     "github.com/zilliztech/milvus-distributed/internal/kv"
+    "github.com/zilliztech/milvus-distributed/internal/log"
     "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/datapb"
     pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
     "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
+    "go.uber.org/zap"
 )

 const (
@@ -127,7 +128,7 @@ func (mt *metaTable) reloadFromKV() error {
         }
         collID, ok := mt.partitionID2CollID[partitionInfo.PartitionID]
         if !ok {
-            log.Printf("partition id %d not belong to any collection", partitionInfo.PartitionID)
+            log.Warn("partition does not belong to any collection", zap.Int64("partition id", partitionInfo.PartitionID))
             continue
         }
         mt.partitionID2Meta[partitionInfo.PartitionID] = partitionInfo
@@ -235,14 +236,14 @@ func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID) error {
         metaKeys = append(metaKeys, path.Join(PartitionMetaPrefix, strconv.FormatInt(partID, 10)))
         partMeta, ok := mt.partitionID2Meta[partID]
         if !ok {
-            log.Printf("partition id = %d not exist", partID)
+            log.Warn("partition id does not exist", zap.Int64("partition id", partID))
             continue
         }
         delete(mt.partitionID2Meta, partID)
         for _, segID := range partMeta.SegmentIDs {
             segIndexMeta, ok := mt.segID2IndexMeta[segID]
             if !ok {
-                log.Printf("segment id = %d not exist", segID)
+                log.Warn("segment id does not exist", zap.Int64("segment id", segID))
                 continue
             }
             delete(mt.segID2IndexMeta, segID)
@@ -346,7 +347,7 @@ func (mt *metaTable) AddPartition(collID typeutil.UniqueID, partitionName string
     for _, t := range coll.PartitionIDs {
         part, ok := mt.partitionID2Meta[t]
         if !ok {
-            log.Printf("partition id = %d not exist", t)
+            log.Warn("partition id does not exist", zap.Int64("partition id", t))
             continue
         }
         if part.PartitionName == partitionName {
@@ -441,7 +442,7 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
     for _, segID := range partMeta.SegmentIDs {
         segIndexMeta, ok := mt.segID2IndexMeta[segID]
         if !ok {
-            log.Printf("segment id = %d has no index meta", segID)
+            log.Warn("segment has no index meta", zap.Int64("segment id", segID))
             continue
         }
         delete(mt.segID2IndexMeta, segID)
@@ -449,7 +450,7 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
         delMetaKeys = append(delMetaKeys, path.Join(SegmentIndexMetaPrefix, strconv.FormatInt(segID, 10), strconv.FormatInt(indexID, 10)))
         indexMeta, ok := mt.indexID2Meta[segIdxMeta.IndexID]
         if !ok {
-            log.Printf("index id = %d not exist", segIdxMeta.IndexID)
+            log.Warn("index id does not exist", zap.Int64("index id", segIdxMeta.IndexID))
             continue
         }
         delete(mt.indexID2Meta, segIdxMeta.IndexID)
@@ -589,7 +590,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
         idxMeta, ok := mt.indexID2Meta[info.IndexID]
         if !ok {
             fieldIdxInfo = append(fieldIdxInfo, info)
-            log.Printf("index id = %d not has meta", info.IndexID)
+            log.Warn("index id has no meta", zap.Int64("index id", info.IndexID))
             continue
         }
         if idxMeta.IndexName != indexName {
@@ -601,7 +602,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
         break
     }
     if len(fieldIdxInfo) == len(collMeta.FieldIndexes) {
-        log.Printf("collection = %s, field = %s, index = %s not found", collName, fieldName, indexName)
+        log.Warn("drop index, index not found", zap.String("collection name", collName), zap.String("field name", fieldName), zap.String("index name", indexName))
         return 0, false, nil
     }
     collMeta.FieldIndexes = fieldIdxInfo
@@ -614,7 +615,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
     for _, partID := range collMeta.PartitionIDs {
         partMeta, ok := mt.partitionID2Meta[partID]
         if !ok {
-            log.Warn("partition does not exist", zap.Int64("partition id", partID))
+            log.Warn("partition does not exist", zap.Int64("partition id", partID))
             continue
         }
         for _, segID := range partMeta.SegmentIDs {
......
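Editor's note: all of the `metaTable` changes share one pattern — cleanup is best-effort, so a missing entry is logged at `Warn` and skipped rather than failing the whole operation. A toy sketch of that pattern (the map here is a stand-in, not the real meta structures):

```go
// Sketch of the warn-and-continue cleanup pattern used across metaTable.
// The map contents are illustrative.
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	segID2IndexMeta := map[int64]string{1: "idx-a"} // toy stand-in
	for _, segID := range []int64{1, 2} {
		if _, ok := segID2IndexMeta[segID]; !ok {
			logger.Warn("segment id does not exist", zap.Int64("segment id", segID))
			continue // best-effort cleanup: skip, don't fail
		}
		delete(segID2IndexMeta, segID)
	}
}
```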
 package masterservice

 import (
+    "fmt"
+    "path"
+    "strconv"
     "sync"

+    "github.com/zilliztech/milvus-distributed/internal/log"
     "github.com/zilliztech/milvus-distributed/internal/util/paramtable"
 )
@@ -30,6 +34,8 @@ type ParamTable struct {
     DefaultIndexName string

     Timeout int
+
+    Log log.Config
 }

 func (p *ParamTable) Init() {
@@ -58,6 +64,8 @@ func (p *ParamTable) Init() {
         p.initDefaultIndexName()

         p.initTimeout()
+
+        p.initLogCfg()
     })
 }
@@ -160,3 +168,34 @@ func (p *ParamTable) initDefaultIndexName() {
 func (p *ParamTable) initTimeout() {
     p.Timeout = p.ParseInt("master.timeout")
 }
+
+func (p *ParamTable) initLogCfg() {
+    p.Log = log.Config{}
+    format, err := p.Load("log.format")
+    if err != nil {
+        panic(err)
+    }
+    p.Log.Format = format
+    level, err := p.Load("log.level")
+    if err != nil {
+        panic(err)
+    }
+    p.Log.Level = level
+    devStr, err := p.Load("log.dev")
+    if err != nil {
+        panic(err)
+    }
+    dev, err := strconv.ParseBool(devStr)
+    if err != nil {
+        panic(err)
+    }
+    p.Log.Development = dev
+    p.Log.File.MaxSize = p.ParseInt("log.file.maxSize")
+    p.Log.File.MaxBackups = p.ParseInt("log.file.maxBackups")
+    p.Log.File.MaxDays = p.ParseInt("log.file.maxAge")
+    rootPath, err := p.Load("log.file.rootPath")
+    if err != nil {
+        panic(err)
+    }
+    p.Log.File.Filename = path.Join(rootPath, fmt.Sprintf("masterservice-%d.log", p.NodeID))
+}
@@ -2,16 +2,17 @@ package masterservice
 import (
     "fmt"
-    "log"

     "github.com/golang/protobuf/proto"
     "github.com/zilliztech/milvus-distributed/internal/errors"
+    "github.com/zilliztech/milvus-distributed/internal/log"
     "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
     "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
     "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
     "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
     "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
+    "go.uber.org/zap"
 )

 type reqTask interface {
@@ -233,7 +234,7 @@ func (t *DropCollectionReqTask) Execute() error {
     //notify query service to release collection
     go func() {
         if err = t.core.ReleaseCollection(t.Req.Base.Timestamp, 0, collMeta.ID); err != nil {
-            log.Printf("%s", err.Error())
+            log.Warn("ReleaseCollection failed", zap.String("error", err.Error()))
         }
     }()
......
 package masterservice

 import (
-    "log"
     "sync/atomic"
     "time"
     "unsafe"

-    "go.uber.org/zap"
     "github.com/zilliztech/milvus-distributed/internal/errors"
     "github.com/zilliztech/milvus-distributed/internal/kv"
+    "github.com/zilliztech/milvus-distributed/internal/log"
     "github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
     "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
+    "go.uber.org/zap"
 )

 const (
@@ -143,7 +142,7 @@ func (t *timestampOracle) UpdateTimestamp() error {
     jetLag := typeutil.SubTimeByWallClock(now, prev.physical)
     if jetLag > 3*UpdateTimestampStep {
-        log.Print("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
+        log.Debug("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
     }

     var next time.Time
@@ -154,7 +153,7 @@ func (t *timestampOracle) UpdateTimestamp() error {
     } else if prevLogical > maxLogical/2 {
         // The reason for choosing maxLogical/2 here is that it's big enough for common cases.
         // Because there are enough timestamps that can be allocated before the next update.
-        log.Print("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
+        log.Debug("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
         next = prev.physical.Add(time.Millisecond)
     } else {
         // It will still use the previous physical time to alloc the timestamp.
......
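Editor's note: `UpdateTimestamp` keeps the TSO monotonic — it normally follows the wall clock, but when more than half the logical window is consumed before the next tick it advances the physical part by one millisecond so the counter can reset. A condensed sketch of that decision (`nextPhysical` and the constant values are illustrative, not identifiers from the diff):

```go
// Condensed sketch of the next-physical-time decision in UpdateTimestamp.
// updateTimestampStep and maxLogical mirror the constants referenced in
// the diff; the concrete values here are assumptions.
package main

import (
	"fmt"
	"time"
)

const (
	updateTimestampStep = 50 * time.Millisecond // assumed tick interval
	maxLogical          = int64(1 << 18)        // 18-bit logical window
)

func nextPhysical(now, prevPhysical time.Time, prevLogical int64) time.Time {
	if now.Sub(prevPhysical) > time.Millisecond {
		// Wall clock moved forward: follow it.
		return now
	}
	if prevLogical > maxLogical/2 {
		// Over half the logical window is used; buy headroom by
		// advancing the physical part one millisecond.
		return prevPhysical.Add(time.Millisecond)
	}
	// Otherwise keep the previous physical time and keep counting.
	return prevPhysical
}

func main() {
	prev := time.Now()
	fmt.Println(nextPhysical(time.Now(), prev, maxLogical/2+1).After(prev))
}
```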
@@ -2,7 +2,7 @@ grpcio==1.26.0
 grpcio-tools==1.26.0
 numpy==1.18.1
 pytest-cov==2.8.1
-pymilvus-distributed==0.0.28
+pymilvus-distributed==0.0.29
 sklearn==0.0
 pytest==4.5.0
 pytest-timeout==1.3.3
......
@@ -378,7 +378,7 @@ class TestIndexBase:
     ******************************************************************
     """

-    @pytest.mark.skip("drop_index")
+    @pytest.mark.skip("get_collection_stats")
     def test_drop_index(self, connect, collection, get_simple_index):
         '''
         target: test drop index interface
@@ -392,7 +392,8 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
+    @pytest.mark.skip("get_collection_stats")
+    @pytest.mark.skip("drop_index raise exception")
     @pytest.mark.level(2)
     def test_drop_index_repeatly(self, connect, collection, get_simple_index):
         '''
@@ -409,7 +410,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_without_connect(self, dis_connect, collection):
         '''
@@ -420,7 +420,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             dis_connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_existed(self, connect):
         '''
         target: test drop index interface when collection name not existed
@@ -432,7 +431,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             connect.drop_index(collection_name, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_create(self, connect, collection):
         '''
         target: test drop index interface when index not created
@@ -455,7 +453,7 @@ class TestIndexBase:
         connect.create_index(collection, field_name, get_simple_index)
         connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
+    @pytest.mark.skip("get_collection_stats")
     def test_drop_index_ip(self, connect, collection, get_simple_index):
         '''
         target: test drop index interface
@@ -470,7 +468,7 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
+    @pytest.mark.skip("get_collection_stats")
     @pytest.mark.level(2)
     def test_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
         '''
@@ -488,7 +486,6 @@ class TestIndexBase:
         # assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
         assert not stats["partitions"][0]["segments"]

-    @pytest.mark.skip("drop_index")
     @pytest.mark.level(2)
     def test_drop_index_without_connect_ip(self, dis_connect, collection):
         '''
@@ -499,7 +496,6 @@ class TestIndexBase:
         with pytest.raises(Exception) as e:
             dis_connect.drop_index(collection, field_name)

-    @pytest.mark.skip("drop_index")
     def test_drop_index_collection_not_create_ip(self, connect, collection):
         '''
         target: test drop index interface when index not created
@@ -511,6 +507,7 @@ class TestIndexBase:
         connect.drop_index(collection, field_name)

     @pytest.mark.skip("drop_index")
+    @pytest.mark.skip("can't create and drop")
     @pytest.mark.level(2)
     def test_create_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
         '''
@@ -683,7 +680,7 @@ class TestIndexBinary:
     ******************************************************************
     """

-    @pytest.mark.skip("get_collection_stats and drop_index do not impl")
+    @pytest.mark.skip("get_collection_stats")
     def test_drop_index(self, connect, binary_collection, get_jaccard_index):
         '''
         target: test drop index interface
......
+from tests.utils import *
+from tests.constants import *
+
+uniq_id = "load_collection"
+
+class TestLoadCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `load_collection` function
+    ******************************************************************
+    """
+    def test_load_collection(self, connect, collection):
+        '''
+        target: test load collection and wait for loading collection
+        method: insert then flush, when flushed, try load collection
+        expected: no errors
+        '''
+        ids = connect.insert(collection, default_entities)
+        ids = connect.insert(collection, default_entity)
+        connect.flush([collection])
+        connect.load_collection(collection)
+from tests.utils import *
+from tests.constants import *
+
+uniq_id = "load_partitions"
+
+class TestLoadPartitions:
+    """
+    ******************************************************************
+      The following cases are used to test `load_partitions` function
+    ******************************************************************
+    """
+    def test_load_partitions(self, connect, collection):
+        '''
+        target: test load collection and wait for loading collection
+        method: insert then flush, when flushed, try load collection
+        expected: no errors
+        '''
+        partition_tag = "lvn9pq34u8rasjk"
+        connect.create_partition(collection, partition_tag + "1")
+        ids = connect.insert(collection, default_entities, partition_tag=partition_tag + "1")
+        connect.create_partition(collection, partition_tag + "2")
+        ids = connect.insert(collection, default_entity, partition_tag=partition_tag + "2")
+        connect.flush([collection])
+        connect.load_partitions(collection, [partition_tag + "2"])