Commit e396c3f4 authored by neza2017, committed by yefu.chen

Add zap log

Signed-off-by: neza2017 <yefu.chen@zilliz.com>
Parent 4c491471
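The diff below swaps the standard library "log" for the repository's zap-backed logger (github.com/zilliztech/milvus-distributed/internal/log) across masterservice. As a minimal standalone sketch of the migration pattern, not part of the commit, assuming only the package-level Info/Warn calls the hunks below use (the address value is hypothetical):

```go
package main

import (
	"errors"

	"go.uber.org/zap"

	// zap-backed logger that replaces the standard library "log" in this commit
	"github.com/zilliztech/milvus-distributed/internal/log"
)

func main() {
	addr := "127.0.0.1:53100" // hypothetical address, for illustration only

	// before: log.Printf("proxy service address : %s", addr)
	// after:  a fixed message plus typed zap fields
	log.Info("proxy service", zap.String("address", addr))

	if err := errors.New("listen tcp: address already in use"); err != nil {
		// before: log.Printf("GrpcServer:failed to listen: %v", err)
		log.Warn("GrpcServer:failed to listen", zap.String("error", err.Error()))
	}
}
```

Structured fields keep the message text constant, so log lines stay greppable while the variable parts remain machine-parseable.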
......@@ -2,19 +2,29 @@ package main
import (
"context"
"log"
"os"
"os/signal"
"syscall"
distributed "github.com/zilliztech/milvus-distributed/cmd/distributed/components"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
"go.uber.org/zap"
)
func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
masterservice.Params.Init()
log.SetupLogger(&masterservice.Params.Log)
defer func() {
if err := log.Sync(); err != nil {
panic(err)
}
}()
msFactory := pulsarms.NewFactory()
ms, err := distributed.NewMasterService(ctx, msFactory)
if err != nil {
......@@ -31,7 +41,7 @@ func main() {
syscall.SIGTERM,
syscall.SIGQUIT)
sig := <-sc
log.Printf("Got %s signal to exit", sig.String())
log.Info("Get signal to exit", zap.String("signal", sig.String()))
err = ms.Stop()
if err != nil {
panic(err)
......
......@@ -4,27 +4,27 @@ import (
"context"
"fmt"
"io"
"log"
"strconv"
"time"
"net"
"sync"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/config"
dsc "github.com/zilliztech/milvus-distributed/internal/distributed/dataservice/client"
isc "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
psc "github.com/zilliztech/milvus-distributed/internal/distributed/proxyservice/client"
qsc "github.com/zilliztech/milvus-distributed/internal/distributed/queryservice/client"
"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/config"
"github.com/zilliztech/milvus-distributed/internal/log"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
"go.uber.org/zap"
"google.golang.org/grpc"
)
......@@ -102,7 +102,7 @@ func (s *Server) Run() error {
func (s *Server) init() error {
Params.Init()
log.Println("init params done")
log.Info("init params done")
err := s.startGrpc()
if err != nil {
......@@ -112,7 +112,7 @@ func (s *Server) init() error {
s.core.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
if s.connectProxyService {
log.Printf("proxy service address : %s", Params.ProxyServiceAddress)
log.Info("proxy service", zap.String("address", Params.ProxyServiceAddress))
proxyService := psc.NewClient(Params.ProxyServiceAddress)
if err := proxyService.Init(); err != nil {
panic(err)
......@@ -128,7 +128,7 @@ func (s *Server) init() error {
}
}
if s.connectDataService {
log.Printf("data service address : %s", Params.DataServiceAddress)
log.Info("data service", zap.String("address", Params.DataServiceAddress))
dataService := dsc.NewClient(Params.DataServiceAddress)
if err := dataService.Init(); err != nil {
panic(err)
......@@ -146,7 +146,7 @@ func (s *Server) init() error {
}
}
if s.connectIndexService {
log.Printf("index service address : %s", Params.IndexServiceAddress)
log.Info("index service", zap.String("address", Params.IndexServiceAddress))
indexService := isc.NewClient(Params.IndexServiceAddress)
if err := indexService.Init(); err != nil {
panic(err)
......@@ -173,7 +173,7 @@ func (s *Server) init() error {
}
}
cms.Params.Init()
log.Println("grpc init done ...")
log.Info("grpc init done ...")
if err := s.core.Init(); err != nil {
return err
......@@ -193,10 +193,10 @@ func (s *Server) startGrpcLoop(grpcPort int) {
defer s.wg.Done()
log.Println("network port: ", grpcPort)
log.Info("start grpc ", zap.Int("port", grpcPort))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
if err != nil {
log.Printf("GrpcServer:failed to listen: %v", err)
log.Warn("GrpcServer:failed to listen", zap.String("error", err.Error()))
s.grpcErrChan <- err
return
}
......@@ -215,7 +215,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
}
func (s *Server) start() error {
log.Println("Master Core start ...")
log.Info("Master Core start ...")
if err := s.core.Start(); err != nil {
return err
}
......
package masterservice
import (
"log"
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
......@@ -77,7 +77,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
current := (*atomicObject)(atomic.LoadPointer(&gta.tso.TSO))
if current == nil || current.physical.Equal(typeutil.ZeroTime) {
// If it's leader, maybe SyncTimestamp hasn't completed yet
log.Println("sync hasn't completed yet, wait for a while")
log.Debug("sync hasn't completed yet, wait for a while")
time.Sleep(200 * time.Millisecond)
continue
}
......@@ -85,8 +85,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
physical = current.physical.UnixNano() / int64(time.Millisecond)
logical = atomic.AddInt64(&current.logical, int64(count))
if logical >= maxLogical {
log.Println("logical part outside of max logical interval, please check ntp time",
zap.Int("retry-count", i))
log.Debug("logical part outside of max logical interval, please check ntp time", zap.Int("retry-count", i))
time.Sleep(UpdateTimestampStep)
continue
}
......
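For context on the retry above: GenerateTSO hands out hybrid timestamps whose low bits are a per-millisecond logical counter, and the "max logical interval" warning fires when that counter is exhausted. A minimal sketch of the packing, under the assumption of an 18-bit logical part (the real constants and composition live in internal/util/tsoutil, not in this diff):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed layout: high bits carry the physical time in milliseconds, the low
// logicalBits carry the per-millisecond counter handed out by GenerateTSO.
const (
	logicalBits = 18
	maxLogical  = int64(1) << logicalBits
)

// composeTS packs physical milliseconds and a logical counter into one
// 64-bit hybrid timestamp.
func composeTS(physicalMs, logical int64) uint64 {
	return uint64(physicalMs)<<logicalBits | uint64(logical)
}

func main() {
	physical := time.Now().UnixNano() / int64(time.Millisecond)
	logical := int64(42)
	if logical >= maxLogical {
		// the condition that triggers the "outside of max logical interval"
		// retry in GenerateTSO above
		panic("logical counter exhausted for this millisecond")
	}
	fmt.Printf("tso=%d\n", composeTS(physical, logical))
}
```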
package masterservice
import (
"log"
"path"
"strconv"
"sync"
......@@ -9,11 +8,13 @@ import (
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
const (
......@@ -127,7 +128,7 @@ func (mt *metaTable) reloadFromKV() error {
}
collID, ok := mt.partitionID2CollID[partitionInfo.PartitionID]
if !ok {
log.Printf("partition id %d not belong to any collection", partitionInfo.PartitionID)
log.Warn("partition does not belong to any collection", zap.Int64("partition id", partitionInfo.PartitionID))
continue
}
mt.partitionID2Meta[partitionInfo.PartitionID] = partitionInfo
......@@ -235,14 +236,14 @@ func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID) error {
metaKeys = append(metaKeys, path.Join(PartitionMetaPrefix, strconv.FormatInt(partID, 10)))
partMeta, ok := mt.partitionID2Meta[partID]
if !ok {
log.Printf("partition id = %d not exist", partID)
log.Warn("partition id not exist", zap.Int64("partition id", partID))
continue
}
delete(mt.partitionID2Meta, partID)
for _, segID := range partMeta.SegmentIDs {
segIndexMeta, ok := mt.segID2IndexMeta[segID]
if !ok {
log.Printf("segment id = %d not exist", segID)
log.Warn("segment id not exist", zap.Int64("segment id", segID))
continue
}
delete(mt.segID2IndexMeta, segID)
......@@ -346,7 +347,7 @@ func (mt *metaTable) AddPartition(collID typeutil.UniqueID, partitionName string
for _, t := range coll.PartitionIDs {
part, ok := mt.partitionID2Meta[t]
if !ok {
log.Printf("partition id = %d not exist", t)
log.Warn("partition id not exist", zap.Int64("partition id", t))
continue
}
if part.PartitionName == partitionName {
......@@ -441,7 +442,7 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
for _, segID := range partMeta.SegmentIDs {
segIndexMeta, ok := mt.segID2IndexMeta[segID]
if !ok {
log.Printf("segment id = %d has no index meta", segID)
log.Warn("segment has no index meta", zap.Int64("segment id", segID))
continue
}
delete(mt.segID2IndexMeta, segID)
......@@ -449,7 +450,7 @@ func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, partitionName str
delMetaKeys = append(delMetaKeys, path.Join(SegmentIndexMetaPrefix, strconv.FormatInt(segID, 10), strconv.FormatInt(indexID, 10)))
indexMeta, ok := mt.indexID2Meta[segIdxMeta.IndexID]
if !ok {
log.Printf("index id = %d not exist", segIdxMeta.IndexID)
log.Warn("index id not exist", zap.Int64("index id", segIdxMeta.IndexID))
continue
}
delete(mt.indexID2Meta, segIdxMeta.IndexID)
......@@ -589,7 +590,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
idxMeta, ok := mt.indexID2Meta[info.IndexID]
if !ok {
fieldIdxInfo = append(fieldIdxInfo, info)
log.Printf("index id = %d not has meta", info.IndexID)
log.Warn("index id not has meta", zap.Int64("index id", info.IndexID))
continue
}
if idxMeta.IndexName != indexName {
......@@ -601,7 +602,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
break
}
if len(fieldIdxInfo) == len(collMeta.FieldIndexes) {
log.Printf("collection = %s, field = %s, index = %s not found", collName, fieldName, indexName)
log.Warn("drop index,index not found", zap.String("collection name", collName), zap.String("filed name", fieldName), zap.String("index name", indexName))
return 0, false, nil
}
collMeta.FieldIndexes = fieldIdxInfo
......@@ -614,7 +615,7 @@ func (mt *metaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
for _, partID := range collMeta.PartitionIDs {
partMeta, ok := mt.partitionID2Meta[partID]
if !ok {
log.Printf("partition id = %d not exist", partID)
log.Warn("partition not exist", zap.Int64("partition id", partID))
continue
}
for _, segID := range partMeta.SegmentIDs {
......
package masterservice
import (
"fmt"
"path"
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)
......@@ -30,6 +34,8 @@ type ParamTable struct {
DefaultIndexName string
Timeout int
Log log.Config
}
func (p *ParamTable) Init() {
......@@ -58,6 +64,8 @@ func (p *ParamTable) Init() {
p.initDefaultIndexName()
p.initTimeout()
p.initLogCfg()
})
}
......@@ -160,3 +168,34 @@ func (p *ParamTable) initDefaultIndexName() {
func (p *ParamTable) initTimeout() {
p.Timeout = p.ParseInt("master.timeout")
}
func (p *ParamTable) initLogCfg() {
p.Log = log.Config{}
format, err := p.Load("log.format")
if err != nil {
panic(err)
}
p.Log.Format = format
level, err := p.Load("log.level")
if err != nil {
panic(err)
}
p.Log.Level = level
devStr, err := p.Load("log.dev")
if err != nil {
panic(err)
}
dev, err := strconv.ParseBool(devStr)
if err != nil {
panic(err)
}
p.Log.Development = dev
p.Log.File.MaxSize = p.ParseInt("log.file.maxSize")
p.Log.File.MaxBackups = p.ParseInt("log.file.maxBackups")
p.Log.File.MaxDays = p.ParseInt("log.file.maxAge")
rootPath, err := p.Load("log.file.rootPath")
if err != nil {
panic(err)
}
p.Log.File.Filename = path.Join(rootPath, fmt.Sprintf("masterservice-%d.log", p.NodeID))
}
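initLogCfg above populates the new log.Config from the param table keys log.format, log.level, log.dev, and log.file.*. As a hand-written sketch of the same configuration, using only fields visible in this diff and illustrative values (in the commit they come from ParamTable.Load / ParseInt rather than literals):

```go
package main

import (
	"path"

	"github.com/zilliztech/milvus-distributed/internal/log"
)

func main() {
	// Illustrative values; initLogCfg reads them from log.format, log.level,
	// log.dev and the log.file.* entries instead.
	cfg := log.Config{
		Format:      "text",
		Level:       "debug",
		Development: true,
	}
	cfg.File.MaxSize = 300   // MB, mirrors log.file.maxSize
	cfg.File.MaxBackups = 20 // mirrors log.file.maxBackups
	cfg.File.MaxDays = 10    // mirrors log.file.maxAge
	cfg.File.Filename = path.Join("/tmp/milvus", "masterservice-1.log")

	// Same calls the masterservice entry point makes earlier in this commit.
	log.SetupLogger(&cfg)
	defer func() {
		if err := log.Sync(); err != nil {
			panic(err)
		}
	}()
}
```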
......@@ -2,16 +2,17 @@ package masterservice
import (
"fmt"
"log"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
type reqTask interface {
......@@ -233,7 +234,7 @@ func (t *DropCollectionReqTask) Execute() error {
//notify query service to release collection
go func() {
if err = t.core.ReleaseCollection(t.Req.Base.Timestamp, 0, collMeta.ID); err != nil {
log.Printf("%s", err.Error())
log.Warn("ReleaseCollection failed", zap.String("error", err.Error()))
}
}()
......
package masterservice
import (
"log"
"sync/atomic"
"time"
"unsafe"
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/log"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
const (
......@@ -143,7 +142,7 @@ func (t *timestampOracle) UpdateTimestamp() error {
jetLag := typeutil.SubTimeByWallClock(now, prev.physical)
if jetLag > 3*UpdateTimestampStep {
log.Print("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
log.Debug("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
}
var next time.Time
......@@ -154,7 +153,7 @@ func (t *timestampOracle) UpdateTimestamp() error {
} else if prevLogical > maxLogical/2 {
// The reason choosing maxLogical/2 here is that it's big enough for common cases.
// Because there is enough timestamp can be allocated before next update.
log.Print("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
log.Debug("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
next = prev.physical.Add(time.Millisecond)
} else {
// It will still use the previous physical time to alloc the timestamp.
......
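The two hunks above only change log levels inside timestampOracle.UpdateTimestamp, but the branch they sit in is the interesting part: follow the wall clock when it has moved far enough, otherwise borrow a millisecond of physical time once the logical counter is more than half consumed. A rough standalone sketch of that decision, reconstructed from the surrounding diff with assumed constant values:

```go
package main

import (
	"fmt"
	"time"
)

// Assumed values; the real constants are defined elsewhere in masterservice.
const (
	updateTimestampStep = 50 * time.Millisecond
	maxLogical          = int64(1) << 18
)

// nextPhysical mirrors the branch structure visible around the hunks above:
// advance with the wall clock, or push physical time forward by 1ms when the
// logical counter is running out, or keep the previous physical time.
func nextPhysical(now, prevPhysical time.Time, prevLogical int64) time.Time {
	jetLag := now.Sub(prevPhysical)
	if jetLag > 3*updateTimestampStep {
		fmt.Println("clock offset, jet-lag:", jetLag)
	}
	switch {
	case jetLag > updateTimestampStep:
		return now
	case prevLogical > maxLogical/2:
		return prevPhysical.Add(time.Millisecond)
	default:
		return prevPhysical
	}
}

func main() {
	prev := time.Now().Add(-time.Second)
	fmt.Println(nextPhysical(time.Now(), prev, 10))
}
```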
......@@ -2,7 +2,7 @@ grpcio==1.26.0
grpcio-tools==1.26.0
numpy==1.18.1
pytest-cov==2.8.1
pymilvus-distributed==0.0.28
pymilvus-distributed==0.0.29
sklearn==0.0
pytest==4.5.0
pytest-timeout==1.3.3
......
......@@ -378,7 +378,7 @@ class TestIndexBase:
******************************************************************
"""
@pytest.mark.skip("drop_index")
@pytest.mark.skip("get_collection_stats")
def test_drop_index(self, connect, collection, get_simple_index):
'''
target: test drop index interface
......@@ -392,7 +392,8 @@ class TestIndexBase:
# assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
assert not stats["partitions"][0]["segments"]
@pytest.mark.skip("drop_index")
@pytest.mark.skip("get_collection_stats")
@pytest.mark.skip("drop_index raise exception")
@pytest.mark.level(2)
def test_drop_index_repeatly(self, connect, collection, get_simple_index):
'''
......@@ -409,7 +410,6 @@ class TestIndexBase:
# assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
assert not stats["partitions"][0]["segments"]
@pytest.mark.skip("drop_index")
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, collection):
'''
......@@ -420,7 +420,6 @@ class TestIndexBase:
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.skip("drop_index")
def test_drop_index_collection_not_existed(self, connect):
'''
target: test drop index interface when collection name not existed
......@@ -432,7 +431,6 @@ class TestIndexBase:
with pytest.raises(Exception) as e:
connect.drop_index(collection_name, field_name)
@pytest.mark.skip("drop_index")
def test_drop_index_collection_not_create(self, connect, collection):
'''
target: test drop index interface when index not created
......@@ -455,7 +453,7 @@ class TestIndexBase:
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.skip("drop_index")
@pytest.mark.skip("get_collection_stats")
def test_drop_index_ip(self, connect, collection, get_simple_index):
'''
target: test drop index interface
......@@ -470,7 +468,7 @@ class TestIndexBase:
# assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
assert not stats["partitions"][0]["segments"]
@pytest.mark.skip("drop_index")
@pytest.mark.skip("get_collection_stats")
@pytest.mark.level(2)
def test_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
'''
......@@ -488,7 +486,6 @@ class TestIndexBase:
# assert stats["partitions"][0]["segments"][0]["index_name"] == default_index_type
assert not stats["partitions"][0]["segments"]
@pytest.mark.skip("drop_index")
@pytest.mark.level(2)
def test_drop_index_without_connect_ip(self, dis_connect, collection):
'''
......@@ -499,7 +496,6 @@ class TestIndexBase:
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.skip("drop_index")
def test_drop_index_collection_not_create_ip(self, connect, collection):
'''
target: test drop index interface when index not created
......@@ -511,6 +507,7 @@ class TestIndexBase:
connect.drop_index(collection, field_name)
@pytest.mark.skip("drop_index")
@pytest.mark.skip("can't create and drop")
@pytest.mark.level(2)
def test_create_drop_index_repeatly_ip(self, connect, collection, get_simple_index):
'''
......@@ -683,7 +680,7 @@ class TestIndexBinary:
******************************************************************
"""
@pytest.mark.skip("get_collection_stats and drop_index do not impl")
@pytest.mark.skip("get_collection_stats")
def test_drop_index(self, connect, binary_collection, get_jaccard_index):
'''
target: test drop index interface
......
from tests.utils import *
from tests.constants import *
uniq_id = "load_collection"
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
def test_load_collection(self, connect, collection):
'''
target: test load collection and wait for loading collection
method: insert then flush, when flushed, try load collection
expected: no errors
'''
ids = connect.insert(collection, default_entities)
ids = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection)
from tests.utils import *
from tests.constants import *
uniq_id = "load_partitions"
class TestLoadPartitions:
"""
******************************************************************
The following cases are used to test `load_partitions` function
******************************************************************
"""
def test_load_partitions(self, connect, collection):
'''
target: test load collection and wait for loading collection
method: insert then flush, when flushed, try load collection
expected: no errors
'''
partition_tag = "lvn9pq34u8rasjk"
connect.create_partition(collection, partition_tag + "1")
ids = connect.insert(collection, default_entities, partition_tag=partition_tag + "1")
connect.create_partition(collection, partition_tag + "2")
ids = connect.insert(collection, default_entity, partition_tag=partition_tag + "2")
connect.flush([collection])
connect.load_partitions(collection, [partition_tag + "2"])