package dataservice

import (
	"context"
	"errors"
	"fmt"
	"log"
	"path"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/protobuf/proto"
	"go.etcd.io/etcd/clientv3"

	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
	"github.com/zilliztech/milvus-distributed/internal/timesync"
	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)

const role = "dataservice"

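// DataService is the interface exposed by the data service. It covers the
// generic Service/Component lifecycle plus data node registration, segment
// allocation, segment state queries, binlog path lookup, and channel
// discovery.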
type DataService interface {
	typeutil.Service
	typeutil.Component
	RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
	Flush(req *datapb.FlushRequest) (*commonpb.Status, error)

	AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
	ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error)
	GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
	GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
	GetSegmentInfoChannel() (string, error)
	GetInsertChannels(req *datapb.InsertChannelRequest) ([]string, error)
	GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
	GetPartitionStatistics(req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error)
	GetComponentStates() (*internalpb2.ComponentStates, error)
}

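// MasterClient captures the subset of the master service API that the data
// service depends on: collection and partition metadata, the DD channel
// name, and timestamp/ID allocation.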
type MasterClient interface {
	ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
	DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
	ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
	GetDdChannel() (string, error)
	AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
	AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error)
	GetComponentStates() (*internalpb2.ComponentStates, error)
}

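// UniqueID and Timestamp alias the shared typeutil types. Server implements
// DataService: it owns the etcd-backed segment meta, the segment allocator,
// the data node cluster view, and the Pulsar streams used for time ticks,
// k2s messages, and segment info.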
type (
	UniqueID  = typeutil.UniqueID
	Timestamp = typeutil.Timestamp
	Server    struct {
		ctx                context.Context
		serverLoopCtx      context.Context
		serverLoopCancel   context.CancelFunc
		serverLoopWg       sync.WaitGroup
		state              atomic.Value
		client             *etcdkv.EtcdKV
		meta               *meta
		segAllocator       segmentAllocator
		statsHandler       *statsHandler
		insertChannelMgr   *insertChannelManager
		allocator          allocator
		cluster            *dataNodeCluster
		msgProducer        *timesync.MsgProducer
		registerFinishCh   chan struct{}
		masterClient       MasterClient
		ttMsgStream        msgstream.MsgStream
		k2sMsgStream       msgstream.MsgStream
		ddChannelName      string
		segmentInfoStream  msgstream.MsgStream
		segmentFlushStream msgstream.MsgStream
	}
)

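// CreateServer builds a Server in the INITIALIZING state. The heavy
// initialization (meta, streams, server loops) happens in Start.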
func CreateServer(ctx context.Context) (*Server, error) {
	Params.Init()
	ch := make(chan struct{})
	s := &Server{
		ctx:              ctx,
		insertChannelMgr: newInsertChannelManager(),
		registerFinishCh: ch,
		cluster:          newDataNodeCluster(ch),
	}
	s.state.Store(internalpb2.StateCode_INITIALIZING)
	return s, nil
}

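// SetMasterClient injects the master client. It must be called before Start.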
func (s *Server) SetMasterClient(masterClient MasterClient) {
	s.masterClient = masterClient
}

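// Init is part of the typeutil.Service lifecycle; all actual initialization
// currently happens in Start.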
func (s *Server) Init() error {
	return nil
}

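// Start initializes the meta store, the segment allocator, the message
// streams and the time-tick producer, loads collection meta from master,
// starts the server loops, and waits for all data nodes to register before
// marking the server healthy.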
func (s *Server) Start() error {
	var err error
	s.allocator = newAllocatorImpl(s.masterClient)
	if err = s.initMeta(); err != nil {
		return err
	}
	s.statsHandler = newStatsHandler(s.meta)
	s.segAllocator, err = newSegmentAllocator(s.meta, s.allocator)
	if err != nil {
		return err
	}
	s.initSegmentInfoChannel()
	if err = s.initMsgProducer(); err != nil {
		return err
	}
	if err = s.loadMetaFromMaster(); err != nil {
		return err
	}
	s.startServerLoop()
	s.waitDataNodeRegister()
	s.state.Store(internalpb2.StateCode_HEALTHY)
	log.Println("start success")
	return nil
}

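// checkStateIsHealthy reports whether the server has finished starting up.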
func (s *Server) checkStateIsHealthy() bool {
	return s.state.Load().(internalpb2.StateCode) == internalpb2.StateCode_HEALTHY
}

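// initMeta connects to etcd and builds the segment meta on top of it.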
func (s *Server) initMeta() error {
	etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
	if err != nil {
		return err
	}
	etcdKV := etcdkv.NewEtcdKV(etcdClient, Params.MetaRootPath)
	s.client = etcdKV
	s.meta, err = newMeta(etcdKV)
	if err != nil {
		return err
	}
	return nil
}

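// initSegmentInfoChannel opens the Pulsar producer used to broadcast newly
// opened segments.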
func (s *Server) initSegmentInfoChannel() {
	segmentInfoStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
	segmentInfoStream.SetPulsarClient(Params.PulsarAddress)
	segmentInfoStream.CreatePulsarProducers([]string{Params.SegmentInfoChannelName})
	s.segmentInfoStream = segmentInfoStream
	s.segmentInfoStream.Start()
}
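
// initMsgProducer wires the time-tick pipeline: it consumes data node time
// ticks, feeds them through a hard time-tick barrier, and republishes them
// to the data node watcher and the k2s channels.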
func (s *Server) initMsgProducer() error {
	ttMsgStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
	ttMsgStream.SetPulsarClient(Params.PulsarAddress)
	ttMsgStream.CreatePulsarConsumers([]string{Params.TimeTickChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
	s.ttMsgStream = ttMsgStream
	s.ttMsgStream.Start()
	timeTickBarrier := timesync.NewHardTimeTickBarrier(s.ttMsgStream, s.cluster.GetNodeIDs())
	dataNodeTTWatcher := newDataNodeTimeTickWatcher(s.meta, s.segAllocator, s.cluster)
	k2sStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
	k2sStream.SetPulsarClient(Params.PulsarAddress)
	k2sStream.CreatePulsarProducers(Params.K2SChannelNames)
	s.k2sMsgStream = k2sStream
	s.k2sMsgStream.Start()
	k2sMsgWatcher := timesync.NewMsgTimeTickWatcher(s.k2sMsgStream)
	producer, err := timesync.NewTimeSyncMsgProducer(timeTickBarrier, dataNodeTTWatcher, k2sMsgWatcher)
	if err != nil {
		return err
	}
	s.msgProducer = producer
	s.msgProducer.Start(s.ctx)
	return nil
}

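// loadMetaFromMaster pulls every collection and its partitions from master
// and mirrors them into the local meta. Failures on individual collections
// are logged and skipped.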
func (s *Server) loadMetaFromMaster() error {
	log.Println("loading collection meta from master")
	if err := s.checkMasterIsHealthy(); err != nil {
		return err
	}
	collections, err := s.masterClient.ShowCollections(&milvuspb.ShowCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType:   commonpb.MsgType_kShowCollections,
			MsgID:     -1, // todo add msg id
			Timestamp: 0,  // todo
			SourceID:  Params.NodeID,
		},
		DbName: "",
	})
	if err != nil {
		return err
	}
	for _, collectionName := range collections.CollectionNames {
		collection, err := s.masterClient.DescribeCollection(&milvuspb.DescribeCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_kDescribeCollection,
				MsgID:     -1, // todo
				Timestamp: 0,  // todo
				SourceID:  Params.NodeID,
			},
			DbName:         "",
			CollectionName: collectionName,
		})
		if err != nil {
			log.Println(err.Error())
			continue
		}
		partitions, err := s.masterClient.ShowPartitions(&milvuspb.ShowPartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_kShowPartitions,
				MsgID:     -1, // todo
				Timestamp: 0,  // todo
				SourceID:  Params.NodeID,
			},
			DbName:         "",
			CollectionName: collectionName,
			CollectionID:   collection.CollectionID,
		})
		if err != nil {
			log.Println(err.Error())
			continue
		}
		err = s.meta.AddCollection(&collectionInfo{
			ID:         collection.CollectionID,
			Schema:     collection.Schema,
			Partitions: partitions.PartitionIDs,
		})
		if err != nil {
			log.Println(err.Error())
			continue
		}
	}
	log.Println("load collection meta from master complete")
	return nil
}

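// checkMasterIsHealthy polls master's component states every 300ms until it
// reports HEALTHY, giving up after 30 seconds.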
func (s *Server) checkMasterIsHealthy() error {
	ticker := time.NewTicker(300 * time.Millisecond)
	ctx, cancel := context.WithTimeout(s.ctx, 30*time.Second)
	defer func() {
		ticker.Stop()
		cancel()
	}()
	for {
		var resp *internalpb2.ComponentStates
		var err error
		select {
		case <-ctx.Done():
			return fmt.Errorf("master is not healthy")
		case <-ticker.C:
			resp, err = s.masterClient.GetComponentStates()
			if err != nil {
				return err
			}
			if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
				return errors.New(resp.Status.Reason)
			}
		}
		if resp.State.StateCode == internalpb2.StateCode_HEALTHY {
			break
		}
	}
	return nil
}

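// startServerLoop launches the background consumers for segment statistics
// and segment flush notifications.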
func (s *Server) startServerLoop() {
	s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.ctx)
	s.serverLoopWg.Add(2)
	go s.startStatsChannel(s.serverLoopCtx)
	go s.startSegmentFlushChannel(s.serverLoopCtx)
}

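// startStatsChannel consumes segment statistics published by the data nodes
// and applies them to the segment meta.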
func (s *Server) startStatsChannel(ctx context.Context) {
	defer s.serverLoopWg.Done()
	statsStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
	statsStream.SetPulsarClient(Params.PulsarAddress)
	statsStream.CreatePulsarConsumers([]string{Params.StatisticsChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
	statsStream.Start()
	defer statsStream.Close()
	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		msgPack := statsStream.Consume()
		for _, msg := range msgPack.Msgs {
			statistics, ok := msg.(*msgstream.SegmentStatisticsMsg)
			if !ok {
				log.Println("received unexpected message type on the statistics channel")
				continue
			}
			for _, stat := range statistics.SegStats {
				if err := s.statsHandler.HandleSegmentStat(stat); err != nil {
					log.Println(err.Error())
					continue
				}
			}
		}
	}
}

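// startSegmentFlushChannel watches for flush-completed messages and records
// the flushed time on the corresponding segment.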
func (s *Server) startSegmentFlushChannel(ctx context.Context) {
	defer s.serverLoopWg.Done()
	flushStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
	flushStream.SetPulsarClient(Params.PulsarAddress)
	flushStream.CreatePulsarConsumers([]string{Params.SegmentInfoChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
	flushStream.Start()
	defer flushStream.Close()
	for {
		select {
		case <-ctx.Done():
			log.Println("segment flush channel shut down")
			return
		default:
		}
		msgPack := flushStream.Consume()
		for _, msg := range msgPack.Msgs {
			if msg.Type() != commonpb.MsgType_kSegmentFlushDone {
				continue
			}
			realMsg := msg.(*msgstream.FlushCompletedMsg)

			segmentInfo, err := s.meta.GetSegment(realMsg.SegmentID)
			if err != nil {
				log.Println(err.Error())
				continue
			}
			segmentInfo.FlushedTime = realMsg.BeginTimestamp
			if err = s.meta.UpdateSegment(segmentInfo); err != nil {
				log.Println(err.Error())
				continue
			}
		}
	}
}

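// waitDataNodeRegister blocks until the data node cluster signals that all
// expected nodes have registered.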
func (s *Server) waitDataNodeRegister() {
	log.Println("waiting data node to register")
	<-s.registerFinishCh
	log.Println("all data nodes register")
}

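// Stop closes all message streams and the time-tick producer, then stops the
// server loops.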
func (s *Server) Stop() error {
	s.ttMsgStream.Close()
	s.k2sMsgStream.Close()
	s.msgProducer.Close()
	s.segmentInfoStream.Close()
	s.stopServerLoop()
	return nil
}

func (s *Server) stopServerLoop() {
	s.serverLoopCancel()
	s.serverLoopWg.Wait()
}

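// GetComponentStates reports the server's own state plus the states of all
// registered data nodes.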
func (s *Server) GetComponentStates() (*internalpb2.ComponentStates, error) {
	resp := &internalpb2.ComponentStates{
		State: &internalpb2.ComponentInfo{
			NodeID:    Params.NodeID,
			Role:      role,
			StateCode: s.state.Load().(internalpb2.StateCode),
		},
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
		},
	}
	dataNodeStates, err := s.cluster.GetDataNodeStates()
	if err != nil {
		resp.Status.Reason = err.Error()
		return resp, nil
	}
	resp.SubcomponentStates = dataNodeStates
	resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
	return resp, nil
}

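// GetTimeTickChannel returns the name of the channel carrying data node time
// ticks.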
func (s *Server) GetTimeTickChannel() (string, error) {
	return Params.TimeTickChannelName, nil
}

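// GetStatisticsChannel returns the name of the channel carrying segment
// statistics.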
func (s *Server) GetStatisticsChannel() (string, error) {
	return Params.StatisticsChannelName, nil
}

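// RegisterNode adds a data node to the cluster and hands back the channel
// names the node needs in its init params. The DD channel name is fetched
// lazily from master on first registration.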
func (s *Server) RegisterNode(req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error) {
	ret := &datapb.RegisterNodeResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
		},
	}
	s.cluster.Register(req.Address.Ip, req.Address.Port, req.Base.SourceID)
	if s.ddChannelName == "" {
		resp, err := s.masterClient.GetDdChannel()
		if err != nil {
			ret.Status.Reason = err.Error()
			return ret, err
		}
		s.ddChannelName = resp
	}
	ret.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
	ret.InitParams = &internalpb2.InitParams{
		NodeID: Params.NodeID,
		StartParams: []*commonpb.KeyValuePair{
			{Key: "DDChannelName", Value: s.ddChannelName},
			{Key: "SegmentStatisticsChannelName", Value: Params.StatisticsChannelName},
			{Key: "TimeTickChannelName", Value: Params.TimeTickChannelName},
			{Key: "CompleteFlushChannelName", Value: Params.SegmentInfoChannelName},
		},
	}
	return ret, nil
}

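// Flush seals all open segments of the given collection so they get flushed
// by the data nodes.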
func (s *Server) Flush(req *datapb.FlushRequest) (*commonpb.Status, error) {
	if !s.checkStateIsHealthy() {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
			Reason:    "server is initializing",
		}, nil
	}
	s.segAllocator.SealAllSegments(req.CollectionID)
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_SUCCESS,
	}, nil
}

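// AssignSegmentID allocates rows in segments for each request. If the
// current segment of a channel cannot hold the requested count, a new
// segment is opened and the allocation retried; per-request failures are
// reported in the corresponding assignment result.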
func (s *Server) AssignSegmentID(req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
	resp := &datapb.AssignSegIDResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_SUCCESS,
		},
		SegIDAssignments: make([]*datapb.SegIDAssignment, 0),
	}
	if !s.checkStateIsHealthy() {
		resp.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
		resp.Status.Reason = "server is initializing"
		return resp, nil
	}
	for _, r := range req.SegIDRequests {
		result := &datapb.SegIDAssignment{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
			},
		}
		segmentID, retCount, expireTs, err := s.segAllocator.AllocSegment(r.CollectionID, r.PartitionID, r.ChannelName, int(r.Count))
		if err != nil {
			if _, ok := err.(errRemainInSufficient); !ok {
				result.Status.Reason = fmt.Sprintf("allocation of Collection %d, Partition %d, Channel %s, Count %d error: %s",
					r.CollectionID, r.PartitionID, r.ChannelName, r.Count, err.Error())
				resp.SegIDAssignments = append(resp.SegIDAssignments, result)
				continue
			}

			log.Printf("not enough space for allocation of Collection %d, Partition %d, Channel %s, Count %d",
				r.CollectionID, r.PartitionID, r.ChannelName, r.Count)
			if err = s.openNewSegment(r.CollectionID, r.PartitionID, r.ChannelName); err != nil {
				result.Status.Reason = fmt.Sprintf("open new segment of Collection %d, Partition %d, Channel %s, Count %d error: %s",
					r.CollectionID, r.PartitionID, r.ChannelName, r.Count, err.Error())
				resp.SegIDAssignments = append(resp.SegIDAssignments, result)
				continue
			}

			segmentID, retCount, expireTs, err = s.segAllocator.AllocSegment(r.CollectionID, r.PartitionID, r.ChannelName, int(r.Count))
			if err != nil {
				result.Status.Reason = fmt.Sprintf("retry allocation of Collection %d, Partition %d, Channel %s, Count %d error: %s",
					r.CollectionID, r.PartitionID, r.ChannelName, r.Count, err.Error())
				resp.SegIDAssignments = append(resp.SegIDAssignments, result)
				continue
			}
		}

		result.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
		result.CollectionID = r.CollectionID
		result.SegID = segmentID
		result.PartitionID = r.PartitionID
		result.Count = uint32(retCount)
		result.ExpireTime = expireTs
		result.ChannelName = r.ChannelName
		resp.SegIDAssignments = append(resp.SegIDAssignments, result)
	}
	return resp, nil
}

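// openNewSegment allocates a fresh segment ID, registers the segment in meta
// and the allocator, and announces it on the segment info channel.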
func (s *Server) openNewSegment(collectionID UniqueID, partitionID UniqueID, channelName string) error {
	group, err := s.insertChannelMgr.GetChannelGroup(collectionID, channelName)
	if err != nil {
		return err
	}

	id, err := s.allocator.allocID()
	if err != nil {
		return err
	}
	segmentInfo, err := BuildSegment(collectionID, partitionID, id, group)
	if err != nil {
		return err
	}
	if err = s.meta.AddSegment(segmentInfo); err != nil {
		return err
	}
	if err = s.segAllocator.OpenSegment(segmentInfo); err != nil {
		return err
	}
	infoMsg := &msgstream.SegmentInfoMsg{
		SegmentMsg: datapb.SegmentMsg{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_kSegmentInfo,
				MsgID:     0,
				Timestamp: 0, // todo
				SourceID:  0,
			},
			Segment: segmentInfo,
		},
	}
	msgPack := &msgstream.MsgPack{
		Msgs: []msgstream.TsMsg{infoMsg},
	}
	if err = s.segmentInfoStream.Produce(msgPack); err != nil {
		return err
	}
	return nil
}

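// ShowSegments lists the segment IDs belonging to a collection/partition.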
func (s *Server) ShowSegments(req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error) {
	if !s.checkStateIsHealthy() {
		return &datapb.ShowSegmentResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
				Reason:    "server is initializing",
			},
		}, nil
	}
	ids := s.meta.GetSegmentsByCollectionAndPartitionID(req.CollectionID, req.PartitionID)
	return &datapb.ShowSegmentResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_SUCCESS,
		},
		SegmentIDs: ids,
	}, nil
}

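// GetSegmentStates returns the lifecycle state and timestamps of a segment.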
func (s *Server) GetSegmentStates(req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error) {
	resp := &datapb.SegmentStatesResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
		},
	}
	if !s.checkStateIsHealthy() {
		resp.Status.Reason = "server is initializing"
		return resp, nil
	}

	segmentInfo, err := s.meta.GetSegment(req.SegmentID)
	if err != nil {
		resp.Status.Reason = "get segment states error: " + err.Error()
		return resp, nil
	}
	resp.State = segmentInfo.State
	resp.CreateTime = segmentInfo.OpenTime
	resp.SealedTime = segmentInfo.SealedTime
	resp.FlushedTime = segmentInfo.FlushedTime
	resp.StartPositions = segmentInfo.StartPosition
	resp.EndPositions = segmentInfo.EndPosition
	resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
	return resp, nil
}

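// GetInsertBinlogPaths loads the flush meta of a segment from etcd and
// returns the binlog paths grouped by field.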
func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error) {
	// todo
	resp := &datapb.InsertBinlogPathsResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
		},
	}
	p := path.Join(Params.SegmentFlushMetaPath, strconv.FormatInt(req.SegmentID, 10))
	value, err := s.client.Load(p)
	if err != nil {
		resp.Status.Reason = err.Error()
		return resp, nil
	}
	flushMeta := &datapb.SegmentFlushMeta{}
	err = proto.UnmarshalText(value, flushMeta)
	if err != nil {
		resp.Status.Reason = err.Error()
		return resp, nil
	}
	// Allocate with zero length and the right capacity; a non-zero length
	// followed by append would leave zero-valued entries at the front.
	fields := make([]UniqueID, 0, len(flushMeta.Fields))
	paths := make([]*internalpb2.StringList, 0, len(flushMeta.Fields))
	for _, field := range flushMeta.Fields {
		fields = append(fields, field.FieldID)
		paths = append(paths, &internalpb2.StringList{Values: field.BinlogPaths})
	}
	resp.FieldIDs = fields
	resp.Paths = paths
	resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
	return resp, nil
}

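// GetInsertChannels returns the insert channels assigned to a collection,
// allocating and watching a new set of channel groups on first use.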
func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) ([]string, error) {
	if !s.checkStateIsHealthy() {
		return nil, errors.New("server is initializing")
	}
	contains, ret := s.insertChannelMgr.ContainsCollection(req.CollectionID)
	if contains {
		return ret, nil
	}
	channelGroups, err := s.insertChannelMgr.AllocChannels(req.CollectionID, s.cluster.GetNumOfNodes())
	if err != nil {
		return nil, err
	}

	// Flatten the per-node channel groups into a single list. Start from an
	// empty slice; make([]string, n) followed by append would leave n empty
	// strings at the front.
	channels := make([]string, 0, Params.InsertChannelNumPerCollection)
	for _, group := range channelGroups {
		channels = append(channels, group...)
	}
	s.cluster.WatchInsertChannels(channelGroups)

	return channels, nil
}

func (s *Server) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
	// todo implement
	return nil, nil
}

func (s *Server) GetPartitionStatistics(req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error) {
	// todo implement
	return nil, nil
}

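// GetSegmentInfoChannel returns the name of the channel carrying segment
// info messages.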
func (s *Server) GetSegmentInfoChannel() (string, error) {
	return Params.SegmentInfoChannelName, nil
}