task.go 26.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

12 13 14 15 16 17 18
package querynode

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
19
	"strconv"
20
	"time"
21 22 23

	"go.uber.org/zap"

X
Xiangyu Wang 已提交
24
	"github.com/milvus-io/milvus/internal/log"
25
	"github.com/milvus-io/milvus/internal/proto/datapb"
X
Xiangyu Wang 已提交
26 27
	"github.com/milvus-io/milvus/internal/proto/internalpb"
	queryPb "github.com/milvus-io/milvus/internal/proto/querypb"
28
	"github.com/milvus-io/milvus/internal/rootcoord"
29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48
)

// task is the interface implemented by every queryNode scheduler task.
// A task is registered via OnEnqueue, run in three phases
// (PreExecute/Execute/PostExecute), and signals completion through
// Notify; callers block on WaitToFinish for the final error.
type task interface {
	ID() UniqueID       // return ReqID
	SetID(uid UniqueID) // set ReqID
	Timestamp() Timestamp
	PreExecute(ctx context.Context) error
	Execute(ctx context.Context) error
	PostExecute(ctx context.Context) error
	WaitToFinish() error
	Notify(err error)
	OnEnqueue() error
}

// baseTask holds the state shared by all queryNode tasks: the completion
// channel used by Notify/WaitToFinish, the task context, and the request ID.
type baseTask struct {
	done chan error // carries the task's final error; see Notify and WaitToFinish
	ctx  context.Context
	id   UniqueID // request ID; MsgID of the request, or random when absent
}

49 50 51 52 53 54
// watchDmChannelsTask subscribes the query node to the DM channels of one
// collection (or one of its partitions), as described by the request.
type watchDmChannelsTask struct {
	baseTask
	req  *queryPb.WatchDmChannelsRequest
	node *QueryNode
}

55 56 57 58 59 60
// watchDeltaChannelsTask subscribes the query node to the delta channels
// of one collection, as described by the request.
type watchDeltaChannelsTask struct {
	baseTask
	req  *queryPb.WatchDeltaChannelsRequest
	node *QueryNode
}

61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
// loadSegmentsTask loads the sealed segments listed in the request into
// the query node's historical replica.
type loadSegmentsTask struct {
	baseTask
	req  *queryPb.LoadSegmentsRequest
	node *QueryNode
}

// releaseCollectionTask releases all resources of one collection from
// both the streaming and the historical replica.
type releaseCollectionTask struct {
	baseTask
	req  *queryPb.ReleaseCollectionRequest
	node *QueryNode
}

// releasePartitionsTask releases the resources of the requested
// partitions from both the streaming and the historical replica.
type releasePartitionsTask struct {
	baseTask
	req  *queryPb.ReleasePartitionsRequest
	node *QueryNode
}

// ID returns the request ID of the task.
func (b *baseTask) ID() UniqueID {
	return b.id
}

// SetID sets the request ID of the task.
func (b *baseTask) SetID(uid UniqueID) {
	b.id = uid
}

func (b *baseTask) WaitToFinish() error {
88 89
	err := <-b.done
	return err
90 91 92 93 94 95
}

// Notify delivers the task's final error (nil on success) to the
// goroutine blocked in WaitToFinish.
func (b *baseTask) Notify(err error) {
	b.done <- err
}

96 97 98
// watchDmChannelsTask
func (w *watchDmChannelsTask) Timestamp() Timestamp {
	if w.req.Base == nil {
B
bigsheeper 已提交
99
		log.Warn("nil base req in watchDmChannelsTask", zap.Any("collectionID", w.req.CollectionID))
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
		return 0
	}
	return w.req.Base.Timestamp
}

// OnEnqueue assigns the task ID: the request's MsgID when available,
// otherwise a random identifier.
func (w *watchDmChannelsTask) OnEnqueue() error {
	if w.req != nil && w.req.Base != nil {
		w.SetID(w.req.Base.MsgID)
		return nil
	}
	w.SetID(rand.Int63n(100000000000))
	return nil
}

// PreExecute does nothing for watchDmChannelsTask.
func (w *watchDmChannelsTask) PreExecute(ctx context.Context) error {
	return nil
}

func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
	collectionID := w.req.CollectionID
120
	partitionID := w.req.PartitionID
121
	// if no partitionID is specified, load type is load collection
122
	loadPartition := partitionID != 0
123

124 125 126
	// get all vChannels
	vChannels := make([]Channel, 0)
	pChannels := make([]Channel, 0)
127
	VPChannels := make(map[string]string) // map[vChannel]pChannel
128
	for _, info := range w.req.Infos {
129 130 131 132 133
		v := info.ChannelName
		p := rootcoord.ToPhysicalChannel(info.ChannelName)
		vChannels = append(vChannels, v)
		pChannels = append(pChannels, p)
		VPChannels[v] = p
134
	}
G
groot 已提交
135
	log.Debug("Starting WatchDmChannels ...",
136 137
		zap.Any("collectionName", w.req.Schema.Name),
		zap.Any("collectionID", collectionID),
138 139
		zap.Any("vChannels", vChannels),
		zap.Any("pChannels", pChannels),
140 141 142 143
	)
	if len(VPChannels) != len(vChannels) {
		return errors.New("get physical channels failed, illegal channel length, collectionID = " + fmt.Sprintln(collectionID))
	}
G
groot 已提交
144
	log.Debug("Get physical channels done",
145 146
		zap.Any("collectionID", collectionID),
	)
147 148 149 150 151 152 153 154

	// init replica
	if hasCollectionInStreaming := w.node.streaming.replica.hasCollection(collectionID); !hasCollectionInStreaming {
		err := w.node.streaming.replica.addCollection(collectionID, w.req.Schema)
		if err != nil {
			return err
		}
	}
155
	// init replica
156 157 158 159 160 161
	if hasCollectionInHistorical := w.node.historical.replica.hasCollection(collectionID); !hasCollectionInHistorical {
		err := w.node.historical.replica.addCollection(collectionID, w.req.Schema)
		if err != nil {
			return err
		}
	}
162 163 164 165 166 167 168 169 170 171 172 173 174
	var l loadType
	if loadPartition {
		l = loadTypePartition
	} else {
		l = loadTypeCollection
	}
	sCol, err := w.node.streaming.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	sCol.addVChannels(vChannels)
	sCol.addPChannels(pChannels)
	sCol.setLoadType(l)
G
godchen 已提交
175 176 177 178 179 180 181
	hCol, err := w.node.historical.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	hCol.addVChannels(vChannels)
	hCol.addPChannels(pChannels)
	hCol.setLoadType(l)
182
	if loadPartition {
183
		sCol.deleteReleasedPartition(partitionID)
G
godchen 已提交
184
		hCol.deleteReleasedPartition(partitionID)
185 186 187 188 189 190
		if hasPartitionInStreaming := w.node.streaming.replica.hasPartition(partitionID); !hasPartitionInStreaming {
			err := w.node.streaming.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
G
godchen 已提交
191 192 193 194 195 196
		if hasPartitionInHistorical := w.node.historical.replica.hasPartition(partitionID); !hasPartitionInHistorical {
			err := w.node.historical.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
197
	}
198
	log.Debug("watchDMChannel, init replica done", zap.Any("collectionID", collectionID))
199

200
	// get subscription name
201 202
	getUniqueSubName := func() string {
		prefixName := Params.MsgChannelSubName
203
		return prefixName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
204
	}
205
	consumeSubName := getUniqueSubName()
206

207 208
	// group channels by to seeking or consuming
	toSeekChannels := make([]*internalpb.MsgPosition, 0)
209
	toSubChannels := make([]Channel, 0)
210
	for _, info := range w.req.Infos {
211 212
		if info.SeekPosition == nil || len(info.SeekPosition.MsgID) == 0 {
			toSubChannels = append(toSubChannels, info.ChannelName)
213 214
			continue
		}
215 216
		info.SeekPosition.MsgGroup = consumeSubName
		toSeekChannels = append(toSeekChannels, info.SeekPosition)
217
	}
218
	log.Debug("watchDMChannel, group channels done", zap.Any("collectionID", collectionID))
219

220 221 222
	// add excluded segments for unFlushed segments,
	// unFlushed segments before check point should be filtered out.
	unFlushedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
223
	for _, info := range w.req.Infos {
224
		unFlushedCheckPointInfos = append(unFlushedCheckPointInfos, info.UnflushedSegments...)
225
	}
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250
	w.node.streaming.replica.addExcludedSegments(collectionID, unFlushedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for unFlushed segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedCheckPointInfos", unFlushedCheckPointInfos),
	)

	// add excluded segments for flushed segments,
	// flushed segments with later check point than seekPosition should be filtered out.
	flushedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
	for _, info := range w.req.Infos {
		for _, flushedSegment := range info.FlushedSegments {
			for _, position := range toSeekChannels {
				if flushedSegment.DmlPosition != nil &&
					flushedSegment.DmlPosition.ChannelName == position.ChannelName &&
					flushedSegment.DmlPosition.Timestamp > position.Timestamp {
					flushedCheckPointInfos = append(flushedCheckPointInfos, flushedSegment)
				}
			}
		}
	}
	w.node.streaming.replica.addExcludedSegments(collectionID, flushedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for flushed segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("flushedCheckPointInfos", flushedCheckPointInfos),
	)
251

252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
	// add excluded segments for dropped segments,
	// dropped segments with later check point than seekPosition should be filtered out.
	droppedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
	for _, info := range w.req.Infos {
		for _, droppedSegment := range info.DroppedSegments {
			for _, position := range toSeekChannels {
				if droppedSegment != nil &&
					droppedSegment.DmlPosition.ChannelName == position.ChannelName &&
					droppedSegment.DmlPosition.Timestamp > position.Timestamp {
					droppedCheckPointInfos = append(droppedCheckPointInfos, droppedSegment)
				}
			}
		}
	}
	w.node.streaming.replica.addExcludedSegments(collectionID, droppedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for dropped segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("droppedCheckPointInfos", droppedCheckPointInfos),
	)

272
	// create tSafe
273
	for _, channel := range vChannels {
274
		w.node.tSafeReplica.addTSafe(channel)
275 276 277 278
	}

	// add flow graph
	if loadPartition {
279
		w.node.dataSyncService.addPartitionFlowGraph(collectionID, partitionID, vChannels)
G
groot 已提交
280
		log.Debug("Query node add partition flow graphs", zap.Any("channels", vChannels))
281
	} else {
282
		w.node.dataSyncService.addCollectionFlowGraph(collectionID, vChannels)
G
groot 已提交
283
		log.Debug("Query node add collection flow graphs", zap.Any("channels", vChannels))
284 285
	}

286 287 288 289 290 291 292 293 294 295 296 297
	// add tSafe watcher if queryCollection exists
	qc, err := w.node.queryService.getQueryCollection(collectionID)
	if err == nil {
		for _, channel := range vChannels {
			err = qc.addTSafeWatcher(channel)
			if err != nil {
				// tSafe have been exist, not error
				log.Warn(err.Error())
			}
		}
	}

298
	// channels as consumer
299
	var nodeFGs map[Channel]*queryNodeFlowGraph
300
	if loadPartition {
301
		nodeFGs, err = w.node.dataSyncService.getPartitionFlowGraphs(partitionID, vChannels)
302 303 304 305
		if err != nil {
			return err
		}
	} else {
306
		nodeFGs, err = w.node.dataSyncService.getCollectionFlowGraphs(collectionID, vChannels)
307 308 309
		if err != nil {
			return err
		}
310
	}
311
	for _, channel := range toSubChannels {
312 313
		for _, fg := range nodeFGs {
			if fg.channel == channel {
314 315
				// use pChannel to consume
				err := fg.consumerFlowGraph(VPChannels[channel], consumeSubName)
316 317
				if err != nil {
					errMsg := "msgStream consume error :" + err.Error()
B
bigsheeper 已提交
318
					log.Warn(errMsg)
319 320 321 322 323
					return errors.New(errMsg)
				}
			}
		}
	}
324 325 326
	log.Debug("as consumer channels",
		zap.Any("collectionID", collectionID),
		zap.Any("toSubChannels", toSubChannels))
327

328 329
	// seek channel
	for _, pos := range toSeekChannels {
330 331
		for _, fg := range nodeFGs {
			if fg.channel == pos.ChannelName {
332
				pos.MsgGroup = consumeSubName
333 334
				// use pChannel to seek
				pos.ChannelName = VPChannels[fg.channel]
335 336 337
				err := fg.seekQueryNodeFlowGraph(pos)
				if err != nil {
					errMsg := "msgStream seek error :" + err.Error()
B
bigsheeper 已提交
338
					log.Warn(errMsg)
339 340 341
					return errors.New(errMsg)
				}
			}
342 343
		}
	}
G
groot 已提交
344
	log.Debug("Seek all channel done",
345 346
		zap.Any("collectionID", collectionID),
		zap.Any("toSeekChannels", toSeekChannels))
347

348 349 350 351 352
	// load growing segments
	unFlushedSegments := make([]*queryPb.SegmentLoadInfo, 0)
	unFlushedSegmentIDs := make([]UniqueID, 0)
	for _, info := range w.req.Infos {
		for _, ufInfo := range info.UnflushedSegments {
353 354 355 356 357 358 359 360 361 362 363 364 365
			// unFlushed segment may not have binLogs, skip loading
			if len(ufInfo.Binlogs) > 0 {
				unFlushedSegments = append(unFlushedSegments, &queryPb.SegmentLoadInfo{
					SegmentID:    ufInfo.ID,
					PartitionID:  ufInfo.PartitionID,
					CollectionID: ufInfo.CollectionID,
					BinlogPaths:  ufInfo.Binlogs,
					NumOfRows:    ufInfo.NumOfRows,
					Statslogs:    ufInfo.Statslogs,
					Deltalogs:    ufInfo.Deltalogs,
				})
				unFlushedSegmentIDs = append(unFlushedSegmentIDs, ufInfo.ID)
			}
366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
		}
	}
	req := &queryPb.LoadSegmentsRequest{
		Infos:        unFlushedSegments,
		CollectionID: collectionID,
		Schema:       w.req.Schema,
	}
	log.Debug("loading growing segments in WatchDmChannels...",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedSegmentIDs", unFlushedSegmentIDs),
	)
	err = w.node.loader.loadSegment(req, segmentTypeGrowing)
	if err != nil {
		return err
	}
	log.Debug("load growing segments done in WatchDmChannels",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedSegmentIDs", unFlushedSegmentIDs),
	)

386 387
	// start flow graphs
	if loadPartition {
388
		err = w.node.dataSyncService.startPartitionFlowGraph(partitionID, vChannels)
389 390 391 392
		if err != nil {
			return err
		}
	} else {
393
		err = w.node.dataSyncService.startCollectionFlowGraph(collectionID, vChannels)
394 395 396
		if err != nil {
			return err
		}
397
	}
398

399
	log.Debug("WatchDmChannels done", zap.String("ChannelIDs", fmt.Sprintln(vChannels)))
400 401 402 403 404 405 406
	return nil
}

// PostExecute does nothing for watchDmChannelsTask.
func (w *watchDmChannelsTask) PostExecute(ctx context.Context) error {
	return nil
}

407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461
// watchDeltaChannelsTask
// Timestamp returns the timestamp carried in the request base, or zero
// (after logging a warning) when the base message info is missing.
func (w *watchDeltaChannelsTask) Timestamp() Timestamp {
	if w.req.Base != nil {
		return w.req.Base.Timestamp
	}
	log.Warn("nil base req in watchDeltaChannelsTask", zap.Any("collectionID", w.req.CollectionID))
	return 0
}

// OnEnqueue assigns the task ID: the request's MsgID when available,
// otherwise a random identifier.
func (w *watchDeltaChannelsTask) OnEnqueue() error {
	if w.req != nil && w.req.Base != nil {
		w.SetID(w.req.Base.MsgID)
		return nil
	}
	w.SetID(rand.Int63n(100000000000))
	return nil
}

// PreExecute does nothing for watchDeltaChannelsTask.
func (w *watchDeltaChannelsTask) PreExecute(ctx context.Context) error {
	return nil
}

// Execute performs the watch-delta-channels procedure: it resolves the
// virtual delta channels to physical channels, records them on the
// collection in both replicas (the collection must already exist),
// creates tSafes and the collection delta flow graph, subscribes the
// flow graph consumers, loads deletes from the DML checkpoint, and
// finally starts the delta flow graph. If any requested delta channel
// is already watched on the historical collection, the task is a no-op.
func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
	collectionID := w.req.CollectionID

	// get all vChannels
	vDeltaChannels := make([]Channel, 0)
	pDeltaChannels := make([]Channel, 0)
	VPDeltaChannels := make(map[string]string) // map[vChannel]pChannel
	for _, info := range w.req.Infos {
		v := info.ChannelName
		p := rootcoord.ToPhysicalChannel(info.ChannelName)
		vDeltaChannels = append(vDeltaChannels, v)
		pDeltaChannels = append(pDeltaChannels, p)
		VPDeltaChannels[v] = p
	}
	log.Debug("Starting WatchDeltaChannels ...",
		zap.Any("collectionID", collectionID),
		zap.Any("vDeltaChannels", vDeltaChannels),
		zap.Any("pChannels", pDeltaChannels),
	)
	// duplicate vChannel names would collapse in the map
	if len(VPDeltaChannels) != len(vDeltaChannels) {
		return errors.New("get physical channels failed, illegal channel length, collectionID = " + fmt.Sprintln(collectionID))
	}
	log.Debug("Get physical channels done",
		zap.Any("collectionID", collectionID),
	)

	// unlike watchDmChannelsTask, the collection must already exist here
	if hasCollectionInHistorical := w.node.historical.replica.hasCollection(collectionID); !hasCollectionInHistorical {
		return fmt.Errorf("cannot find collection with collectionID, %d", collectionID)
	}
	hCol, err := w.node.historical.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}

	// Check if the same deltaChannel has been watched
	for _, dstChan := range vDeltaChannels {
		for _, srcChan := range hCol.vDeltaChannels {
			if dstChan == srcChan {
				return nil
			}
		}
	}

	hCol.addVDeltaChannels(vDeltaChannels)
	hCol.addPDeltaChannels(pDeltaChannels)

	if hasCollectionInStreaming := w.node.streaming.replica.hasCollection(collectionID); !hasCollectionInStreaming {
		return fmt.Errorf("cannot find collection with collectionID, %d", collectionID)
	}
	sCol, err := w.node.streaming.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	sCol.addVDeltaChannels(vDeltaChannels)
	sCol.addPDeltaChannels(pDeltaChannels)

	// get subscription name; randomized so each watch gets its own subscription
	getUniqueSubName := func() string {
		prefixName := Params.MsgChannelSubName
		return prefixName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
	}
	consumeSubName := getUniqueSubName()

	// group channels by to seeking or consuming
	// (delta channels are always subscribed, never seeked)
	toSubChannels := make([]Channel, 0)
	for _, info := range w.req.Infos {
		toSubChannels = append(toSubChannels, info.ChannelName)
	}
	log.Debug("watchDeltaChannel, group channels done", zap.Any("collectionID", collectionID))

	// create tSafe
	for _, channel := range vDeltaChannels {
		w.node.tSafeReplica.addTSafe(channel)
	}

	w.node.dataSyncService.addCollectionDeltaFlowGraph(collectionID, vDeltaChannels)

	// add tSafe watcher if queryCollection exists
	qc, err := w.node.queryService.getQueryCollection(collectionID)
	if err == nil {
		for _, channel := range vDeltaChannels {
			err = qc.addTSafeWatcher(channel)
			if err != nil {
				// tSafe have been exist, not error
				log.Warn(err.Error())
			}
		}
	}

	// channels as consumer
	var nodeFGs map[Channel]*queryNodeFlowGraph
	nodeFGs, err = w.node.dataSyncService.getCollectionDeltaFlowGraphs(collectionID, vDeltaChannels)
	if err != nil {
		return err
	}
	for _, channel := range toSubChannels {
		for _, fg := range nodeFGs {
			if fg.channel == channel {
				// use pChannel to consume
				err := fg.consumerFlowGraphLatest(VPDeltaChannels[channel], consumeSubName)
				if err != nil {
					errMsg := "msgStream consume error :" + err.Error()
					log.Warn(errMsg)
					return errors.New(errMsg)
				}
			}
		}
	}
	log.Debug("as consumer channels",
		zap.Any("collectionID", collectionID),
		zap.Any("toSubChannels", toSubChannels))

	// NOTE(review): the result of FromDmlCPLoadDelete is not checked here —
	// confirm it reports/handles its own failures.
	for _, info := range w.req.Infos {
		w.node.loader.FromDmlCPLoadDelete(w.ctx, collectionID, info.SeekPosition)
	}

	// start flow graphs
	err = w.node.dataSyncService.startCollectionDeltaFlowGraph(collectionID, vDeltaChannels)
	if err != nil {
		return err
	}

	log.Debug("WatchDeltaChannels done", zap.String("ChannelIDs", fmt.Sprintln(vDeltaChannels)))
	return nil
}

// PostExecute does nothing for watchDeltaChannelsTask.
func (w *watchDeltaChannelsTask) PostExecute(ctx context.Context) error {
	return nil
}

559 560
// loadSegmentsTask
func (l *loadSegmentsTask) Timestamp() Timestamp {
561
	if l.req.Base == nil {
B
bigsheeper 已提交
562
		log.Warn("nil base req in loadSegmentsTask")
563 564
		return 0
	}
565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582
	return l.req.Base.Timestamp
}

// OnEnqueue assigns the task ID: the request's MsgID when available,
// otherwise a random identifier.
func (l *loadSegmentsTask) OnEnqueue() error {
	if l.req != nil && l.req.Base != nil {
		l.SetID(l.req.Base.MsgID)
		return nil
	}
	l.SetID(rand.Int63n(100000000000))
	return nil
}

// PreExecute does nothing for loadSegmentsTask.
func (l *loadSegmentsTask) PreExecute(ctx context.Context) error {
	return nil
}

// Execute loads the sealed segments described in the request:
//  1. ensure collection and partition metadata exist in both the
//     historical and streaming replicas for every segment,
//  2. delegate the actual load to the segment loader (sealed type),
//  3. remove each loaded partition from the released-partition record
//     of its collection in both replicas.
func (l *loadSegmentsTask) Execute(ctx context.Context) error {
	// TODO: support db
	log.Debug("Query node load segment", zap.String("loadSegmentRequest", fmt.Sprintln(l.req)))
	var err error

	// init meta
	for _, info := range l.req.Infos {
		collectionID := info.CollectionID
		partitionID := info.PartitionID
		hasCollectionInHistorical := l.node.historical.replica.hasCollection(collectionID)
		hasPartitionInHistorical := l.node.historical.replica.hasPartition(partitionID)
		if !hasCollectionInHistorical {
			err = l.node.historical.replica.addCollection(collectionID, l.req.Schema)
			if err != nil {
				return err
			}
		}
		if !hasPartitionInHistorical {
			err = l.node.historical.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
		hasCollectionInStreaming := l.node.streaming.replica.hasCollection(collectionID)
		hasPartitionInStreaming := l.node.streaming.replica.hasPartition(partitionID)
		if !hasCollectionInStreaming {
			err = l.node.streaming.replica.addCollection(collectionID, l.req.Schema)
			if err != nil {
				return err
			}
		}
		if !hasPartitionInStreaming {
			err = l.node.streaming.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
	}

	// load the sealed segments themselves
	err = l.node.loader.loadSegment(l.req, segmentTypeSealed)
	if err != nil {
		log.Warn(err.Error())
		return err
	}

	// un-mark the partitions as released in both replicas
	for _, info := range l.req.Infos {
		collectionID := info.CollectionID
		partitionID := info.PartitionID
		sCol, err := l.node.streaming.replica.getCollectionByID(collectionID)
		if err != nil {
			return err
		}
		sCol.deleteReleasedPartition(partitionID)
		hCol, err := l.node.historical.replica.getCollectionByID(collectionID)
		if err != nil {
			return err
		}
		hCol.deleteReleasedPartition(partitionID)
	}

	log.Debug("LoadSegments done", zap.String("SegmentLoadInfos", fmt.Sprintln(l.req.Infos)))
	return nil
}

// PostExecute does nothing for loadSegmentsTask.
func (l *loadSegmentsTask) PostExecute(ctx context.Context) error {
	return nil
}

// releaseCollectionTask
func (r *releaseCollectionTask) Timestamp() Timestamp {
651
	if r.req.Base == nil {
B
bigsheeper 已提交
652
		log.Warn("nil base req in releaseCollectionTask", zap.Any("collectionID", r.req.CollectionID))
653 654
		return 0
	}
655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670
	return r.req.Base.Timestamp
}

// OnEnqueue assigns the task ID: the request's MsgID when available,
// otherwise a random identifier.
func (r *releaseCollectionTask) OnEnqueue() error {
	if r.req != nil && r.req.Base != nil {
		r.SetID(r.req.Base.MsgID)
		return nil
	}
	r.SetID(rand.Int63n(100000000000))
	return nil
}

// PreExecute does nothing for releaseCollectionTask.
func (r *releaseCollectionTask) PreExecute(ctx context.Context) error {
	return nil
}

671 672 673 674 675 676 677 678
// ReplicaType identifies which replica a release operation targets;
// releaseReplica branches on it to tear down the matching flow graphs.
type ReplicaType int

const (
	replicaNone ReplicaType = iota // no replica specified
	replicaStreaming               // the streaming replica
	replicaHistorical              // the historical replica
)

679
// Execute releases an entire collection: first from the streaming
// replica, then from the historical replica (see releaseReplica), then
// removes the collection's global segment IDs and stops its query
// collection. Errors are wrapped with the collection ID.
func (r *releaseCollectionTask) Execute(ctx context.Context) error {
	log.Debug("Execute release collection task", zap.Any("collectionID", r.req.CollectionID))
	errMsg := "release collection failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "
	log.Debug("release streaming", zap.Any("collectionID", r.req.CollectionID))
	err := r.releaseReplica(r.node.streaming.replica, replicaStreaming)
	if err != nil {
		return errors.New(errMsg + err.Error())
	}

	// remove collection metas in streaming and historical
	log.Debug("release historical", zap.Any("collectionID", r.req.CollectionID))
	err = r.releaseReplica(r.node.historical.replica, replicaHistorical)
	if err != nil {
		return errors.New(errMsg + err.Error())
	}
	r.node.historical.removeGlobalSegmentIDsByCollectionID(r.req.CollectionID)
	// remove query collection
	r.node.queryService.stopQueryCollection(r.req.CollectionID)

	log.Debug("ReleaseCollection done", zap.Int64("collectionID", r.req.CollectionID))
	return nil
}

func (r *releaseCollectionTask) releaseReplica(replica ReplicaInterface, replicaType ReplicaType) error {
	collection, err := replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
B
bigsheeper 已提交
707
	// set release time
G
godchen 已提交
708
	log.Debug("set release time", zap.Any("collectionID", r.req.CollectionID))
709 710
	collection.setReleaseTime(r.req.Base.Timestamp)

B
bigsheeper 已提交
711
	// sleep to wait for query tasks done
712
	const gracefulReleaseTime = 1
B
bigsheeper 已提交
713
	time.Sleep(gracefulReleaseTime * time.Second)
G
groot 已提交
714
	log.Debug("Starting release collection...",
B
bigsheeper 已提交
715 716
		zap.Any("collectionID", r.req.CollectionID),
	)
717 718 719 720
	if replicaType == replicaStreaming {
		r.node.dataSyncService.removeCollectionFlowGraph(r.req.CollectionID)
		// remove partition flow graphs which partitions belong to the target collection
		partitionIDs, err := replica.getPartitionIDs(r.req.CollectionID)
B
bigsheeper 已提交
721 722
		if err != nil {
			return err
723
		}
724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751
		for _, partitionID := range partitionIDs {
			r.node.dataSyncService.removePartitionFlowGraph(partitionID)
		}
		// remove all tSafes of the target collection
		for _, channel := range collection.getVChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			// no tSafe in tSafeReplica, don't return error
			err = r.node.tSafeReplica.removeTSafe(channel)
			if err != nil {
				log.Warn(err.Error())
			}
		}
	} else {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		// remove all tSafes of the target collection
		for _, channel := range collection.getVDeltaChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vDeltaChannel", channel),
			)
			// no tSafe in tSafeReplica, don't return error
			err = r.node.tSafeReplica.removeTSafe(channel)
			if err != nil {
				log.Warn(err.Error())
			}
B
bigsheeper 已提交
752 753
		}
	}
754

755 756 757 758 759 760
	// remove excludedSegments record
	replica.removeExcludedSegments(r.req.CollectionID)
	err = replica.removeCollection(r.req.CollectionID)
	if err != nil {
		return err
	}
761 762 763 764 765 766 767 768 769
	return nil
}

// PostExecute does nothing for releaseCollectionTask.
func (r *releaseCollectionTask) PostExecute(ctx context.Context) error {
	return nil
}

// releasePartitionsTask
func (r *releasePartitionsTask) Timestamp() Timestamp {
770
	if r.req.Base == nil {
B
bigsheeper 已提交
771
		log.Warn("nil base req in releasePartitionsTask", zap.Any("collectionID", r.req.CollectionID))
772 773
		return 0
	}
774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790
	return r.req.Base.Timestamp
}

// OnEnqueue assigns the task ID: the request's MsgID when available,
// otherwise a random identifier.
func (r *releasePartitionsTask) OnEnqueue() error {
	if r.req != nil && r.req.Base != nil {
		r.SetID(r.req.Base.MsgID)
		return nil
	}
	r.SetID(rand.Int63n(100000000000))
	return nil
}

// PreExecute does nothing for releasePartitionsTask.
func (r *releasePartitionsTask) PreExecute(ctx context.Context) error {
	return nil
}

// Execute releases the requested partitions from both replicas. For each
// partition it tears down the partition flow graph and its tSafes,
// removes the partition meta from both replicas (logging but not
// aborting on per-partition errors so every partition gets a release
// attempt), and marks the partition as released on both collections.
// When the collection was loaded by partition and no partitions remain
// in the historical replica, the collection's delta flow graph and
// delta-channel tSafes are torn down as well. Finally the partitions'
// global segment IDs are dropped.
func (r *releasePartitionsTask) Execute(ctx context.Context) error {
	log.Debug("Execute release partition task",
		zap.Any("collectionID", r.req.CollectionID),
		zap.Any("partitionIDs", r.req.PartitionIDs))
	errMsg := "release partitions failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "

	// sleep to wait for query tasks done
	const gracefulReleaseTime = 1
	time.Sleep(gracefulReleaseTime * time.Second)

	// get collection from streaming and historical
	hCol, err := r.node.historical.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
	sCol, err := r.node.streaming.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
	log.Debug("start release partition", zap.Any("collectionID", r.req.CollectionID))

	// release partitions
	vChannels := sCol.getVChannels()
	for _, id := range r.req.PartitionIDs {
		// only tear down the flow graph/tSafes if this partition has one
		if _, err := r.node.dataSyncService.getPartitionFlowGraphs(id, vChannels); err == nil {
			r.node.dataSyncService.removePartitionFlowGraph(id)
			// remove all tSafes of the target partition
			for _, channel := range vChannels {
				log.Debug("Releasing tSafe in releasePartitionTask...",
					zap.Any("collectionID", r.req.CollectionID),
					zap.Any("partitionID", id),
					zap.Any("vChannel", channel),
				)
				// no tSafe in tSafeReplica, don't return error
				err = r.node.tSafeReplica.removeTSafe(channel)
				if err != nil {
					log.Warn(err.Error())
				}
			}
		}

		// remove partition from streaming and historical
		hasPartitionInHistorical := r.node.historical.replica.hasPartition(id)
		if hasPartitionInHistorical {
			err := r.node.historical.replica.removePartition(id)
			if err != nil {
				// not return, try to release all partitions
				log.Warn(errMsg + err.Error())
			}
		}
		hasPartitionInStreaming := r.node.streaming.replica.hasPartition(id)
		if hasPartitionInStreaming {
			err := r.node.streaming.replica.removePartition(id)
			if err != nil {
				// not return, try to release all partitions
				log.Warn(errMsg + err.Error())
			}
		}

		hCol.addReleasedPartition(id)
		sCol.addReleasedPartition(id)
	}
	pids, err := r.node.historical.replica.getPartitionIDs(r.req.CollectionID)
	if err != nil {
		return err
	}
	log.Debug("start release history pids", zap.Any("pids", pids), zap.Any("load type", hCol.getLoadType()))
	// last partition of a partition-loaded collection: also drop delta data
	if len(pids) == 0 && hCol.getLoadType() == loadTypePartition {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		log.Debug("release delta channels", zap.Any("deltaChannels", hCol.getVDeltaChannels()))
		vChannels := hCol.getVDeltaChannels()
		for _, channel := range vChannels {
			log.Debug("Releasing tSafe in releasePartitionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			// no tSafe in tSafeReplica, don't return error
			err = r.node.tSafeReplica.removeTSafe(channel)
			if err != nil {
				log.Warn(err.Error())
			}
		}
	}

	// release global segment info
	r.node.historical.removeGlobalSegmentIDsByPartitionIds(r.req.PartitionIDs)

	log.Debug("Release partition task done",
		zap.Any("collectionID", r.req.CollectionID),
		zap.Any("partitionIDs", r.req.PartitionIDs))
	return nil
}

// PostExecute does nothing for releasePartitionsTask.
func (r *releasePartitionsTask) PostExecute(ctx context.Context) error {
	return nil
}