task.go 69.1 KB
Newer Older
1 2 3 4 5 6
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
7 8
// with the License. You may obtain a copy of the License at
//
9
//     http://www.apache.org/licenses/LICENSE-2.0
10
//
11 12 13 14 15
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
16

17
package querycoord
18 19 20

import (
	"context"
21
	"errors"
22
	"fmt"
23
	"sort"
24
	"sync"
25
	"time"
26

27
	"github.com/golang/protobuf/proto"
28 29
	"go.uber.org/zap"

X
Xiangyu Wang 已提交
30
	"github.com/milvus-io/milvus/internal/log"
31
	"github.com/milvus-io/milvus/internal/metrics"
X
Xiangyu Wang 已提交
32 33 34
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/querypb"
35
	"github.com/milvus-io/milvus/internal/util/funcutil"
36
	"github.com/milvus-io/milvus/internal/util/timerecord"
37 38
)

39 40
// timeoutForRPC is the default timeout applied to RPCs issued by query coord tasks.
const timeoutForRPC = 10 * time.Second

41
// Key prefixes under which query coord persists task state
// (presumably etcd keys — see MaxSendSizeToEtcd below; confirm against the kv store used).
const (
	triggerTaskPrefix     = "queryCoord-triggerTask"
	activeTaskPrefix      = "queryCoord-activeTask"
	taskInfoPrefix        = "queryCoord-taskInfo"
	loadBalanceInfoPrefix = "queryCoord-loadBalanceInfo"
)

48
const (
	// MaxRetryNum is the maximum number of times that each task can be retried
	MaxRetryNum = 5

	// MaxSendSizeToEtcd is the default limit size of etcd messages that can be sent and received
	// MaxSendSizeToEtcd = 2097152
	// Limit size of every loadSegmentReq to 200k
	MaxSendSizeToEtcd = 200000
)

57 58 59 60 61 62 63
// taskState describes the lifecycle state of a querycoord task.
type taskState int

const (
	taskUndo    taskState = 0 // not yet scheduled
	taskDoing   taskState = 1 // currently executing
	taskDone    taskState = 3 // finished successfully
	taskExpired taskState = 4 // no longer relevant (e.g. superseded)
	taskFailed  taskState = 5 // finished with an unrecoverable error
	// NOTE(review): value 2 is intentionally unassigned here; keep the numeric
	// values stable — they may be persisted/compared externally. TODO confirm.
)

67
// task is the common contract for all querycoord tasks (trigger tasks and
// their internal child tasks). Implementations embed *baseTask and override
// the lifecycle hooks they need.
type task interface {
	// traceCtx returns the task-scoped context (canceled with the task).
	traceCtx() context.Context
	getTaskID() UniqueID // return ReqId
	setTaskID(id UniqueID)
	msgBase() *commonpb.MsgBase
	msgType() commonpb.MsgType
	timestamp() Timestamp
	getTriggerCondition() querypb.TriggerCondition
	setTriggerCondition(trigger querypb.TriggerCondition)
	// Lifecycle hooks, invoked in order by the scheduler.
	preExecute(ctx context.Context) error
	execute(ctx context.Context) error
	postExecute(ctx context.Context) error
	// reschedule produces replacement tasks when the assigned node is unusable.
	reschedule(ctx context.Context) ([]task, error)
	// rollBack produces compensating tasks that undo this task's effects.
	rollBack(ctx context.Context) []task
	waitToFinish() error
	notify(err error)
	taskPriority() querypb.TriggerCondition
	setParentTask(t task)
	getParentTask() task
	getChildTask() []task
	addChildTask(t task)
	removeChildTaskByID(taskID UniqueID)
	isValid() bool
	// marshal serializes the task's request for persistence.
	marshal() ([]byte, error)
	getState() taskState
	setState(state taskState)
	isRetryable() bool
	setResultInfo(err error)
	getResultInfo() *commonpb.Status
	updateTaskProcess()
	// elapseSpan reports how long the task has been running.
	elapseSpan() time.Duration
}

100
// baseTask provides the shared state and default behavior for all task
// implementations. Each mutable field group is guarded by its own RWMutex.
type baseTask struct {
	condition
	ctx        context.Context    // task-scoped context, child of the scheduler's ctx
	cancel     context.CancelFunc // cancels ctx
	result     *commonpb.Status   // guarded by resultMu
	resultMu   sync.RWMutex
	state      taskState // guarded by stateMu
	stateMu    sync.RWMutex
	retryCount int // remaining retry budget, guarded by retryMu
	retryMu    sync.RWMutex
	//sync.RWMutex

	taskID           UniqueID
	triggerCondition querypb.TriggerCondition // guarded by triggerMu
	triggerMu        sync.RWMutex
	parentTask       task
	childTasks       []task // guarded by childTasksMu
	childTasksMu     sync.RWMutex

	timeRecorder *timerecord.TimeRecorder // measures task latency for metrics
}

122
func newBaseTask(ctx context.Context, triggerType querypb.TriggerCondition) *baseTask {
123
	childCtx, cancel := context.WithCancel(ctx)
124
	condition := newTaskCondition(childCtx)
125

126
	baseTask := &baseTask{
127 128
		ctx:              childCtx,
		cancel:           cancel,
129
		condition:        condition,
130 131 132 133
		state:            taskUndo,
		retryCount:       MaxRetryNum,
		triggerCondition: triggerType,
		childTasks:       []task{},
134
		timeRecorder:     timerecord.NewTimeRecorder("QueryCoordBaseTask"),
135 136 137
	}

	return baseTask
138 139
}

140
// getTaskID returns the unique taskID of the trigger task.
func (bt *baseTask) getTaskID() UniqueID {
	return bt.taskID
}

145
// setTaskID sets the trigger task's unique id, which is allocated by tso.
// NOTE(review): unsynchronized — callers are expected to set it before the
// task is shared across goroutines; confirm against the scheduler.
func (bt *baseTask) setTaskID(id UniqueID) {
	bt.taskID = id
}

150
// traceCtx returns the task-scoped context created in newBaseTask.
func (bt *baseTask) traceCtx() context.Context {
	return bt.ctx
}

154
// getTriggerCondition returns the condition that triggered this task,
// under the read lock since setTriggerCondition may run concurrently.
func (bt *baseTask) getTriggerCondition() querypb.TriggerCondition {
	bt.triggerMu.RLock()
	defer bt.triggerMu.RUnlock()

	return bt.triggerCondition
}

161 162 163 164 165 166 167
// setTriggerCondition overwrites the task's trigger condition under the write lock.
func (bt *baseTask) setTriggerCondition(trigger querypb.TriggerCondition) {
	bt.triggerMu.Lock()
	defer bt.triggerMu.Unlock()

	bt.triggerCondition = trigger
}

168
func (bt *baseTask) taskPriority() querypb.TriggerCondition {
169 170 171
	return bt.triggerCondition
}

172
// setParentTask records the trigger task that spawned this (child) task.
func (bt *baseTask) setParentTask(t task) {
	bt.parentTask = t
}

176
// getParentTask returns the parent (trigger) task, or nil for top-level tasks.
func (bt *baseTask) getParentTask() task {
	return bt.parentTask
}

180 181
// getChildTask returns all the child tasks of the trigger task.
// Child task may be loadSegmentTask, watchDmChannelTask or watchQueryChannelTask.
// NOTE(review): the internal slice itself is returned (not a copy); callers
// must not mutate it.
func (bt *baseTask) getChildTask() []task {
	bt.childTasksMu.RLock()
	defer bt.childTasksMu.RUnlock()

	return bt.childTasks
}

189
// addChildTask appends t to this task's child list under the write lock.
func (bt *baseTask) addChildTask(t task) {
	bt.childTasksMu.Lock()
	defer bt.childTasksMu.Unlock()

	bt.childTasks = append(bt.childTasks, t)
}

196
func (bt *baseTask) removeChildTaskByID(taskID UniqueID) {
197 198 199 200 201
	bt.childTasksMu.Lock()
	defer bt.childTasksMu.Unlock()

	result := make([]task, 0)
	for _, t := range bt.childTasks {
202
		if t.getTaskID() != taskID {
203 204 205 206
			result = append(result, t)
		}
	}
	bt.childTasks = result
207
	metrics.QueryCoordNumChildTasks.WithLabelValues().Dec()
208 209
}

210 211 212 213 214 215 216
// clearChildTasks drops all child tasks (used when a trigger task fails and
// its pending children must be abandoned).
// NOTE(review): unlike removeChildTaskByID this does not adjust the
// QueryCoordNumChildTasks gauge — callers appear to handle that separately
// (see the once.Do blocks in updateTaskProcess); confirm.
func (bt *baseTask) clearChildTasks() {
	bt.childTasksMu.Lock()
	defer bt.childTasksMu.Unlock()

	bt.childTasks = []task{}
}

217
// isValid reports whether the task can still be executed; the base
// implementation always says yes, subclasses override as needed.
func (bt *baseTask) isValid() bool {
	return true
}

221
// reschedule is a no-op in the base task; subclasses that can be reassigned
// to another node return replacement tasks here.
func (bt *baseTask) reschedule(ctx context.Context) ([]task, error) {
	return nil, nil
}

225
// getState returns the state of task, such as taskUndo, taskDoing, taskDone,
// taskExpired or taskFailed.
func (bt *baseTask) getState() taskState {
	bt.stateMu.RLock()
	defer bt.stateMu.RUnlock()
	return bt.state
}

232
// setState transitions the task to the given state under the write lock.
func (bt *baseTask) setState(state taskState) {
	bt.stateMu.Lock()
	defer bt.stateMu.Unlock()
	bt.state = state
}

238
// isRetryable reports whether the task still has retry budget left.
func (bt *baseTask) isRetryable() bool {
	bt.retryMu.RLock()
	defer bt.retryMu.RUnlock()
	return bt.retryCount > 0
}

244 245 246 247 248 249 250
// reduceRetryCount consumes one unit of the retry budget; typically deferred
// at the top of execute so every attempt counts.
func (bt *baseTask) reduceRetryCount() {
	bt.retryMu.Lock()
	defer bt.retryMu.Unlock()

	bt.retryCount--
}

251
func (bt *baseTask) setResultInfo(err error) {
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267
	bt.resultMu.Lock()
	defer bt.resultMu.Unlock()

	if bt.result == nil {
		bt.result = &commonpb.Status{}
	}
	if err == nil {
		bt.result.ErrorCode = commonpb.ErrorCode_Success
		bt.result.Reason = ""
		return
	}

	bt.result.ErrorCode = commonpb.ErrorCode_UnexpectedError
	bt.result.Reason = bt.result.Reason + ", " + err.Error()
}

268
// getResultInfo returns a deep copy of the task's result status so callers
// can't mutate shared state.
// NOTE(review): assumes setResultInfo has run at least once (preExecute calls
// setResultInfo(nil)); proto.Clone on a nil result would not yield a usable
// Status — confirm all call paths.
func (bt *baseTask) getResultInfo() *commonpb.Status {
	bt.resultMu.RLock()
	defer bt.resultMu.RUnlock()
	return proto.Clone(bt.result).(*commonpb.Status)
}

274
// updateTaskProcess is a no-op in the base task; trigger tasks override it to
// track child-task completion.
func (bt *baseTask) updateTaskProcess() {
	// TODO::
}

278
// rollBack is a no-op in the base task; trigger tasks override it to emit
// compensating tasks.
func (bt *baseTask) rollBack(ctx context.Context) []task {
	//TODO::
	return nil
}

283 284 285 286
// elapseSpan reports the time elapsed since the task's timeRecorder was created.
func (bt *baseTask) elapseSpan() time.Duration {
	return bt.timeRecorder.ElapseSpan()
}

287 288
// loadCollectionTask loads all the data of a collection onto query nodes.
type loadCollectionTask struct {
	*baseTask
	*querypb.LoadCollectionRequest
	broker  *globalMetaBroker // access to rootcoord/datacoord metadata
	cluster Cluster           // the query node cluster
	meta    Meta              // querycoord meta store
	once    sync.Once         // guards the one-shot success metrics in updateTaskProcess
}

296
// msgBase returns the request's common message header.
func (lct *loadCollectionTask) msgBase() *commonpb.MsgBase {
	return lct.Base
}

300
// marshal serializes the embedded LoadCollectionRequest for persistence.
func (lct *loadCollectionTask) marshal() ([]byte, error) {
	return proto.Marshal(lct.LoadCollectionRequest)
}

304
// msgType returns the message type carried in the request header.
func (lct *loadCollectionTask) msgType() commonpb.MsgType {
	return lct.Base.MsgType
}

308
// timestamp returns the request timestamp from the message header.
func (lct *loadCollectionTask) timestamp() Timestamp {
	return lct.Base.Timestamp
}

312
func (lct *loadCollectionTask) updateTaskProcess() {
313
	collectionID := lct.CollectionID
314
	childTasks := lct.getChildTask()
315 316
	allDone := true
	for _, t := range childTasks {
317
		if t.getState() != taskDone {
318
			allDone = false
319
			break
320
		}
321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339

		// wait watchDeltaChannel and watchQueryChannel task done after loading segment
		nodeID := getDstNodeIDByTask(t)
		if t.msgType() == commonpb.MsgType_LoadSegments {
			if !lct.cluster.hasWatchedDeltaChannel(lct.ctx, nodeID, collectionID) ||
				!lct.cluster.hasWatchedQueryChannel(lct.ctx, nodeID, collectionID) {
				allDone = false
				break
			}
		}

		// wait watchQueryChannel task done after watch dmChannel
		if t.msgType() == commonpb.MsgType_WatchDmChannels {
			if !lct.cluster.hasWatchedQueryChannel(lct.ctx, nodeID, collectionID) {
				allDone = false
				break
			}
		}

340 341
	}
	if allDone {
342
		err := lct.meta.setLoadPercentage(collectionID, 0, 100, querypb.LoadType_LoadCollection)
343 344
		if err != nil {
			log.Error("loadCollectionTask: set load percentage to meta's collectionInfo", zap.Int64("collectionID", collectionID))
345
			lct.setResultInfo(err)
346
		}
347
		lct.once.Do(func() {
C
cai.zhang 已提交
348
			metrics.QueryCoordLoadCount.WithLabelValues(metrics.SuccessLabel).Inc()
349 350 351
			metrics.QueryCoordLoadLatency.WithLabelValues().Observe(float64(lct.elapseSpan().Milliseconds()))
			metrics.QueryCoordNumChildTasks.WithLabelValues().Sub(float64(len(lct.getChildTask())))
		})
352 353 354
	}
}

355
// preExecute resets the task result to success and logs the start of the task.
func (lct *loadCollectionTask) preExecute(ctx context.Context) error {
	collectionID := lct.CollectionID
	schema := lct.Schema
	lct.setResultInfo(nil)
	log.Debug("start do loadCollectionTask",
		zap.Int64("msgID", lct.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Stringer("schema", schema))
	return nil
}

366
// execute loads a whole collection:
//  1. list all partitions of the collection via the broker,
//  2. for each partition gather recovery info (vchannels + segment binlogs)
//     and build per-segment LoadSegmentsRequests,
//  3. persist merged deltaChannel info into meta (must happen before child
//     tasks run so a rescheduled loadSegment can find it),
//  4. build one WatchDmChannelsRequest per merged dm channel,
//  5. hand all requests to assignInternalTask and register the resulting
//     child tasks, then record the collection/partitions in meta.
//
// Any failure sets the task result and returns; retry budget is consumed via
// the deferred reduceRetryCount. Statement order is load-bearing — see the
// comment at step 3.
func (lct *loadCollectionTask) execute(ctx context.Context) error {
	defer lct.reduceRetryCount()
	collectionID := lct.CollectionID

	toLoadPartitionIDs, err := lct.broker.showPartitionIDs(ctx, collectionID)
	if err != nil {
		log.Error("loadCollectionTask: showPartition failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		lct.setResultInfo(err)
		return err
	}
	log.Debug("loadCollectionTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toLoadPartitionIDs), zap.Int64("msgID", lct.Base.MsgID))

	var (
		loadSegmentReqs    = []*querypb.LoadSegmentsRequest{}
		watchDmChannelReqs = []*querypb.WatchDmChannelsRequest{}
		deltaChannelInfos  = []*datapb.VchannelInfo{}
		dmChannelInfos     = []*datapb.VchannelInfo{}
	)

	for _, partitionID := range toLoadPartitionIDs {
		vChannelInfos, binlogs, err := lct.broker.getRecoveryInfo(lct.ctx, collectionID, partitionID)
		if err != nil {
			log.Error("loadCollectionTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
			lct.setResultInfo(err)
			return err
		}

		// One LoadSegmentsRequest per segment: keeps each request under the
		// etcd message size limit (see MaxSendSizeToEtcd).
		for _, segmentBinlog := range binlogs {
			segmentLoadInfo := lct.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBinlog, true, lct.Schema)
			msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
			msgBase.MsgType = commonpb.MsgType_LoadSegments
			loadSegmentReq := &querypb.LoadSegmentsRequest{
				Base:         msgBase,
				Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
				Schema:       lct.Schema,
				CollectionID: collectionID,
				LoadMeta: &querypb.LoadMetaInfo{
					LoadType:     querypb.LoadType_LoadCollection,
					CollectionID: collectionID,
					PartitionIDs: toLoadPartitionIDs,
				},
			}

			loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
		}

		for _, info := range vChannelInfos {
			deltaChannelInfo, err := generateWatchDeltaChannelInfo(info)
			if err != nil {
				log.Error("loadCollectionTask: generateWatchDeltaChannelInfo failed", zap.Int64("collectionID", collectionID), zap.String("channelName", info.ChannelName), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
				lct.setResultInfo(err)
				return err
			}
			deltaChannelInfos = append(deltaChannelInfos, deltaChannelInfo)
			dmChannelInfos = append(dmChannelInfos, info)
		}
	}
	mergedDeltaChannels := mergeWatchDeltaChannelInfo(deltaChannelInfos)
	// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
	err = lct.meta.setDeltaChannel(collectionID, mergedDeltaChannels)
	if err != nil {
		log.Error("loadCollectionTask: set delta channel info failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		lct.setResultInfo(err)
		return err
	}

	//TODO:: queryNode receive dm message according partitionID cache
	//TODO:: queryNode add partitionID to cache if receive create partition message from dmChannel
	mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
	for _, info := range mergedDmChannel {
		msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
		msgBase.MsgType = commonpb.MsgType_WatchDmChannels
		watchRequest := &querypb.WatchDmChannelsRequest{
			Base:         msgBase,
			CollectionID: collectionID,
			//PartitionIDs: toLoadPartitionIDs,
			Infos:  []*datapb.VchannelInfo{info},
			Schema: lct.Schema,
			LoadMeta: &querypb.LoadMetaInfo{
				LoadType:     querypb.LoadType_LoadCollection,
				CollectionID: collectionID,
				PartitionIDs: toLoadPartitionIDs,
			},
		}

		watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
	}

	internalTasks, err := assignInternalTask(ctx, lct, lct.meta, lct.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, nil)
	if err != nil {
		log.Error("loadCollectionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		lct.setResultInfo(err)
		return err
	}
	for _, internalTask := range internalTasks {
		lct.addChildTask(internalTask)
		log.Debug("loadCollectionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())), zap.Int64("msgID", lct.Base.MsgID))
	}
	metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
	log.Debug("loadCollectionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID))

	// Record the collection and its partitions in meta only after child tasks
	// were assigned successfully; postExecute releases meta on failure.
	err = lct.meta.addCollection(collectionID, querypb.LoadType_LoadCollection, lct.Schema)
	if err != nil {
		log.Error("loadCollectionTask: add collection to meta failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		lct.setResultInfo(err)
		return err
	}
	err = lct.meta.addPartitions(collectionID, toLoadPartitionIDs)
	if err != nil {
		log.Error("loadCollectionTask: add partitions to meta failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toLoadPartitionIDs), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		lct.setResultInfo(err)
		return err
	}

	log.Debug("LoadCollection execute done",
		zap.Int64("msgID", lct.getTaskID()),
		zap.Int64("collectionID", collectionID))
	return nil
}

486
// postExecute cleans up after execute: on failure it drops all pending child
// tasks and removes the collection from meta (a meta failure here is treated
// as unrecoverable and panics).
func (lct *loadCollectionTask) postExecute(ctx context.Context) error {
	collectionID := lct.CollectionID
	if lct.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		lct.clearChildTasks()
		err := lct.meta.releaseCollection(collectionID)
		if err != nil {
			log.Error("loadCollectionTask: occur error when release collection info from meta", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
			panic(err)
		}
	}

	log.Debug("loadCollectionTask postExecute done",
		zap.Int64("msgID", lct.getTaskID()),
		zap.Int64("collectionID", collectionID))
	return nil
}

503
// rollBack undoes a (partially applied) loadCollectionTask: it emits one
// releaseCollectionTask per online query node and removes the collection from
// meta. A meta failure is treated as unrecoverable and panics.
func (lct *loadCollectionTask) rollBack(ctx context.Context) []task {
	onlineNodeIDs := lct.cluster.onlineNodeIDs()
	resultTasks := make([]task, 0)
	for _, nodeID := range onlineNodeIDs {
		//brute force rollBack, should optimize
		msgBase := proto.Clone(lct.Base).(*commonpb.MsgBase)
		msgBase.MsgType = commonpb.MsgType_ReleaseCollection
		req := &querypb.ReleaseCollectionRequest{
			Base:         msgBase,
			DbID:         lct.DbID,
			CollectionID: lct.CollectionID,
			NodeID:       nodeID,
		}
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
		baseTask.setParentTask(lct)
		releaseCollectionTask := &releaseCollectionTask{
			baseTask:                 baseTask,
			ReleaseCollectionRequest: req,
			cluster:                  lct.cluster,
		}
		resultTasks = append(resultTasks, releaseCollectionTask)
	}

	err := lct.meta.releaseCollection(lct.CollectionID)
	if err != nil {
		log.Error("releaseCollectionTask: release collectionInfo from meta failed", zap.Int64("collectionID", lct.CollectionID), zap.Int64("msgID", lct.Base.MsgID), zap.Error(err))
		panic(err)
	}

	log.Debug("loadCollectionTask: generate rollBack task for loadCollectionTask", zap.Int64("collectionID", lct.CollectionID), zap.Int64("msgID", lct.Base.MsgID))
	return resultTasks
}

536 537 538
// releaseCollectionTask will release all the data of this collection on query nodes
type releaseCollectionTask struct {
	*baseTask
	*querypb.ReleaseCollectionRequest
	cluster Cluster // the query node cluster
	meta    Meta    // nil on per-node child tasks; only the parent cleans meta
	broker  *globalMetaBroker
}

545
// msgBase returns the request's common message header.
func (rct *releaseCollectionTask) msgBase() *commonpb.MsgBase {
	return rct.Base
}

549
// marshal serializes the embedded ReleaseCollectionRequest for persistence.
func (rct *releaseCollectionTask) marshal() ([]byte, error) {
	return proto.Marshal(rct.ReleaseCollectionRequest)
}

553
// msgType returns the message type carried in the request header.
func (rct *releaseCollectionTask) msgType() commonpb.MsgType {
	return rct.Base.MsgType
}

557
// timestamp returns the request timestamp from the message header.
func (rct *releaseCollectionTask) timestamp() Timestamp {
	return rct.Base.Timestamp
}

561 562 563 564 565 566 567 568 569 570 571 572 573
func (rct *releaseCollectionTask) updateTaskProcess() {
	collectionID := rct.CollectionID
	parentTask := rct.getParentTask()
	// Only the top-level task (no parent) owns the meta cleanup; per-node
	// child tasks leave meta alone (their meta field may be nil).
	if parentTask == nil {
		// all queryNodes have successfully released the data, clean up collectionMeta
		err := rct.meta.releaseCollection(collectionID)
		if err != nil {
			log.Error("releaseCollectionTask: release collectionInfo from meta failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", rct.Base.MsgID), zap.Error(err))
			panic(err)
		}
	}
}

574
// preExecute resets the task result to success and logs the start of the task.
func (rct *releaseCollectionTask) preExecute(context.Context) error {
	collectionID := rct.CollectionID
	rct.setResultInfo(nil)
	log.Debug("start do releaseCollectionTask",
		zap.Int64("msgID", rct.getTaskID()),
		zap.Int64("collectionID", collectionID))
	return nil
}
}

583
// execute releases a collection. When NodeID is unset (<= 0) this is the
// trigger task: it first tears down the DQL message stream via the broker,
// then fans out one child releaseCollectionTask per online query node.
// Otherwise it is a per-node child task that releases directly on that node.
func (rct *releaseCollectionTask) execute(ctx context.Context) error {
	// cancel the maximum number of retries for queryNode cleaning data until the data is completely freed
	// defer rct.reduceRetryCount()
	collectionID := rct.CollectionID

	// if nodeID ==0, it means that the release request has not been assigned to the specified query node
	if rct.NodeID <= 0 {
		ctx2, cancel2 := context.WithTimeout(rct.ctx, timeoutForRPC)
		defer cancel2()
		err := rct.broker.releaseDQLMessageStream(ctx2, collectionID)
		if err != nil {
			log.Error("releaseCollectionTask: release collection end, releaseDQLMessageStream occur error", zap.Int64("collectionID", rct.CollectionID), zap.Int64("msgID", rct.Base.MsgID), zap.Error(err))
			rct.setResultInfo(err)
			return err
		}

		onlineNodeIDs := rct.cluster.onlineNodeIDs()
		for _, nodeID := range onlineNodeIDs {
			req := proto.Clone(rct.ReleaseCollectionRequest).(*querypb.ReleaseCollectionRequest)
			req.NodeID = nodeID
			baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
			baseTask.setParentTask(rct)
			// Child tasks deliberately carry no meta/broker: only the parent
			// (parentTask == nil in updateTaskProcess) touches meta.
			releaseCollectionTask := &releaseCollectionTask{
				baseTask:                 baseTask,
				ReleaseCollectionRequest: req,
				cluster:                  rct.cluster,
			}

			rct.addChildTask(releaseCollectionTask)
			log.Debug("releaseCollectionTask: add a releaseCollectionTask to releaseCollectionTask's childTask", zap.Any("task", releaseCollectionTask))
		}
	} else {
		err := rct.cluster.releaseCollection(ctx, rct.NodeID, rct.ReleaseCollectionRequest)
		if err != nil {
			log.Warn("releaseCollectionTask: release collection end, node occur error", zap.Int64("collectionID", collectionID), zap.Int64("nodeID", rct.NodeID))
			// after release failed, the task will always redo
			// if the query node happens to be down, the node release was judged to have succeeded
			return err
		}
	}

	log.Debug("releaseCollectionTask Execute done",
		zap.Int64("msgID", rct.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64("nodeID", rct.NodeID))
	return nil
}

631
// postExecute drops any pending child tasks if the release failed; the task
// itself will be redone by the scheduler (see execute).
func (rct *releaseCollectionTask) postExecute(context.Context) error {
	collectionID := rct.CollectionID
	if rct.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		rct.clearChildTasks()
	}

	log.Debug("releaseCollectionTask postExecute done",
		zap.Int64("msgID", rct.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64("nodeID", rct.NodeID))
	return nil
}

644
// rollBack is not implemented for release tasks yet.
func (rct *releaseCollectionTask) rollBack(ctx context.Context) []task {
	//TODO::
	//if taskID == 0, recovery meta
	//if taskID != 0, recovery collection on queryNode
	return nil
}

651 652 653
// loadPartitionTask will load all the data of this partition to query nodes
type loadPartitionTask struct {
	*baseTask
	*querypb.LoadPartitionsRequest
	broker  *globalMetaBroker // access to rootcoord/datacoord metadata
	cluster Cluster           // the query node cluster
	meta    Meta              // querycoord meta store
	addCol  bool              // NOTE(review): not read in this chunk — presumably marks that the collection entry was added by this task; confirm
	once    sync.Once         // guards the one-shot success metrics in updateTaskProcess
}

662
// msgBase returns the request's common message header.
func (lpt *loadPartitionTask) msgBase() *commonpb.MsgBase {
	return lpt.Base
}

666
// marshal serializes the embedded LoadPartitionsRequest for persistence.
func (lpt *loadPartitionTask) marshal() ([]byte, error) {
	return proto.Marshal(lpt.LoadPartitionsRequest)
}

670
// msgType returns the message type carried in the request header.
func (lpt *loadPartitionTask) msgType() commonpb.MsgType {
	return lpt.Base.MsgType
}

674
// timestamp returns the request timestamp from the message header.
func (lpt *loadPartitionTask) timestamp() Timestamp {
	return lpt.Base.Timestamp
}

678
func (lpt *loadPartitionTask) updateTaskProcess() {
679
	collectionID := lpt.CollectionID
680
	partitionIDs := lpt.PartitionIDs
681
	childTasks := lpt.getChildTask()
682 683
	allDone := true
	for _, t := range childTasks {
684
		if t.getState() != taskDone {
685 686
			allDone = false
		}
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704

		// wait watchDeltaChannel and watchQueryChannel task done after loading segment
		nodeID := getDstNodeIDByTask(t)
		if t.msgType() == commonpb.MsgType_LoadSegments {
			if !lpt.cluster.hasWatchedDeltaChannel(lpt.ctx, nodeID, collectionID) ||
				!lpt.cluster.hasWatchedQueryChannel(lpt.ctx, nodeID, collectionID) {
				allDone = false
				break
			}
		}

		// wait watchQueryChannel task done after watching dmChannel
		if t.msgType() == commonpb.MsgType_WatchDmChannels {
			if !lpt.cluster.hasWatchedQueryChannel(lpt.ctx, nodeID, collectionID) {
				allDone = false
				break
			}
		}
705 706 707 708 709 710
	}
	if allDone {
		for _, id := range partitionIDs {
			err := lpt.meta.setLoadPercentage(collectionID, id, 100, querypb.LoadType_LoadPartition)
			if err != nil {
				log.Error("loadPartitionTask: set load percentage to meta's collectionInfo", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", id))
711
				lpt.setResultInfo(err)
712 713
			}
		}
714
		lpt.once.Do(func() {
C
cai.zhang 已提交
715
			metrics.QueryCoordLoadCount.WithLabelValues(metrics.SuccessLabel).Inc()
716 717 718
			metrics.QueryCoordLoadLatency.WithLabelValues().Observe(float64(lpt.elapseSpan().Milliseconds()))
			metrics.QueryCoordNumChildTasks.WithLabelValues().Sub(float64(len(lpt.getChildTask())))
		})
719
	}
720 721
}

722
// preExecute resets the task result to success and logs the start of the task.
func (lpt *loadPartitionTask) preExecute(context.Context) error {
	collectionID := lpt.CollectionID
	lpt.setResultInfo(nil)
	log.Debug("start do loadPartitionTask",
		zap.Int64("msgID", lpt.getTaskID()),
		zap.Int64("collectionID", collectionID))
	return nil
}

731
// execute loads the requested partitions, mirroring loadCollectionTask.execute:
//  1. per partition, gather recovery info and build one LoadSegmentsRequest
//     per segment,
//  2. persist merged deltaChannel info into meta first (required so a
//     rescheduled loadSegment can find it),
//  3. build one WatchDmChannelsRequest per merged dm channel (scoped to
//     partitionIDs, unlike the collection variant),
//  4. assign child tasks, then record the collection/partitions in meta.
//
// Any failure sets the task result and returns; retry budget is consumed via
// the deferred reduceRetryCount.
func (lpt *loadPartitionTask) execute(ctx context.Context) error {
	defer lpt.reduceRetryCount()
	collectionID := lpt.CollectionID
	partitionIDs := lpt.PartitionIDs

	var loadSegmentReqs []*querypb.LoadSegmentsRequest
	var watchDmChannelReqs []*querypb.WatchDmChannelsRequest
	var deltaChannelInfos []*datapb.VchannelInfo
	var dmChannelInfos []*datapb.VchannelInfo
	for _, partitionID := range partitionIDs {
		vChannelInfos, binlogs, err := lpt.broker.getRecoveryInfo(lpt.ctx, collectionID, partitionID)
		if err != nil {
			log.Error("loadPartitionTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
			lpt.setResultInfo(err)
			return err
		}

		// One LoadSegmentsRequest per segment keeps each request small
		// (see MaxSendSizeToEtcd).
		for _, segmentBingLog := range binlogs {
			segmentLoadInfo := lpt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, lpt.Schema)
			msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
			msgBase.MsgType = commonpb.MsgType_LoadSegments
			loadSegmentReq := &querypb.LoadSegmentsRequest{
				Base:         msgBase,
				Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
				Schema:       lpt.Schema,
				CollectionID: collectionID,
				LoadMeta: &querypb.LoadMetaInfo{
					LoadType:     querypb.LoadType_LoadPartition,
					CollectionID: collectionID,
					PartitionIDs: partitionIDs,
				},
			}
			loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
		}

		for _, info := range vChannelInfos {
			deltaChannelInfo, err := generateWatchDeltaChannelInfo(info)
			if err != nil {
				log.Error("loadPartitionTask: generateWatchDeltaChannelInfo failed", zap.Int64("collectionID", collectionID), zap.String("channelName", info.ChannelName), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
				lpt.setResultInfo(err)
				return err
			}
			deltaChannelInfos = append(deltaChannelInfos, deltaChannelInfo)
			dmChannelInfos = append(dmChannelInfos, info)
		}
	}
	mergedDeltaChannels := mergeWatchDeltaChannelInfo(deltaChannelInfos)
	// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
	err := lpt.meta.setDeltaChannel(collectionID, mergedDeltaChannels)
	if err != nil {
		log.Error("loadPartitionTask: set delta channel info failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
		lpt.setResultInfo(err)
		return err
	}

	mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
	for _, info := range mergedDmChannel {
		msgBase := proto.Clone(lpt.Base).(*commonpb.MsgBase)
		msgBase.MsgType = commonpb.MsgType_WatchDmChannels
		watchRequest := &querypb.WatchDmChannelsRequest{
			Base:         msgBase,
			CollectionID: collectionID,
			PartitionIDs: partitionIDs,
			Infos:        []*datapb.VchannelInfo{info},
			Schema:       lpt.Schema,
			LoadMeta: &querypb.LoadMetaInfo{
				LoadType:     querypb.LoadType_LoadPartition,
				CollectionID: collectionID,
				PartitionIDs: partitionIDs,
			},
		}

		watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
	}

	internalTasks, err := assignInternalTask(ctx, lpt, lpt.meta, lpt.cluster, loadSegmentReqs, watchDmChannelReqs, false, nil, nil)
	if err != nil {
		log.Error("loadPartitionTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
		lpt.setResultInfo(err)
		return err
	}
	for _, internalTask := range internalTasks {
		lpt.addChildTask(internalTask)
		log.Debug("loadPartitionTask: add a childTask", zap.Int64("collectionID", collectionID), zap.Int32("task type", int32(internalTask.msgType())))
	}
	metrics.QueryCoordNumChildTasks.WithLabelValues().Add(float64(len(internalTasks)))
	log.Debug("loadPartitionTask: assign child task done", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID))

	err = lpt.meta.addCollection(collectionID, querypb.LoadType_LoadPartition, lpt.Schema)
	if err != nil {
		log.Error("loadPartitionTask: add collection to meta failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
		lpt.setResultInfo(err)
		return err
	}

	err = lpt.meta.addPartitions(collectionID, partitionIDs)
	if err != nil {
		log.Error("loadPartitionTask: add partition to meta failed", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", partitionIDs), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
		lpt.setResultInfo(err)
		return err
	}

	// NOTE(review): "msgID" appears twice in this log entry (getTaskID and
	// Base.MsgID) — duplicate zap key; consider renaming one of them.
	log.Debug("loadPartitionTask Execute done",
		zap.Int64("msgID", lpt.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64s("partitionIDs", partitionIDs),
		zap.Int64("msgID", lpt.Base.MsgID))
	return nil
}

841
// postExecute cleans up after execute: on failure it drops all pending child
// tasks and removes the collection from meta (a meta failure here is treated
// as unrecoverable and panics).
func (lpt *loadPartitionTask) postExecute(ctx context.Context) error {
	collectionID := lpt.CollectionID
	partitionIDs := lpt.PartitionIDs
	if lpt.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		lpt.clearChildTasks()
		err := lpt.meta.releaseCollection(collectionID)
		if err != nil {
			log.Error("loadPartitionTask: occur error when release collection info from meta", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
			panic(err)
		}
	}

	log.Debug("loadPartitionTask postExecute done",
		zap.Int64("msgID", lpt.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64s("partitionIDs", partitionIDs))
	return nil
}

860
// rollBack generates a releaseCollectionTask for every online query node to
// undo a partially-applied load, then removes the collection from meta.
func (lpt *loadPartitionTask) rollBack(ctx context.Context) []task {
	collectionID := lpt.CollectionID
	resultTasks := make([]task, 0)
	//brute force rollBack, should optimize
	onlineNodeIDs := lpt.cluster.onlineNodeIDs()
	for _, nodeID := range onlineNodeIDs {
		req := &querypb.ReleaseCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_ReleaseCollection,
				MsgID:     lpt.Base.MsgID,
				Timestamp: lpt.Base.Timestamp,
				SourceID:  lpt.Base.SourceID,
			},
			DbID:         lpt.DbID,
			CollectionID: collectionID,
			NodeID:       nodeID,
		}
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
		baseTask.setParentTask(lpt)
		releaseCollectionTask := &releaseCollectionTask{
			baseTask:                 baseTask,
			ReleaseCollectionRequest: req,
			cluster:                  lpt.cluster,
		}
		resultTasks = append(resultTasks, releaseCollectionTask)
	}

	// meta must be cleaned as well; panic on failure keeps meta consistent with
	// the rest of the file's rollback error handling
	err := lpt.meta.releaseCollection(collectionID)
	if err != nil {
		log.Error("loadPartitionTask: release collection info from meta failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lpt.Base.MsgID), zap.Error(err))
		panic(err)
	}
	log.Debug("loadPartitionTask: generate rollBack task for loadPartitionTask", zap.Int64("collectionID", collectionID), zap.Int64("msgID", lpt.Base.MsgID))
	return resultTasks
}

896 897 898
// releasePartitionTask will release all the data of this partition on query nodes
type releasePartitionTask struct {
	*baseTask
	*querypb.ReleasePartitionsRequest
	cluster Cluster // query node cluster used to dispatch release RPCs
	meta    Meta    // coordinator metadata, updated once all nodes released
}

904
// msgBase returns the base message info of the release request.
func (rpt *releasePartitionTask) msgBase() *commonpb.MsgBase {
	return rpt.Base
}

908
// marshal serializes the embedded request for persistence in etcd.
func (rpt *releasePartitionTask) marshal() ([]byte, error) {
	return proto.Marshal(rpt.ReleasePartitionsRequest)
}

912
// msgType returns the message type carried by the request base.
func (rpt *releasePartitionTask) msgType() commonpb.MsgType {
	return rpt.Base.MsgType
}

916
// timestamp returns the request timestamp.
func (rpt *releasePartitionTask) timestamp() Timestamp {
	return rpt.Base.Timestamp
}

920 921 922 923 924 925 926 927 928 929 930 931 932 933 934
// updateTaskProcess finalizes the release: when this is the top-level task
// (no parent), all child node-level releases have finished, so the partition
// entries are removed from meta.
func (rpt *releasePartitionTask) updateTaskProcess() {
	collectionID := rpt.CollectionID
	partitionIDs := rpt.PartitionIDs
	parentTask := rpt.getParentTask()
	if parentTask == nil {
		// all queryNodes have successfully released the data, clean up collectionMeta
		err := rpt.meta.releasePartitions(collectionID, partitionIDs)
		if err != nil {
			log.Error("releasePartitionTask: release collectionInfo from meta failed", zap.Int64("collectionID", collectionID), zap.Int64("msgID", rpt.Base.MsgID), zap.Error(err))
			panic(err)
		}

	}
}

935
// preExecute resets the task result and logs the release parameters.
func (rpt *releasePartitionTask) preExecute(context.Context) error {
	collectionID := rpt.CollectionID
	partitionIDs := rpt.PartitionIDs
	rpt.setResultInfo(nil)
	log.Debug("start do releasePartitionTask",
		zap.Int64("msgID", rpt.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64s("partitionIDs", partitionIDs))
	return nil
}

946
func (rpt *releasePartitionTask) execute(ctx context.Context) error {
947 948
	// cancel the maximum number of retries for queryNode cleaning data until the data is completely freed
	// defer rpt.reduceRetryCount()
949 950
	collectionID := rpt.CollectionID
	partitionIDs := rpt.PartitionIDs
951

952
	// if nodeID ==0, it means that the release request has not been assigned to the specified query node
953
	if rpt.NodeID <= 0 {
954 955
		onlineNodeIDs := rpt.cluster.onlineNodeIDs()
		for _, nodeID := range onlineNodeIDs {
956 957
			req := proto.Clone(rpt.ReleasePartitionsRequest).(*querypb.ReleasePartitionsRequest)
			req.NodeID = nodeID
X
xige-16 已提交
958
			baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
959
			baseTask.setParentTask(rpt)
960 961
			releasePartitionTask := &releasePartitionTask{
				baseTask:                 baseTask,
962
				ReleasePartitionsRequest: req,
963
				cluster:                  rpt.cluster,
964
				meta:                     rpt.meta,
965
			}
966
			rpt.addChildTask(releasePartitionTask)
967 968
			log.Debug("releasePartitionTask: add a releasePartitionTask to releasePartitionTask's childTask", zap.Int64("collectionID", collectionID), zap.Int64("msgID", rpt.Base.MsgID))
		}
969
	} else {
970
		err := rpt.cluster.releasePartitions(ctx, rpt.NodeID, rpt.ReleasePartitionsRequest)
971
		if err != nil {
972
			log.Warn("ReleasePartitionsTask: release partition end, node occur error", zap.Int64("collectionID", collectionID), zap.String("nodeID", fmt.Sprintln(rpt.NodeID)))
973 974
			// after release failed, the task will always redo
			// if the query node happens to be down, the node release was judged to have succeeded
975 976
			return err
		}
977 978
	}

979
	log.Debug("releasePartitionTask Execute done",
980
		zap.Int64("msgID", rpt.getTaskID()),
981
		zap.Int64("collectionID", collectionID),
982
		zap.Int64s("partitionIDs", partitionIDs),
983
		zap.Int64("nodeID", rpt.NodeID))
984 985 986
	return nil
}

987
// postExecute drops any generated child tasks if the release failed; the
// trigger task itself will be retried until the data is actually freed.
func (rpt *releasePartitionTask) postExecute(context.Context) error {
	collectionID := rpt.CollectionID
	partitionIDs := rpt.PartitionIDs
	if rpt.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		rpt.clearChildTasks()
	}

	log.Debug("releasePartitionTask postExecute done",
		zap.Int64("msgID", rpt.getTaskID()),
		zap.Int64("collectionID", collectionID),
		zap.Int64s("partitionIDs", partitionIDs),
		zap.Int64("nodeID", rpt.NodeID))
	return nil
}

1002
// rollBack is not implemented for partition release; recovery is left as TODO.
func (rpt *releasePartitionTask) rollBack(ctx context.Context) []task {
	//TODO::
	//if taskID == 0, recovery meta
	//if taskID != 0, recovery partition on queryNode
	return nil
}

1009 1010
// loadSegmentTask is an internal task that loads a set of segments onto a
// single destination query node.
type loadSegmentTask struct {
	*baseTask
	*querypb.LoadSegmentsRequest
	meta           Meta
	cluster        Cluster
	excludeNodeIDs []int64 // nodes that must not be chosen when rescheduling
}

1017
// msgBase returns the base message info of the load request.
func (lst *loadSegmentTask) msgBase() *commonpb.MsgBase {
	return lst.Base
}

1021
// marshal serializes the embedded request for persistence in etcd.
func (lst *loadSegmentTask) marshal() ([]byte, error) {
	return proto.Marshal(lst.LoadSegmentsRequest)
}

1025
// isValid reports whether the task can still run: the context must be set and
// the destination node must be online.
func (lst *loadSegmentTask) isValid() bool {
	online, err := lst.cluster.isOnline(lst.DstNodeID)
	if err != nil {
		return false
	}

	return lst.ctx != nil && online
}

1034
// msgType returns the message type carried by the request base.
func (lst *loadSegmentTask) msgType() commonpb.MsgType {
	return lst.Base.MsgType
}
1037

1038
// timestamp returns the request timestamp.
func (lst *loadSegmentTask) timestamp() Timestamp {
	return lst.Base.Timestamp
}
1041

1042
// updateTaskProcess forwards progress to the parent trigger task, which owns
// the overall load progress accounting.
func (lst *loadSegmentTask) updateTaskProcess() {
	parentTask := lst.getParentTask()
	if parentTask == nil {
		log.Warn("loadSegmentTask: parentTask should not be nil")
		return
	}
	parentTask.updateTaskProcess()
}

1051
func (lst *loadSegmentTask) preExecute(context.Context) error {
1052 1053 1054 1055
	segmentIDs := make([]UniqueID, 0)
	for _, info := range lst.Infos {
		segmentIDs = append(segmentIDs, info.SegmentID)
	}
1056
	lst.setResultInfo(nil)
1057
	log.Debug("start do loadSegmentTask",
1058
		zap.Int64s("segmentIDs", segmentIDs),
1059
		zap.Int64("loaded nodeID", lst.DstNodeID),
1060
		zap.Int64("taskID", lst.getTaskID()))
G
godchen 已提交
1061
	return nil
1062
}
1063

1064
func (lst *loadSegmentTask) execute(ctx context.Context) error {
1065
	defer lst.reduceRetryCount()
1066

1067
	err := lst.cluster.loadSegments(ctx, lst.DstNodeID, lst.LoadSegmentsRequest)
1068
	if err != nil {
1069
		log.Warn("loadSegmentTask: loadSegment occur error", zap.Int64("taskID", lst.getTaskID()))
1070
		lst.setResultInfo(err)
1071 1072
		return err
	}
1073

1074
	log.Debug("loadSegmentTask Execute done",
1075
		zap.Int64("taskID", lst.getTaskID()))
1076 1077
	return nil
}
1078

1079
// postExecute only logs completion; result propagation is handled by the
// parent task.
func (lst *loadSegmentTask) postExecute(context.Context) error {
	log.Debug("loadSegmentTask postExecute done",
		zap.Int64("taskID", lst.getTaskID()))
	return nil
}

1085
// reschedule re-assigns the segments of a failed load to other nodes. It
// splits the original request into one request per segment, excludes the node
// that just failed, and lets assignInternalTask pick new destinations.
func (lst *loadSegmentTask) reschedule(ctx context.Context) ([]task, error) {
	loadSegmentReqs := make([]*querypb.LoadSegmentsRequest, 0)
	for _, info := range lst.Infos {
		msgBase := proto.Clone(lst.Base).(*commonpb.MsgBase)
		msgBase.MsgType = commonpb.MsgType_LoadSegments
		req := &querypb.LoadSegmentsRequest{
			Base:         msgBase,
			Infos:        []*querypb.SegmentLoadInfo{info},
			Schema:       lst.Schema,
			SourceNodeID: lst.SourceNodeID,
			CollectionID: lst.CollectionID,
			LoadMeta: &querypb.LoadMetaInfo{
				LoadType:     lst.GetLoadMeta().GetLoadType(),
				CollectionID: lst.GetCollectionID(),
				PartitionIDs: lst.GetLoadMeta().GetPartitionIDs(),
			},
		}
		loadSegmentReqs = append(loadSegmentReqs, req)
	}
	if lst.excludeNodeIDs == nil {
		lst.excludeNodeIDs = []int64{}
	}
	// never re-assign to the node that just failed
	lst.excludeNodeIDs = append(lst.excludeNodeIDs, lst.DstNodeID)

	// for node-down triggers the assignment must block until it succeeds
	wait2AssignTaskSuccess := false
	if lst.getParentTask().getTriggerCondition() == querypb.TriggerCondition_NodeDown {
		wait2AssignTaskSuccess = true
	}
	reScheduledTasks, err := assignInternalTask(ctx, lst.getParentTask(), lst.meta, lst.cluster, loadSegmentReqs, nil, wait2AssignTaskSuccess, lst.excludeNodeIDs, nil)
	if err != nil {
		log.Error("loadSegment reschedule failed", zap.Int64s("excludeNodes", lst.excludeNodeIDs), zap.Int64("taskID", lst.getTaskID()), zap.Error(err))
		return nil, err
	}

	return reScheduledTasks, nil
}

1122 1123
// releaseSegmentTask is an internal task that releases segments from a single
// query node.
type releaseSegmentTask struct {
	*baseTask
	*querypb.ReleaseSegmentsRequest
	cluster Cluster
}

1128
// msgBase returns the base message info of the release request.
func (rst *releaseSegmentTask) msgBase() *commonpb.MsgBase {
	return rst.Base
}

1132
// marshal serializes the embedded request for persistence in etcd.
func (rst *releaseSegmentTask) marshal() ([]byte, error) {
	return proto.Marshal(rst.ReleaseSegmentsRequest)
}

1136
// isValid reports whether the task can still run: context set and target node
// online.
func (rst *releaseSegmentTask) isValid() bool {
	online, err := rst.cluster.isOnline(rst.NodeID)
	if err != nil {
		return false
	}
	return rst.ctx != nil && online
}

1144
// msgType returns the message type carried by the request base.
func (rst *releaseSegmentTask) msgType() commonpb.MsgType {
	return rst.Base.MsgType
}
1147

1148
// timestamp returns the request timestamp.
func (rst *releaseSegmentTask) timestamp() Timestamp {
	return rst.Base.Timestamp
}
1151

1152
// preExecute resets the task result and logs which segments will be released.
func (rst *releaseSegmentTask) preExecute(context.Context) error {
	segmentIDs := rst.SegmentIDs
	rst.setResultInfo(nil)
	log.Debug("start do releaseSegmentTask",
		zap.Int64s("segmentIDs", segmentIDs),
		zap.Int64("loaded nodeID", rst.NodeID),
		zap.Int64("taskID", rst.getTaskID()))
	return nil
}
1161

1162
func (rst *releaseSegmentTask) execute(ctx context.Context) error {
1163
	defer rst.reduceRetryCount()
1164

1165
	err := rst.cluster.releaseSegments(rst.ctx, rst.NodeID, rst.ReleaseSegmentsRequest)
1166
	if err != nil {
1167
		log.Warn("releaseSegmentTask: releaseSegment occur error", zap.Int64("taskID", rst.getTaskID()))
1168
		rst.setResultInfo(err)
1169 1170 1171 1172
		return err
	}

	log.Debug("releaseSegmentTask Execute done",
1173
		zap.Int64s("segmentIDs", rst.SegmentIDs),
1174
		zap.Int64("taskID", rst.getTaskID()))
1175 1176
	return nil
}
1177

1178
// postExecute only logs completion.
func (rst *releaseSegmentTask) postExecute(context.Context) error {
	segmentIDs := rst.SegmentIDs
	log.Debug("releaseSegmentTask postExecute done",
		zap.Int64s("segmentIDs", segmentIDs),
		zap.Int64("taskID", rst.getTaskID()))
	return nil
}

1186 1187
// watchDmChannelTask is an internal task that makes a query node subscribe to
// a set of DM (data manipulation) channels.
type watchDmChannelTask struct {
	*baseTask
	*querypb.WatchDmChannelsRequest
	meta           Meta
	cluster        Cluster
	excludeNodeIDs []int64 // nodes that must not be chosen when rescheduling
}

1194
// msgBase returns the base message info of the watch request.
func (wdt *watchDmChannelTask) msgBase() *commonpb.MsgBase {
	return wdt.Base
}

1198
// marshal serializes the embedded request for persistence in etcd.
func (wdt *watchDmChannelTask) marshal() ([]byte, error) {
	return proto.Marshal(wdt.WatchDmChannelsRequest)
}

1202
// isValid reports whether the task can still run: context set and target node
// online.
func (wdt *watchDmChannelTask) isValid() bool {
	online, err := wdt.cluster.isOnline(wdt.NodeID)
	if err != nil {
		return false
	}
	return wdt.ctx != nil && online
}

1210
// msgType returns the message type carried by the request base.
func (wdt *watchDmChannelTask) msgType() commonpb.MsgType {
	return wdt.Base.MsgType
}
1213

1214
// timestamp returns the request timestamp.
func (wdt *watchDmChannelTask) timestamp() Timestamp {
	return wdt.Base.Timestamp
}
1217

1218
// updateTaskProcess forwards progress to the parent trigger task.
func (wdt *watchDmChannelTask) updateTaskProcess() {
	parentTask := wdt.getParentTask()
	if parentTask == nil {
		log.Warn("watchDmChannelTask: parentTask should not be nil")
		return
	}
	parentTask.updateTaskProcess()
}

1227
// preExecute resets the task result and logs which DM channels will be
// watched by the target node.
func (wdt *watchDmChannelTask) preExecute(context.Context) error {
	channelInfos := wdt.Infos
	channels := make([]string, 0)
	for _, info := range channelInfos {
		channels = append(channels, info.ChannelName)
	}
	wdt.setResultInfo(nil)
	log.Debug("start do watchDmChannelTask",
		zap.Strings("dmChannels", channels),
		zap.Int64("loaded nodeID", wdt.NodeID),
		zap.Int64("taskID", wdt.getTaskID()))
	return nil
}
1240

1241
func (wdt *watchDmChannelTask) execute(ctx context.Context) error {
1242
	defer wdt.reduceRetryCount()
1243

1244
	err := wdt.cluster.watchDmChannels(wdt.ctx, wdt.NodeID, wdt.WatchDmChannelsRequest)
1245
	if err != nil {
1246
		log.Warn("watchDmChannelTask: watchDmChannel occur error", zap.Int64("taskID", wdt.getTaskID()))
1247
		wdt.setResultInfo(err)
1248 1249
		return err
	}
1250

1251
	log.Debug("watchDmChannelsTask Execute done",
1252
		zap.Int64("taskID", wdt.getTaskID()))
1253 1254
	return nil
}
1255

1256
// postExecute only logs completion.
func (wdt *watchDmChannelTask) postExecute(context.Context) error {
	log.Debug("watchDmChannelTask postExecute done",
		zap.Int64("taskID", wdt.getTaskID()))
	return nil
}

1262
// reschedule re-assigns the DM channels of a failed watch to other nodes. It
// splits the original request into one request per channel, excludes the node
// that just failed, and lets assignInternalTask pick new destinations.
func (wdt *watchDmChannelTask) reschedule(ctx context.Context) ([]task, error) {
	collectionID := wdt.CollectionID
	watchDmChannelReqs := make([]*querypb.WatchDmChannelsRequest, 0)
	for _, info := range wdt.Infos {
		msgBase := proto.Clone(wdt.Base).(*commonpb.MsgBase)
		msgBase.MsgType = commonpb.MsgType_WatchDmChannels
		req := &querypb.WatchDmChannelsRequest{
			Base:         msgBase,
			CollectionID: collectionID,
			PartitionIDs: wdt.PartitionIDs,
			Infos:        []*datapb.VchannelInfo{info},
			Schema:       wdt.Schema,
			ExcludeInfos: wdt.ExcludeInfos,
			LoadMeta: &querypb.LoadMetaInfo{
				LoadType:     wdt.GetLoadMeta().GetLoadType(),
				CollectionID: collectionID,
				PartitionIDs: wdt.GetLoadMeta().GetPartitionIDs(),
			},
		}
		watchDmChannelReqs = append(watchDmChannelReqs, req)
	}

	if wdt.excludeNodeIDs == nil {
		wdt.excludeNodeIDs = []int64{}
	}
	// never re-assign to the node that just failed
	wdt.excludeNodeIDs = append(wdt.excludeNodeIDs, wdt.NodeID)
	// for node-down triggers the assignment must block until it succeeds
	wait2AssignTaskSuccess := false
	if wdt.getParentTask().getTriggerCondition() == querypb.TriggerCondition_NodeDown {
		wait2AssignTaskSuccess = true
	}
	reScheduledTasks, err := assignInternalTask(ctx, wdt.parentTask, wdt.meta, wdt.cluster, nil, watchDmChannelReqs, wait2AssignTaskSuccess, wdt.excludeNodeIDs, nil)
	if err != nil {
		log.Error("watchDmChannel reschedule failed", zap.Int64("taskID", wdt.getTaskID()), zap.Int64s("excludeNodes", wdt.excludeNodeIDs), zap.Error(err))
		return nil, err
	}

	return reScheduledTasks, nil
}

1301 1302 1303
// watchDeltaChannelTask is an internal task that makes a query node subscribe
// to a set of delta channels.
type watchDeltaChannelTask struct {
	*baseTask
	*querypb.WatchDeltaChannelsRequest
	cluster Cluster
}

// msgBase returns the base message info of the watch request.
func (wdt *watchDeltaChannelTask) msgBase() *commonpb.MsgBase {
	return wdt.Base
}

// marshal serializes the embedded request for persistence in etcd.
func (wdt *watchDeltaChannelTask) marshal() ([]byte, error) {
	return proto.Marshal(wdt.WatchDeltaChannelsRequest)
}

1315 1316 1317 1318 1319 1320 1321 1322 1323
// isValid reports whether the task can still run: context set and target node
// online.
func (wdt *watchDeltaChannelTask) isValid() bool {
	online, err := wdt.cluster.isOnline(wdt.NodeID)
	if err != nil {
		return false
	}

	return wdt.ctx != nil && online
}

1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355
// msgType returns the message type carried by the request base.
func (wdt *watchDeltaChannelTask) msgType() commonpb.MsgType {
	return wdt.Base.MsgType
}

// timestamp returns the request timestamp.
func (wdt *watchDeltaChannelTask) timestamp() Timestamp {
	return wdt.Base.Timestamp
}

// updateTaskProcess forwards progress to the parent trigger task.
func (wdt *watchDeltaChannelTask) updateTaskProcess() {
	parentTask := wdt.getParentTask()
	if parentTask == nil {
		log.Warn("watchDeltaChannel: parentTask should not be nil")
		return
	}
	parentTask.updateTaskProcess()
}

// preExecute resets the task result and logs which delta channels will be
// watched by the target node.
func (wdt *watchDeltaChannelTask) preExecute(context.Context) error {
	channelInfos := wdt.Infos
	channels := make([]string, 0)
	for _, info := range channelInfos {
		channels = append(channels, info.ChannelName)
	}
	wdt.setResultInfo(nil)
	log.Debug("start do watchDeltaChannelTask",
		zap.Strings("deltaChannels", channels),
		zap.Int64("loaded nodeID", wdt.NodeID),
		zap.Int64("taskID", wdt.getTaskID()))
	return nil
}

// execute sends the watch-delta-channels RPC to the target node; each attempt
// consumes one retry.
func (wdt *watchDeltaChannelTask) execute(ctx context.Context) error {
	defer wdt.reduceRetryCount()

	err := wdt.cluster.watchDeltaChannels(wdt.ctx, wdt.NodeID, wdt.WatchDeltaChannelsRequest)
	if err != nil {
		log.Warn("watchDeltaChannelTask: watchDeltaChannel occur error", zap.Int64("taskID", wdt.getTaskID()), zap.Error(err))
		wdt.setResultInfo(err)
		return err
	}

	log.Debug("watchDeltaChannelsTask Execute done",
		zap.Int64("taskID", wdt.getTaskID()))
	return nil
}

// postExecute only logs completion.
func (wdt *watchDeltaChannelTask) postExecute(context.Context) error {
	log.Debug("watchDeltaChannelTask postExecute done",
		zap.Int64("taskID", wdt.getTaskID()))
	return nil
}

1376 1377
// watchQueryChannelTask is an internal task that registers a query channel on
// a query node so it can receive search/query requests.
type watchQueryChannelTask struct {
	*baseTask
	*querypb.AddQueryChannelRequest
	cluster Cluster
}

1382
// msgBase returns the base message info of the request.
func (wqt *watchQueryChannelTask) msgBase() *commonpb.MsgBase {
	return wqt.Base
}

1386
// marshal serializes the embedded request for persistence in etcd.
func (wqt *watchQueryChannelTask) marshal() ([]byte, error) {
	return proto.Marshal(wqt.AddQueryChannelRequest)
}
1389

1390
// isValid reports whether the task can still run: context set and target node
// online.
func (wqt *watchQueryChannelTask) isValid() bool {
	online, err := wqt.cluster.isOnline(wqt.NodeID)
	if err != nil {
		return false
	}

	return wqt.ctx != nil && online
}
1398

1399
// msgType returns the message type carried by the request base.
func (wqt *watchQueryChannelTask) msgType() commonpb.MsgType {
	return wqt.Base.MsgType
}

1403
// timestamp returns the request timestamp.
func (wqt *watchQueryChannelTask) timestamp() Timestamp {
	return wqt.Base.Timestamp
}

1407
// updateTaskProcess forwards progress to the parent trigger task.
func (wqt *watchQueryChannelTask) updateTaskProcess() {
	parentTask := wqt.getParentTask()
	if parentTask == nil {
		log.Warn("watchQueryChannelTask: parentTask should not be nil")
		return
	}
	parentTask.updateTaskProcess()
}

1416
// preExecute resets the task result and logs the query channel parameters.
func (wqt *watchQueryChannelTask) preExecute(context.Context) error {
	wqt.setResultInfo(nil)
	log.Debug("start do watchQueryChannelTask",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("loaded nodeID", wqt.NodeID),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}
1426

1427
// execute registers the query channel on the target node via the cluster;
// each attempt consumes one retry.
func (wqt *watchQueryChannelTask) execute(ctx context.Context) error {
	defer wqt.reduceRetryCount()

	err := wqt.cluster.addQueryChannel(wqt.ctx, wqt.NodeID, wqt.AddQueryChannelRequest)
	if err != nil {
		log.Warn("watchQueryChannelTask: watchQueryChannel occur error", zap.Int64("taskID", wqt.getTaskID()), zap.Error(err))
		wqt.setResultInfo(err)
		return err
	}

	log.Debug("watchQueryChannelTask Execute done",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}
1444

1445 1446
// postExecute only logs completion.
func (wqt *watchQueryChannelTask) postExecute(context.Context) error {
	log.Debug("watchQueryChannelTask postExecute done",
		zap.Int64("collectionID", wqt.CollectionID),
		zap.String("queryChannel", wqt.QueryChannel),
		zap.String("queryResultChannel", wqt.QueryResultChannel),
		zap.Int64("taskID", wqt.getTaskID()))
	return nil
}

X
xige-16 已提交
1454
//****************************handoff task********************************//

// handoffTask swaps freshly flushed/compacted segments into the loaded
// collection: new sealed segments are loaded and replace their sources.
type handoffTask struct {
	*baseTask
	*querypb.HandoffSegmentsRequest
	broker  *globalMetaBroker // access to root/data coord for recovery info
	cluster Cluster
	meta    Meta
}

// msgBase returns the base message info of the handoff request.
func (ht *handoffTask) msgBase() *commonpb.MsgBase {
	return ht.Base
}

// marshal serializes the embedded request for persistence in etcd.
func (ht *handoffTask) marshal() ([]byte, error) {
	return proto.Marshal(ht.HandoffSegmentsRequest)
}

// msgType returns the message type carried by the request base.
func (ht *handoffTask) msgType() commonpb.MsgType {
	return ht.Base.MsgType
}

// timestamp returns the request timestamp.
func (ht *handoffTask) timestamp() Timestamp {
	return ht.Base.Timestamp
}

// preExecute resets the task result and logs the IDs of the segments being
// handed off.
func (ht *handoffTask) preExecute(context.Context) error {
	ht.setResultInfo(nil)
	infos := ht.HandoffSegmentsRequest.SegmentInfos
	ids := make([]UniqueID, 0, len(infos))
	for _, segInfo := range infos {
		ids = append(ids, segInfo.SegmentID)
	}
	log.Debug("start do handoff segments task",
		zap.Int64s("segmentIDs", ids))
	return nil
}

func (ht *handoffTask) execute(ctx context.Context) error {
	segmentInfos := ht.HandoffSegmentsRequest.SegmentInfos
	for _, segmentInfo := range segmentInfos {
		collectionID := segmentInfo.CollectionID
		partitionID := segmentInfo.PartitionID
		segmentID := segmentInfo.SegmentID

		collectionInfo, err := ht.meta.getCollectionInfoByID(collectionID)
		if err != nil {
			log.Debug("handoffTask: collection has not been loaded into memory", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID))
			continue
		}

1504
		if collectionInfo.LoadType == querypb.LoadType_LoadCollection && ht.meta.hasReleasePartition(collectionID, partitionID) {
1505 1506 1507 1508
			log.Debug("handoffTask: partition has not been released", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID))
			continue
		}

X
xige-16 已提交
1509 1510 1511 1512 1513 1514 1515
		partitionLoaded := false
		for _, id := range collectionInfo.PartitionIDs {
			if id == partitionID {
				partitionLoaded = true
			}
		}

1516
		if collectionInfo.LoadType != querypb.LoadType_LoadCollection && !partitionLoaded {
X
xige-16 已提交
1517 1518 1519 1520
			log.Debug("handoffTask: partition has not been loaded into memory", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("segmentID", segmentID))
			continue
		}

1521
		//  segment which is compacted from should exist in query node
1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
		for _, compactedSegID := range segmentInfo.CompactionFrom {
			_, err = ht.meta.getSegmentInfoByID(compactedSegID)
			if err != nil {
				log.Error("handoffTask: compacted segment has not been loaded into memory", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Int64("segmentID", segmentID))
				ht.setResultInfo(err)
				return err
			}
		}

		// segment which is compacted to should not exist in query node
X
xige-16 已提交
1532 1533
		_, err = ht.meta.getSegmentInfoByID(segmentID)
		if err != nil {
1534
			dmChannelInfos, binlogs, err := ht.broker.getRecoveryInfo(ht.ctx, collectionID, partitionID)
X
xige-16 已提交
1535
			if err != nil {
1536
				log.Error("handoffTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Error(err))
X
xige-16 已提交
1537 1538 1539 1540 1541 1542
				ht.setResultInfo(err)
				return err
			}

			findBinlog := false
			var loadSegmentReq *querypb.LoadSegmentsRequest
G
godchen 已提交
1543
			var watchDeltaChannels []*datapb.VchannelInfo
1544 1545
			for _, segmentBinlog := range binlogs {
				if segmentBinlog.SegmentID == segmentID {
X
xige-16 已提交
1546
					findBinlog = true
1547 1548 1549
					segmentLoadInfo := ht.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBinlog, false, nil)
					segmentLoadInfo.CompactionFrom = segmentInfo.CompactionFrom
					segmentLoadInfo.IndexInfos = segmentInfo.IndexInfos
X
xige-16 已提交
1550 1551 1552
					msgBase := proto.Clone(ht.Base).(*commonpb.MsgBase)
					msgBase.MsgType = commonpb.MsgType_LoadSegments
					loadSegmentReq = &querypb.LoadSegmentsRequest{
1553 1554 1555 1556
						Base:         msgBase,
						Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
						Schema:       collectionInfo.Schema,
						CollectionID: collectionID,
X
xige-16 已提交
1557 1558 1559
					}
				}
			}
1560
			for _, info := range dmChannelInfos {
1561 1562 1563
				deltaChannel, err := generateWatchDeltaChannelInfo(info)
				if err != nil {
					return err
1564
				}
1565
				watchDeltaChannels = append(watchDeltaChannels, deltaChannel)
1566
			}
X
xige-16 已提交
1567 1568 1569 1570 1571 1572

			if !findBinlog {
				err = fmt.Errorf("segmnet has not been flushed, segmentID is %d", segmentID)
				ht.setResultInfo(err)
				return err
			}
1573
			mergedDeltaChannels := mergeWatchDeltaChannelInfo(watchDeltaChannels)
G
godchen 已提交
1574
			// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
1575
			err = ht.meta.setDeltaChannel(collectionID, mergedDeltaChannels)
G
godchen 已提交
1576
			if err != nil {
1577 1578
				log.Error("handoffTask: set delta channel info to meta failed", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID), zap.Error(err))
				ht.setResultInfo(err)
G
godchen 已提交
1579 1580
				return err
			}
1581
			internalTasks, err := assignInternalTask(ctx, ht, ht.meta, ht.cluster, []*querypb.LoadSegmentsRequest{loadSegmentReq}, nil, true, nil, nil)
X
xige-16 已提交
1582
			if err != nil {
1583
				log.Error("handoffTask: assign child task failed", zap.Int64("collectionID", collectionID), zap.Int64("segmentID", segmentID), zap.Error(err))
X
xige-16 已提交
1584 1585 1586
				ht.setResultInfo(err)
				return err
			}
1587 1588
			for _, internalTask := range internalTasks {
				ht.addChildTask(internalTask)
1589
				log.Debug("handoffTask: add a childTask", zap.Int32("task type", int32(internalTask.msgType())), zap.Int64("segmentID", segmentID))
1590
			}
X
xige-16 已提交
1591 1592
		} else {
			err = fmt.Errorf("sealed segment has been exist on query node, segmentID is %d", segmentID)
1593
			log.Error("handoffTask: handoff segment failed", zap.Int64("segmentID", segmentID), zap.Error(err))
X
xige-16 已提交
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606
			ht.setResultInfo(err)
			return err
		}
	}

	log.Debug("handoffTask: assign child task done", zap.Any("segmentInfos", segmentInfos))

	log.Debug("handoffTask Execute done",
		zap.Int64("taskID", ht.getTaskID()))
	return nil
}

// postExecute drops the generated child tasks if the handoff failed.
func (ht *handoffTask) postExecute(context.Context) error {
	if ht.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		ht.clearChildTasks()
	}

	log.Debug("handoffTask postExecute done",
		zap.Int64("taskID", ht.getTaskID()))

	return nil
}

// rollBack is currently a no-op: not releasing the partially loaded segments
// does not affect query correctness (see TODO below).
func (ht *handoffTask) rollBack(ctx context.Context) []task {
	resultTasks := make([]task, 0)
	childTasks := ht.getChildTask()
	for _, childTask := range childTasks {
		if childTask.msgType() == commonpb.MsgType_LoadSegments {
			// TODO:: add release segment to rollBack, no release does not affect correctness of query
		}
	}

	return resultTasks
}
1628

1629 1630
// loadBalanceTask redistributes segments/channels away from the source nodes,
// either on node-down recovery or on an explicit balance request.
type loadBalanceTask struct {
	*baseTask
	*querypb.LoadBalanceRequest
	broker  *globalMetaBroker
	cluster Cluster
	meta    Meta
}

1637
// msgBase returns the base message info of the balance request.
func (lbt *loadBalanceTask) msgBase() *commonpb.MsgBase {
	return lbt.Base
}

1641
// marshal serializes the embedded request for persistence in etcd.
func (lbt *loadBalanceTask) marshal() ([]byte, error) {
	return proto.Marshal(lbt.LoadBalanceRequest)
}

1645
// msgType returns the message type carried by the request base.
func (lbt *loadBalanceTask) msgType() commonpb.MsgType {
	return lbt.Base.MsgType
}

1649
// timestamp returns the request timestamp.
func (lbt *loadBalanceTask) timestamp() Timestamp {
	return lbt.Base.Timestamp
}

1653
// preExecute resets the task result and logs the balance trigger and sources.
func (lbt *loadBalanceTask) preExecute(context.Context) error {
	lbt.setResultInfo(nil)
	log.Debug("start do loadBalanceTask",
		zap.Int32("trigger type", int32(lbt.triggerCondition)),
		zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs),
		zap.Any("balanceReason", lbt.BalanceReason),
		zap.Int64("taskID", lbt.getTaskID()))
	return nil
}

1663
// execute performs the balance in one of two modes depending on the trigger:
//
//   - TriggerCondition_NodeDown: recover everything (segments and DM
//     channels) that lived on the downed source nodes. Failures after this
//     point panic instead of returning an error: per the comment in
//     postExecute, the queryCoord restarts and redoes the balance task on
//     recovery, so a partial recovery must not be treated as terminal.
//   - TriggerCondition_LoadBalance: move the requested (or all) sealed
//     segments off healthy source nodes; failures here return an error
//     normally.
//
// In both modes the actual movement is delegated to child tasks created via
// assignInternalTask.
func (lbt *loadBalanceTask) execute(ctx context.Context) error {
	// Each execution attempt consumes one retry.
	defer lbt.reduceRetryCount()

	if lbt.triggerCondition == querypb.TriggerCondition_NodeDown {
		// Index everything hosted on the downed nodes so we can later match
		// recovery info against it.
		segmentID2Info := make(map[UniqueID]*querypb.SegmentInfo)
		dmChannel2WatchInfo := make(map[string]*querypb.DmChannelWatchInfo)
		loadSegmentReqs := make([]*querypb.LoadSegmentsRequest, 0)
		watchDmChannelReqs := make([]*querypb.WatchDmChannelsRequest, 0)
		recoveredCollectionIDs := make(map[UniqueID]struct{})
		for _, nodeID := range lbt.SourceNodeIDs {
			segmentInfos := lbt.meta.getSegmentInfosByNode(nodeID)
			for _, segmentInfo := range segmentInfos {
				segmentID2Info[segmentInfo.SegmentID] = segmentInfo
				recoveredCollectionIDs[segmentInfo.CollectionID] = struct{}{}
			}
			dmChannelWatchInfos := lbt.meta.getDmChannelInfosByNodeID(nodeID)
			for _, watchInfo := range dmChannelWatchInfos {
				dmChannel2WatchInfo[watchInfo.DmChannel] = watchInfo
				recoveredCollectionIDs[watchInfo.CollectionID] = struct{}{}
			}
		}

		// Rebuild load/watch requests for every collection that had data on
		// the downed nodes.
		for collectionID := range recoveredCollectionIDs {
			collectionInfo, err := lbt.meta.getCollectionInfoByID(collectionID)
			if err != nil {
				log.Error("loadBalanceTask: get collectionInfo from meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
				lbt.setResultInfo(err)
				return err
			}
			schema := collectionInfo.Schema
			var deltaChannelInfos []*datapb.VchannelInfo
			var dmChannelInfos []*datapb.VchannelInfo

			// For a collection-level load all current partitions must be
			// recovered; for a partition-level load only the loaded ones.
			var toRecoverPartitionIDs []UniqueID
			if collectionInfo.LoadType == querypb.LoadType_LoadCollection {
				toRecoverPartitionIDs, err = lbt.broker.showPartitionIDs(ctx, collectionID)
				if err != nil {
					log.Error("loadBalanceTask: show collection's partitionIDs failed", zap.Int64("collectionID", collectionID), zap.Error(err))
					lbt.setResultInfo(err)
					panic(err)
				}
			} else {
				toRecoverPartitionIDs = collectionInfo.PartitionIDs
			}
			log.Debug("loadBalanceTask: get collection's all partitionIDs", zap.Int64("collectionID", collectionID), zap.Int64s("partitionIDs", toRecoverPartitionIDs))

			for _, partitionID := range toRecoverPartitionIDs {
				vChannelInfos, binlogs, err := lbt.broker.getRecoveryInfo(lbt.ctx, collectionID, partitionID)
				if err != nil {
					log.Error("loadBalanceTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Error(err))
					lbt.setResultInfo(err)
					panic(err)
				}

				// Only re-load segments that actually lived on the downed
				// nodes; everything else is untouched.
				for _, segmentBingLog := range binlogs {
					segmentID := segmentBingLog.SegmentID
					if _, ok := segmentID2Info[segmentID]; ok {
						segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, schema)
						msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
						msgBase.MsgType = commonpb.MsgType_LoadSegments
						loadSegmentReq := &querypb.LoadSegmentsRequest{
							Base:         msgBase,
							Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
							Schema:       schema,
							CollectionID: collectionID,
							LoadMeta: &querypb.LoadMetaInfo{
								LoadType:     collectionInfo.LoadType,
								CollectionID: collectionID,
								PartitionIDs: toRecoverPartitionIDs,
							},
						}

						loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
					}
				}

				for _, info := range vChannelInfos {
					deltaChannel, err := generateWatchDeltaChannelInfo(info)
					if err != nil {
						log.Error("loadBalanceTask: generateWatchDeltaChannelInfo failed", zap.Int64("collectionID", collectionID), zap.String("channelName", info.ChannelName), zap.Error(err))
						lbt.setResultInfo(err)
						panic(err)
					}
					deltaChannelInfos = append(deltaChannelInfos, deltaChannel)
					dmChannelInfos = append(dmChannelInfos, info)
				}
			}

			mergedDeltaChannel := mergeWatchDeltaChannelInfo(deltaChannelInfos)
			// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
			err = lbt.meta.setDeltaChannel(collectionID, mergedDeltaChannel)
			if err != nil {
				log.Error("loadBalanceTask: set delta channel info meta failed", zap.Int64("collectionID", collectionID), zap.Error(err))
				lbt.setResultInfo(err)
				panic(err)
			}

			// Only re-watch channels that were watched by the downed nodes.
			mergedDmChannel := mergeDmChannelInfo(dmChannelInfos)
			for channelName, vChannelInfo := range mergedDmChannel {
				if _, ok := dmChannel2WatchInfo[channelName]; ok {
					msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
					msgBase.MsgType = commonpb.MsgType_WatchDmChannels
					watchRequest := &querypb.WatchDmChannelsRequest{
						Base:         msgBase,
						CollectionID: collectionID,
						Infos:        []*datapb.VchannelInfo{vChannelInfo},
						Schema:       schema,
						LoadMeta: &querypb.LoadMetaInfo{
							LoadType:     collectionInfo.LoadType,
							CollectionID: collectionID,
							PartitionIDs: toRecoverPartitionIDs,
						},
					}

					if collectionInfo.LoadType == querypb.LoadType_LoadPartition {
						watchRequest.PartitionIDs = toRecoverPartitionIDs
					}

					watchDmChannelReqs = append(watchDmChannelReqs, watchRequest)
				}
			}
		}
		internalTasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, watchDmChannelReqs, true, lbt.SourceNodeIDs, lbt.DstNodeIDs)
		if err != nil {
			log.Error("loadBalanceTask: assign child task failed", zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs))
			lbt.setResultInfo(err)
			panic(err)
		}
		for _, internalTask := range internalTasks {
			lbt.addChildTask(internalTask)
			log.Debug("loadBalanceTask: add a childTask", zap.Int32("task type", int32(internalTask.msgType())), zap.Any("task", internalTask))
		}
		log.Debug("loadBalanceTask: assign child task done", zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs))
	}

	if lbt.triggerCondition == querypb.TriggerCondition_LoadBalance {
		if len(lbt.SourceNodeIDs) == 0 {
			err := errors.New("loadBalanceTask: empty source Node list to balance")
			log.Error(err.Error())
			lbt.setResultInfo(err)
			return err
		}

		// Collect every sealed segment currently hosted on the source nodes.
		balancedSegmentInfos := make(map[UniqueID]*querypb.SegmentInfo)
		balancedSegmentIDs := make([]UniqueID, 0)

		for _, nodeID := range lbt.SourceNodeIDs {
			nodeExist := lbt.cluster.hasNode(nodeID)
			if !nodeExist {
				err := fmt.Errorf("loadBalanceTask: query node %d is not exist to balance", nodeID)
				log.Error(err.Error())
				lbt.setResultInfo(err)
				return err
			}
			segmentInfos := lbt.meta.getSegmentInfosByNode(nodeID)
			for _, info := range segmentInfos {
				balancedSegmentInfos[info.SegmentID] = info
				balancedSegmentIDs = append(balancedSegmentIDs, info.SegmentID)
			}
		}

		// check balanced sealedSegmentIDs in request whether exist in query node
		for _, segmentID := range lbt.SealedSegmentIDs {
			if _, ok := balancedSegmentInfos[segmentID]; !ok {
				err := fmt.Errorf("loadBalanceTask: unloaded segment %d", segmentID)
				log.Warn(err.Error())
				lbt.setResultInfo(err)
				return err
			}
		}

		// An explicit segment list in the request restricts the balance to
		// those segments; otherwise all segments on the source nodes move.
		if len(lbt.SealedSegmentIDs) != 0 {
			balancedSegmentIDs = lbt.SealedSegmentIDs
		}

		// Group the segments to move by collection and partition.
		col2PartitionIDs := make(map[UniqueID][]UniqueID)
		par2Segments := make(map[UniqueID][]*querypb.SegmentInfo)
		for _, segmentID := range balancedSegmentIDs {
			info := balancedSegmentInfos[segmentID]
			collectionID := info.CollectionID
			partitionID := info.PartitionID
			if _, ok := col2PartitionIDs[collectionID]; !ok {
				col2PartitionIDs[collectionID] = make([]UniqueID, 0)
			}
			if _, ok := par2Segments[partitionID]; !ok {
				col2PartitionIDs[collectionID] = append(col2PartitionIDs[collectionID], partitionID)
				par2Segments[partitionID] = make([]*querypb.SegmentInfo, 0)
			}
			par2Segments[partitionID] = append(par2Segments[partitionID], info)
		}

		loadSegmentReqs := make([]*querypb.LoadSegmentsRequest, 0)
		for collectionID, partitionIDs := range col2PartitionIDs {
			var watchDeltaChannels []*datapb.VchannelInfo
			collectionInfo, err := lbt.meta.getCollectionInfoByID(collectionID)
			if err != nil {
				log.Error("loadBalanceTask: can't find collectionID in meta", zap.Int64("collectionID", collectionID), zap.Error(err))
				lbt.setResultInfo(err)
				return err
			}
			for _, partitionID := range partitionIDs {
				dmChannelInfos, binlogs, err := lbt.broker.getRecoveryInfo(lbt.ctx, collectionID, partitionID)
				if err != nil {
					log.Error("loadBalanceTask: getRecoveryInfo failed", zap.Int64("collectionID", collectionID), zap.Int64("partitionID", partitionID), zap.Error(err))
					lbt.setResultInfo(err)
					return err
				}

				segmentID2Binlog := make(map[UniqueID]*datapb.SegmentBinlogs)
				for _, binlog := range binlogs {
					segmentID2Binlog[binlog.SegmentID] = binlog
				}

				for _, segmentInfo := range par2Segments[partitionID] {
					segmentID := segmentInfo.SegmentID
					// A segment can disappear from recovery info when it was
					// compacted away; skip it rather than fail the balance.
					if _, ok := segmentID2Binlog[segmentID]; !ok {
						log.Warn("loadBalanceTask: can't find binlog of segment to balance, may be has been compacted", zap.Int64("segmentID", segmentID))
						continue
					}
					segmentBingLog := segmentID2Binlog[segmentID]
					segmentLoadInfo := lbt.broker.generateSegmentLoadInfo(ctx, collectionID, partitionID, segmentBingLog, true, collectionInfo.Schema)
					msgBase := proto.Clone(lbt.Base).(*commonpb.MsgBase)
					msgBase.MsgType = commonpb.MsgType_LoadSegments
					loadSegmentReq := &querypb.LoadSegmentsRequest{
						Base:         msgBase,
						Infos:        []*querypb.SegmentLoadInfo{segmentLoadInfo},
						Schema:       collectionInfo.Schema,
						CollectionID: collectionID,
					}
					loadSegmentReqs = append(loadSegmentReqs, loadSegmentReq)
				}

				for _, info := range dmChannelInfos {
					deltaChannel, err := generateWatchDeltaChannelInfo(info)
					if err != nil {
						return err
					}
					watchDeltaChannels = append(watchDeltaChannels, deltaChannel)
				}
			}
			mergedDeltaChannels := mergeWatchDeltaChannelInfo(watchDeltaChannels)
			// If meta is not updated here, deltaChannel meta will not be available when loadSegment reschedule
			err = lbt.meta.setDeltaChannel(collectionID, mergedDeltaChannels)
			if err != nil {
				log.Error("loadBalanceTask: set delta channel info to meta failed", zap.Error(err))
				lbt.setResultInfo(err)
				return err
			}
		}
		internalTasks, err := assignInternalTask(ctx, lbt, lbt.meta, lbt.cluster, loadSegmentReqs, nil, false, lbt.SourceNodeIDs, lbt.DstNodeIDs)
		if err != nil {
			log.Error("loadBalanceTask: assign child task failed", zap.Any("balance request", lbt.LoadBalanceRequest))
			lbt.setResultInfo(err)
			return err
		}
		for _, internalTask := range internalTasks {
			lbt.addChildTask(internalTask)
			log.Debug("loadBalanceTask: add a childTask", zap.Int32("task type", int32(internalTask.msgType())), zap.Any("balance request", lbt.LoadBalanceRequest))
		}
		log.Debug("loadBalanceTask: assign child task done", zap.Any("balance request", lbt.LoadBalanceRequest))
	}

	log.Debug("loadBalanceTask Execute done",
		zap.Int32("trigger type", int32(lbt.triggerCondition)),
		zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs),
		zap.Any("balanceReason", lbt.BalanceReason),
		zap.Int64("taskID", lbt.getTaskID()))
	return nil
}

1933
// postExecute clears child tasks when the balance failed, and — only for a
// fully successful node-down balance — removes the downed nodes' info from
// the cluster.
func (lbt *loadBalanceTask) postExecute(context.Context) error {
	if lbt.getResultInfo().ErrorCode != commonpb.ErrorCode_Success {
		lbt.clearChildTasks()
	}

	// if loadBalanceTask execute failed after query node down, the lbt.getResultInfo().ErrorCode will be set to commonpb.ErrorCode_UnexpectedError
	// then the queryCoord will panic, and the nodeInfo should not be removed immediately
	// after queryCoord recovery, the balanceTask will redo
	if lbt.triggerCondition == querypb.TriggerCondition_NodeDown && lbt.getResultInfo().ErrorCode == commonpb.ErrorCode_Success {
		for _, id := range lbt.SourceNodeIDs {
			err := lbt.cluster.removeNodeInfo(id)
			if err != nil {
				// Best-effort removal: log and continue with the other nodes.
				//TODO:: clear node info after removeNodeInfo failed
				log.Error("loadBalanceTask: occur error when removing node info from cluster", zap.Int64("nodeID", id))
			}
		}
	}

	log.Debug("loadBalanceTask postExecute done",
		zap.Int32("trigger type", int32(lbt.triggerCondition)),
		zap.Int64s("sourceNodeIDs", lbt.SourceNodeIDs),
		zap.Any("balanceReason", lbt.BalanceReason),
		zap.Int64("taskID", lbt.getTaskID()))
	return nil
}

G
godchen 已提交
1959
// assignInternalTask allocates the given load-segment and watch-dmChannel
// requests to query nodes and wraps them into child tasks of parentTask.
//
// Load requests are first assigned a destination node by the cluster, then
// sorted by (collectionID, dstNodeID) and packed into batches: requests for
// the same collection and node are merged into one LoadSegmentsRequest until
// the batch would exceed MaxSendSizeToEtcd (tasks are persisted to etcd, so
// each must stay under the etcd message size limit). Watch requests become
// one task each.
//
// Parameters:
//   - wait: whether allocation should block until nodes are available.
//   - excludeNodeIDs: nodes that must not receive any work (e.g. downed ones).
//   - includeNodeIDs: restricts segment placement to these nodes, if set.
//
// Returns the created child tasks, or an error if allocation failed.
func assignInternalTask(ctx context.Context,
	parentTask task, meta Meta, cluster Cluster,
	loadSegmentRequests []*querypb.LoadSegmentsRequest,
	watchDmChannelRequests []*querypb.WatchDmChannelsRequest,
	wait bool, excludeNodeIDs []int64, includeNodeIDs []int64) ([]task, error) {
	log.Debug("assignInternalTask: start assign task to query node")
	internalTasks := make([]task, 0)
	err := cluster.allocateSegmentsToQueryNode(ctx, loadSegmentRequests, wait, excludeNodeIDs, includeNodeIDs)
	if err != nil {
		log.Error("assignInternalTask: assign segment to node failed", zap.Any("load segments requests", loadSegmentRequests))
		return nil, err
	}
	log.Debug("assignInternalTask: assign segment to node success")

	err = cluster.allocateChannelsToQueryNode(ctx, watchDmChannelRequests, wait, excludeNodeIDs)
	if err != nil {
		log.Error("assignInternalTask: assign dmChannel to node failed", zap.Any("watch dmChannel requests", watchDmChannelRequests))
		return nil, err
	}
	log.Debug("assignInternalTask: assign dmChannel to node success")

	if len(loadSegmentRequests) > 0 {
		// Sort so that requests for the same (collection, node) pair are
		// adjacent and can be merged in a single pass.
		sort.Slice(loadSegmentRequests, func(i, j int) bool {
			return loadSegmentRequests[i].CollectionID < loadSegmentRequests[j].CollectionID ||
				loadSegmentRequests[i].CollectionID == loadSegmentRequests[j].CollectionID && loadSegmentRequests[i].DstNodeID < loadSegmentRequests[j].DstNodeID
		})

		// batchReq accumulates merged segment infos; note it aliases (and
		// mutates) the first request of each batch.
		batchReq := loadSegmentRequests[0]
		batchSize := proto.Size(batchReq)
		for _, req := range loadSegmentRequests[1:] {
			// Pack current batch, switch to new batch
			if req.CollectionID != batchReq.CollectionID || req.DstNodeID != batchReq.DstNodeID ||
				batchSize+proto.Size(req) > MaxSendSizeToEtcd {
				baseTask := newBaseTask(ctx, parentTask.getTriggerCondition())
				baseTask.setParentTask(parentTask)
				loadSegmentTask := &loadSegmentTask{
					baseTask:            baseTask,
					LoadSegmentsRequest: batchReq,
					meta:                meta,
					cluster:             cluster,
					excludeNodeIDs:      excludeNodeIDs,
				}
				internalTasks = append(internalTasks, loadSegmentTask)

				batchReq = req
				batchSize = proto.Size(batchReq)
			} else {
				batchReq.Infos = append(batchReq.Infos, req.Infos...)
				batchSize += proto.Size(req)
			}
		}

		// Pack the last batch
		baseTask := newBaseTask(ctx, parentTask.getTriggerCondition())
		baseTask.setParentTask(parentTask)
		loadSegmentTask := &loadSegmentTask{
			baseTask:            baseTask,
			LoadSegmentsRequest: batchReq,
			meta:                meta,
			cluster:             cluster,
			excludeNodeIDs:      excludeNodeIDs,
		}
		internalTasks = append(internalTasks, loadSegmentTask)
	}

	// Each watch request becomes its own child task; no batching needed.
	for _, req := range watchDmChannelRequests {
		baseTask := newBaseTask(ctx, parentTask.getTriggerCondition())
		baseTask.setParentTask(parentTask)
		watchDmChannelTask := &watchDmChannelTask{
			baseTask:               baseTask,
			WatchDmChannelsRequest: req,
			meta:                   meta,
			cluster:                cluster,
			excludeNodeIDs:         excludeNodeIDs,
		}
		internalTasks = append(internalTasks, watchDmChannelTask)
	}

	return internalTasks, nil
}
2039

G
godchen 已提交
2040
func generateWatchDeltaChannelInfo(info *datapb.VchannelInfo) (*datapb.VchannelInfo, error) {
2041
	deltaChannelName, err := funcutil.ConvertChannelName(info.ChannelName, Params.CommonCfg.RootCoordDml, Params.CommonCfg.RootCoordDelta)
G
godchen 已提交
2042 2043 2044 2045 2046 2047 2048 2049 2050 2051
	if err != nil {
		return nil, err
	}
	deltaChannel := proto.Clone(info).(*datapb.VchannelInfo)
	deltaChannel.ChannelName = deltaChannelName
	deltaChannel.UnflushedSegments = nil
	deltaChannel.FlushedSegments = nil
	deltaChannel.DroppedSegments = nil
	return deltaChannel, nil
}
2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068

func mergeWatchDeltaChannelInfo(infos []*datapb.VchannelInfo) []*datapb.VchannelInfo {
	minPositions := make(map[string]int)
	for index, info := range infos {
		_, ok := minPositions[info.ChannelName]
		if !ok {
			minPositions[info.ChannelName] = index
		}
		minTimeStampIndex := minPositions[info.ChannelName]
		if info.SeekPosition.GetTimestamp() < infos[minTimeStampIndex].SeekPosition.GetTimestamp() {
			minPositions[info.ChannelName] = index
		}
	}
	var result []*datapb.VchannelInfo
	for _, index := range minPositions {
		result = append(result, infos[index])
	}
G
godchen 已提交
2069 2070 2071 2072
	log.Debug("merge delta channels finished",
		zap.Any("origin info length", len(infos)),
		zap.Any("merged info length", len(result)),
	)
2073 2074
	return result
}
2075

2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086
// mergeDmChannelInfo merges DM channel infos that share a channel name into
// one VchannelInfo per channel, keyed by channel name. For each channel the
// earliest seek position (smallest timestamp) is kept and the dropped /
// unflushed / flushed segment lists of all duplicates are concatenated.
//
// NOTE: the first info seen for each channel is used as the merge target and
// is mutated in place, so elements of the caller's slice may be modified.
func mergeDmChannelInfo(infos []*datapb.VchannelInfo) map[string]*datapb.VchannelInfo {
	minPositions := make(map[string]*datapb.VchannelInfo)
	for _, info := range infos {
		if _, ok := minPositions[info.ChannelName]; !ok {
			// First occurrence becomes the merge target.
			minPositions[info.ChannelName] = info
			continue
		}
		minPositionInfo := minPositions[info.ChannelName]
		// Keep the earliest seek position across duplicates.
		if info.SeekPosition.GetTimestamp() < minPositionInfo.SeekPosition.GetTimestamp() {
			minPositionInfo.SeekPosition = info.SeekPosition
		}
		minPositionInfo.DroppedSegments = append(minPositionInfo.DroppedSegments, info.DroppedSegments...)
		minPositionInfo.UnflushedSegments = append(minPositionInfo.UnflushedSegments, info.UnflushedSegments...)
		minPositionInfo.FlushedSegments = append(minPositionInfo.FlushedSegments, info.FlushedSegments...)
	}

	return minPositions
}