未验证 提交 235d736a 编写于 作者: X xige-16 提交者: GitHub

Add max number of retries for interTask in querycoord (#8215)

Signed-off-by: xige-16 <xi.ge@zilliz.com>
上级 bd3a8ed3
......@@ -374,6 +374,7 @@ func (c *queryNodeCluster) releasePartitions(ctx context.Context, nodeID int64,
log.Debug("ReleasePartitions: queryNode release partitions error", zap.String("error", err.Error()))
return err
}
for _, partitionID := range in.PartitionIDs {
err = c.clusterMeta.releasePartition(in.CollectionID, partitionID)
if err != nil {
......
......@@ -141,21 +141,23 @@ func (qc *QueryCoord) LoadCollection(ctx context.Context, req *querypb.LoadColle
return status, err
}
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_grpcRequest)
loadCollectionTask := &LoadCollectionTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_grpcRequest,
},
BaseTask: baseTask,
LoadCollectionRequest: req,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
qc.scheduler.Enqueue([]task{loadCollectionTask})
err := qc.scheduler.Enqueue(loadCollectionTask)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
return status, err
}
err := loadCollectionTask.WaitToFinish()
err = loadCollectionTask.WaitToFinish()
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
......@@ -188,20 +190,22 @@ func (qc *QueryCoord) ReleaseCollection(ctx context.Context, req *querypb.Releas
return status, nil
}
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_grpcRequest)
releaseCollectionTask := &ReleaseCollectionTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_grpcRequest,
},
BaseTask: baseTask,
ReleaseCollectionRequest: req,
cluster: qc.cluster,
meta: qc.meta,
rootCoord: qc.rootCoordClient,
}
qc.scheduler.Enqueue([]task{releaseCollectionTask})
err := qc.scheduler.Enqueue(releaseCollectionTask)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
return status, err
}
err := releaseCollectionTask.WaitToFinish()
err = releaseCollectionTask.WaitToFinish()
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
......@@ -329,20 +333,22 @@ func (qc *QueryCoord) LoadPartitions(ctx context.Context, req *querypb.LoadParti
req.PartitionIDs = partitionIDsToLoad
}
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_grpcRequest)
loadPartitionTask := &LoadPartitionTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_grpcRequest,
},
BaseTask: baseTask,
LoadPartitionsRequest: req,
dataCoord: qc.dataCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
qc.scheduler.Enqueue([]task{loadPartitionTask})
err := qc.scheduler.Enqueue(loadPartitionTask)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
return status, err
}
err := loadPartitionTask.WaitToFinish()
err = loadPartitionTask.WaitToFinish()
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
......@@ -398,18 +404,20 @@ func (qc *QueryCoord) ReleasePartitions(ctx context.Context, req *querypb.Releas
}
req.PartitionIDs = toReleasedPartitions
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_grpcRequest)
releasePartitionTask := &ReleasePartitionTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_grpcRequest,
},
BaseTask: baseTask,
ReleasePartitionsRequest: req,
cluster: qc.cluster,
}
qc.scheduler.Enqueue([]task{releasePartitionTask})
err := qc.scheduler.Enqueue(releasePartitionTask)
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
return status, err
}
err := releasePartitionTask.WaitToFinish()
err = releasePartitionTask.WaitToFinish()
if err != nil {
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
......
......@@ -13,6 +13,7 @@ package querycoord
import (
"context"
"encoding/json"
"errors"
"testing"
"time"
......@@ -328,6 +329,87 @@ func TestGrpcTask(t *testing.T) {
assert.Nil(t, err)
}
// TestGrpcTaskEnqueueFail verifies that QueryCoord's gRPC entry points
// (LoadPartitions, LoadCollection, ReleasePartitions, ReleaseCollection)
// return an UnexpectedError status and a non-nil error when the task
// scheduler cannot enqueue the trigger task, and that requests succeed
// again once the scheduler's task-ID allocator is restored.
func TestGrpcTaskEnqueueFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	_, err = startQueryNodeServer(ctx)
	assert.Nil(t, err)

	// Keep the working allocator so it can be restored mid-test, then
	// install one that always fails; Enqueue cannot assign a task ID and
	// must reject every trigger task.
	taskIDAllocator := queryCoord.scheduler.taskIDAllocator
	failedAllocator := func() (UniqueID, error) {
		return 0, errors.New("scheduler failed to allocate ID")
	}

	queryCoord.scheduler.taskIDAllocator = failedAllocator

	t.Run("Test LoadPartition", func(t *testing.T) {
		status, err := queryCoord.LoadPartitions(ctx, &querypb.LoadPartitionsRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_LoadPartitions,
			},
			CollectionID: defaultCollectionID,
			PartitionIDs: []UniqueID{defaultPartitionID},
			Schema:       genCollectionSchema(defaultCollectionID, false),
		})
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
		assert.NotNil(t, err)
	})

	t.Run("Test LoadCollection", func(t *testing.T) {
		status, err := queryCoord.LoadCollection(ctx, &querypb.LoadCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_LoadCollection,
			},
			CollectionID: defaultCollectionID,
			Schema:       genCollectionSchema(defaultCollectionID, false),
		})
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
		assert.NotNil(t, err)
	})

	// Restore the real allocator and load the collection for real, so the
	// release paths below have something to release and only fail because
	// of the (re-broken) allocator, not because nothing is loaded.
	queryCoord.scheduler.taskIDAllocator = taskIDAllocator
	status, err := queryCoord.LoadCollection(ctx, &querypb.LoadCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadCollection,
		},
		CollectionID: defaultCollectionID,
		Schema:       genCollectionSchema(defaultCollectionID, false),
	})
	assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
	assert.Nil(t, err)

	// Break the allocator again to exercise the release-side failure paths.
	queryCoord.scheduler.taskIDAllocator = failedAllocator

	t.Run("Test ReleasePartition", func(t *testing.T) {
		status, err := queryCoord.ReleasePartitions(ctx, &querypb.ReleasePartitionsRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_ReleasePartitions,
			},
			CollectionID: defaultCollectionID,
			PartitionIDs: []UniqueID{defaultPartitionID},
		})
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
		assert.NotNil(t, err)
	})

	t.Run("Test ReleaseCollection", func(t *testing.T) {
		status, err := queryCoord.ReleaseCollection(ctx, &querypb.ReleaseCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_ReleaseCollection,
			},
			CollectionID: defaultCollectionID,
		})
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
		assert.NotNil(t, err)
	})

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}
func TestLoadBalanceTask(t *testing.T) {
refreshParams()
baseCtx := context.Background()
......@@ -371,7 +453,7 @@ func TestLoadBalanceTask(t *testing.T) {
}
loadBalanceTask := &LoadBalanceTask{
BaseTask: BaseTask{
BaseTask: &BaseTask{
ctx: baseCtx,
Condition: NewTaskCondition(baseCtx),
triggerCondition: querypb.TriggerCondition_nodeDown,
......@@ -382,7 +464,7 @@ func TestLoadBalanceTask(t *testing.T) {
cluster: queryCoord.cluster,
meta: queryCoord.meta,
}
queryCoord.scheduler.Enqueue([]task{loadBalanceTask})
queryCoord.scheduler.Enqueue(loadBalanceTask)
res, err = queryCoord.ReleaseCollection(baseCtx, &querypb.ReleaseCollectionRequest{
Base: &commonpb.MsgBase{
......@@ -400,6 +482,7 @@ func TestLoadBalanceTask(t *testing.T) {
}
func TestGrpcTaskBeforeHealthy(t *testing.T) {
refreshParams()
ctx := context.Background()
unHealthyCoord, err := startUnHealthyQueryCoord(ctx)
assert.Nil(t, err)
......
......@@ -423,25 +423,23 @@ func (m *MetaReplica) releaseCollection(collectionID UniqueID) error {
defer m.Unlock()
delete(m.collectionInfos, collectionID)
var err error
for id, info := range m.segmentInfos {
if info.CollectionID == collectionID {
err := removeSegmentInfo(id, m.client)
err = removeSegmentInfo(id, m.client)
if err != nil {
log.Error("remove segmentInfo error", zap.Any("error", err.Error()), zap.Int64("segmentID", id))
return err
log.Warn("remove segmentInfo error", zap.Any("error", err.Error()), zap.Int64("segmentID", id))
}
delete(m.segmentInfos, id)
}
}
delete(m.queryChannelInfos, collectionID)
err := removeGlobalCollectionInfo(collectionID, m.client)
err = removeGlobalCollectionInfo(collectionID, m.client)
if err != nil {
log.Error("remove collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
return err
log.Warn("remove collectionInfo error", zap.Any("error", err.Error()), zap.Int64("collectionID", collectionID))
}
return nil
return err
}
func (m *MetaReplica) releasePartition(collectionID UniqueID, partitionID UniqueID) error {
......
......@@ -214,7 +214,9 @@ func (rc *rootCoordMock) createCollection(collectionID UniqueID) {
if _, ok := rc.Col2partition[collectionID]; !ok {
rc.CollectionIDs = append(rc.CollectionIDs, collectionID)
rc.Col2partition[collectionID] = make([]UniqueID, 0)
partitionIDs := make([]UniqueID, 0)
partitionIDs = append(partitionIDs, defaultPartitionID+1)
rc.Col2partition[collectionID] = partitionIDs
}
}
......@@ -222,13 +224,30 @@ func (rc *rootCoordMock) createPartition(collectionID UniqueID, partitionID Uniq
rc.Lock()
defer rc.Unlock()
if _, ok := rc.Col2partition[collectionID]; ok {
rc.Col2partition[collectionID] = append(rc.Col2partition[collectionID], partitionID)
if partitionIDs, ok := rc.Col2partition[collectionID]; ok {
partitionExist := false
for _, id := range partitionIDs {
if id == partitionID {
partitionExist = true
break
}
}
if !partitionExist {
rc.Col2partition[collectionID] = append(rc.Col2partition[collectionID], partitionID)
}
return nil
}
return errors.New("collection not exist")
}
// CreatePartition mocks the RootCoord CreatePartition RPC for tests. The
// incoming request is ignored: the mock always registers defaultPartitionID
// under defaultCollectionID and reports success. The error returned by
// createPartition (presumably when the collection does not exist — confirm
// against the mock's createPartition implementation) is deliberately
// discarded so the RPC never fails.
func (rc *rootCoordMock) CreatePartition(ctx context.Context, req *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
	rc.createPartition(defaultCollectionID, defaultPartitionID)
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}
func (rc *rootCoordMock) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
collectionID := in.CollectionID
status := &commonpb.Status{
......@@ -244,7 +263,6 @@ func (rc *rootCoordMock) ShowPartitions(ctx context.Context, in *milvuspb.ShowPa
}
rc.createCollection(collectionID)
rc.createPartition(collectionID, defaultPartitionID)
return &milvuspb.ShowPartitionsResponse{
Status: status,
......@@ -267,16 +285,17 @@ type dataCoordMock struct {
minioKV kv.BaseKV
collections []UniqueID
col2DmChannels map[UniqueID][]*datapb.VchannelInfo
partitionID2Segment map[UniqueID]UniqueID
Segment2Binlog map[UniqueID][]*datapb.SegmentBinlogs
assignedSegmentID UniqueID
partitionID2Segment map[UniqueID][]UniqueID
Segment2Binlog map[UniqueID]*datapb.SegmentBinlogs
baseSegmentID UniqueID
channelNumPerCol int
}
func newDataCoordMock(ctx context.Context) (*dataCoordMock, error) {
collectionIDs := make([]UniqueID, 0)
col2DmChannels := make(map[UniqueID][]*datapb.VchannelInfo)
partitionID2Segment := make(map[UniqueID]UniqueID)
segment2Binglog := make(map[UniqueID][]*datapb.SegmentBinlogs)
partitionID2Segments := make(map[UniqueID][]UniqueID)
segment2Binglog := make(map[UniqueID]*datapb.SegmentBinlogs)
// create minio client
option := &minioKV.Option{
......@@ -296,9 +315,10 @@ func newDataCoordMock(ctx context.Context) (*dataCoordMock, error) {
minioKV: kv,
collections: collectionIDs,
col2DmChannels: col2DmChannels,
partitionID2Segment: partitionID2Segment,
partitionID2Segment: partitionID2Segments,
Segment2Binlog: segment2Binglog,
assignedSegmentID: defaultSegmentID,
baseSegmentID: defaultSegmentID,
channelNumPerCol: 2,
}, nil
}
......@@ -306,28 +326,36 @@ func (data *dataCoordMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetR
collectionID := req.CollectionID
partitionID := req.PartitionID
if _, ok := data.col2DmChannels[collectionID]; !ok {
segmentID := data.assignedSegmentID
data.partitionID2Segment[partitionID] = segmentID
fieldID2Paths, err := generateInsertBinLog(collectionID, partitionID, segmentID, "queryCoorf-mockDataCoord", data.minioKV)
if err != nil {
return nil, err
}
fieldBinlogs := make([]*datapb.FieldBinlog, 0)
for fieldID, path := range fieldID2Paths {
fieldBinlog := &datapb.FieldBinlog{
FieldID: fieldID,
Binlogs: []string{path},
if _, ok := data.partitionID2Segment[partitionID]; !ok {
segmentIDs := make([]UniqueID, 0)
for i := 0; i < data.channelNumPerCol; i++ {
segmentID := data.baseSegmentID
if _, ok := data.Segment2Binlog[segmentID]; !ok {
fieldID2Paths, err := generateInsertBinLog(collectionID, partitionID, segmentID, "queryCoorf-mockDataCoord", data.minioKV)
if err != nil {
return nil, err
}
fieldBinlogs := make([]*datapb.FieldBinlog, 0)
for fieldID, path := range fieldID2Paths {
fieldBinlog := &datapb.FieldBinlog{
FieldID: fieldID,
Binlogs: []string{path},
}
fieldBinlogs = append(fieldBinlogs, fieldBinlog)
}
segmentBinlog := &datapb.SegmentBinlogs{
SegmentID: segmentID,
FieldBinlogs: fieldBinlogs,
}
data.Segment2Binlog[segmentID] = segmentBinlog
}
fieldBinlogs = append(fieldBinlogs, fieldBinlog)
}
data.Segment2Binlog[segmentID] = make([]*datapb.SegmentBinlogs, 0)
segmentBinlog := &datapb.SegmentBinlogs{
SegmentID: segmentID,
FieldBinlogs: fieldBinlogs,
segmentIDs = append(segmentIDs, segmentID)
data.baseSegmentID++
}
data.Segment2Binlog[segmentID] = append(data.Segment2Binlog[segmentID], segmentBinlog)
data.partitionID2Segment[partitionID] = segmentIDs
}
if _, ok := data.col2DmChannels[collectionID]; !ok {
channelInfos := make([]*datapb.VchannelInfo, 0)
data.collections = append(data.collections, collectionID)
collectionName := funcutil.RandomString(8)
......@@ -339,20 +367,24 @@ func (data *dataCoordMock) GetRecoveryInfo(ctx context.Context, req *datapb.GetR
SeekPosition: &internalpb.MsgPosition{
ChannelName: vChannel,
},
FlushedSegments: []*datapb.SegmentInfo{{ID: segmentID}},
}
channelInfos = append(channelInfos, channelInfo)
}
data.col2DmChannels[collectionID] = channelInfos
}
segmentID := data.partitionID2Segment[partitionID]
binlogs := make([]*datapb.SegmentBinlogs, 0)
for _, segmentID := range data.partitionID2Segment[partitionID] {
if _, ok := data.Segment2Binlog[segmentID]; ok {
binlogs = append(binlogs, data.Segment2Binlog[segmentID])
}
}
return &datapb.GetRecoveryInfoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Channels: data.col2DmChannels[collectionID],
Binlogs: data.Segment2Binlog[segmentID],
Binlogs: binlogs,
}, nil
}
......
......@@ -87,6 +87,8 @@ func (qc *QueryCoord) Register() error {
// Init function initializes the queryCoord's meta, cluster, etcdKV and task scheduler
func (qc *QueryCoord) Init() error {
log.Debug("query coordinator start init")
//connect etcd
connectEtcdFn := func() error {
etcdKV, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
if err != nil {
......@@ -221,19 +223,17 @@ func (qc *QueryCoord) watchNodeLoop() {
SourceNodeIDs: offlineNodeIDs,
}
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_nodeDown)
loadBalanceTask := &LoadBalanceTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_nodeDown,
},
BaseTask: baseTask,
LoadBalanceRequest: loadBalanceSegment,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
qc.scheduler.Enqueue([]task{loadBalanceTask})
//TODO::deal enqueue error
qc.scheduler.Enqueue(loadBalanceTask)
log.Debug("start a loadBalance task", zap.Any("task", loadBalanceTask))
}
......@@ -271,21 +271,19 @@ func (qc *QueryCoord) watchNodeLoop() {
BalanceReason: querypb.TriggerCondition_nodeDown,
}
baseTask := newBaseTask(qc.loopCtx, querypb.TriggerCondition_nodeDown)
loadBalanceTask := &LoadBalanceTask{
BaseTask: BaseTask{
ctx: qc.loopCtx,
Condition: NewTaskCondition(qc.loopCtx),
triggerCondition: querypb.TriggerCondition_nodeDown,
},
BaseTask: baseTask,
LoadBalanceRequest: loadBalanceSegment,
rootCoord: qc.rootCoordClient,
dataCoord: qc.dataCoordClient,
cluster: qc.cluster,
meta: qc.meta,
}
qc.scheduler.Enqueue([]task{loadBalanceTask})
log.Debug("start a loadBalance task", zap.Any("task", loadBalanceTask))
qc.metricsCacheManager.InvalidateSystemInfoMetrics()
//TODO:: deal enqueue error
qc.scheduler.Enqueue(loadBalanceTask)
log.Debug("start a loadBalance task", zap.Any("task", loadBalanceTask))
}
}
}
......
......@@ -93,6 +93,11 @@ func startQueryCoord(ctx context.Context) (*QueryCoord, error) {
return coord, nil
}
// createDefaultPartition asks the (mocked) root coordinator to create the
// default partition; only the returned error is of interest to callers.
func createDefaultPartition(ctx context.Context, queryCoord *QueryCoord) error {
	if _, err := queryCoord.rootCoordClient.CreatePartition(ctx, nil); err != nil {
		return err
	}
	return nil
}
func startUnHealthyQueryCoord(ctx context.Context) (*QueryCoord, error) {
factory := msgstream.NewPmsFactory()
......
此差异已折叠。
......@@ -49,6 +49,7 @@ func (tt *testTask) Timestamp() Timestamp {
}
func (tt *testTask) PreExecute(ctx context.Context) error {
tt.SetResultInfo(nil)
log.Debug("test task preExecute...")
return nil
}
......@@ -59,7 +60,7 @@ func (tt *testTask) Execute(ctx context.Context) error {
switch tt.baseMsg.MsgType {
case commonpb.MsgType_LoadSegments:
childTask := &LoadSegmentTask{
BaseTask: BaseTask{
BaseTask: &BaseTask{
ctx: tt.ctx,
Condition: NewTaskCondition(tt.ctx),
triggerCondition: tt.triggerCondition,
......@@ -70,13 +71,14 @@ func (tt *testTask) Execute(ctx context.Context) error {
},
NodeID: tt.nodeID,
},
meta: tt.meta,
cluster: tt.cluster,
meta: tt.meta,
cluster: tt.cluster,
excludeNodeIDs: []int64{},
}
tt.AddChildTask(childTask)
case commonpb.MsgType_WatchDmChannels:
childTask := &WatchDmChannelTask{
BaseTask: BaseTask{
BaseTask: &BaseTask{
ctx: tt.ctx,
Condition: NewTaskCondition(tt.ctx),
triggerCondition: tt.triggerCondition,
......@@ -87,13 +89,14 @@ func (tt *testTask) Execute(ctx context.Context) error {
},
NodeID: tt.nodeID,
},
cluster: tt.cluster,
meta: tt.meta,
cluster: tt.cluster,
meta: tt.meta,
excludeNodeIDs: []int64{},
}
tt.AddChildTask(childTask)
case commonpb.MsgType_WatchQueryChannels:
childTask := &WatchQueryChannelTask{
BaseTask: BaseTask{
BaseTask: &BaseTask{
ctx: tt.ctx,
Condition: NewTaskCondition(tt.ctx),
triggerCondition: tt.triggerCondition,
......@@ -129,12 +132,7 @@ func TestWatchQueryChannel_ClearEtcdInfoAfterAssignedNodeDown(t *testing.T) {
queryNode.addQueryChannels = returnFailedResult
nodeID := queryNode.queryNodeID
for {
_, err = queryCoord.cluster.getNodeByID(nodeID)
if err == nil {
break
}
}
waitQueryNodeOnline(queryCoord.cluster, nodeID)
testTask := &testTask{
BaseTask: BaseTask{
ctx: baseCtx,
......@@ -148,7 +146,7 @@ func TestWatchQueryChannel_ClearEtcdInfoAfterAssignedNodeDown(t *testing.T) {
meta: queryCoord.meta,
nodeID: nodeID,
}
queryCoord.scheduler.Enqueue([]task{testTask})
queryCoord.scheduler.Enqueue(testTask)
queryNode.stop()
err = removeNodeSession(queryNode.queryNodeID)
......@@ -169,7 +167,11 @@ func TestUnMarshalTask(t *testing.T) {
refreshParams()
kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
assert.Nil(t, err)
taskScheduler := &TaskScheduler{}
baseCtx, cancel := context.WithCancel(context.Background())
taskScheduler := &TaskScheduler{
ctx: baseCtx,
cancel: cancel,
}
t.Run("Test LoadCollectionTask", func(t *testing.T) {
loadTask := &LoadCollectionTask{
......@@ -187,7 +189,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalLoadCollection")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1000, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_LoadCollection)
})
......@@ -208,7 +210,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalLoadPartition")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1001, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_LoadPartitions)
})
......@@ -229,7 +231,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalReleaseCollection")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1002, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_ReleaseCollection)
})
......@@ -250,7 +252,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalReleasePartition")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1003, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_ReleasePartitions)
})
......@@ -271,7 +273,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalLoadSegment")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1004, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_LoadSegments)
})
......@@ -292,7 +294,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalReleaseSegment")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1005, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_ReleaseSegments)
})
......@@ -313,7 +315,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalWatchDmChannel")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1006, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_WatchDmChannels)
})
......@@ -334,7 +336,7 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalWatchQueryChannel")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1007, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_WatchQueryChannels)
})
......@@ -356,17 +358,22 @@ func TestUnMarshalTask(t *testing.T) {
value, err := kv.Load("testMarshalLoadBalanceTask")
assert.Nil(t, err)
task, err := taskScheduler.unmarshalTask(value)
task, err := taskScheduler.unmarshalTask(1008, value)
assert.Nil(t, err)
assert.Equal(t, task.Type(), commonpb.MsgType_LoadBalanceSegments)
})
taskScheduler.Close()
}
func TestReloadTaskFromKV(t *testing.T) {
refreshParams()
kv, err := etcdkv.NewEtcdKV(Params.EtcdEndpoints, Params.MetaRootPath)
assert.Nil(t, err)
baseCtx, cancel := context.WithCancel(context.Background())
taskScheduler := &TaskScheduler{
ctx: baseCtx,
cancel: cancel,
client: kv,
triggerTaskQueue: NewTaskQueue(),
}
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册