Unverified commit cdbc6d2c, authored by bigsheeper, committed by GitHub

Refactor query node and query service (#5751)

Signed-off-by: xige-16 <xi.ge@zilliz.com>
Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
Co-authored-by: xige-16 <xi.ge@zilliz.com>
Co-authored-by: yudong.cai <yudong.cai@zilliz.com>
Parent 4165b761
......@@ -42,6 +42,7 @@ require (
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
golang.org/x/text v0.3.3
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/grpc v1.31.0
google.golang.org/protobuf v1.25.0 // indirect
......
......@@ -225,7 +225,7 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
"StateNone\020\000\022\014\n\010Unissued\020\001\022\016\n\nInProgress\020"
"\002\022\014\n\010Finished\020\003\022\n\n\006Failed\020\004*X\n\014SegmentSt"
"ate\022\024\n\020SegmentStateNone\020\000\022\014\n\010NotExist\020\001\022"
"\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004*\272\006"
"\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004*\363\007"
"\n\007MsgType\022\r\n\tUndefined\020\000\022\024\n\020CreateCollec"
"tion\020d\022\022\n\016DropCollection\020e\022\021\n\rHasCollect"
"ion\020f\022\026\n\022DescribeCollection\020g\022\023\n\017ShowCol"
......@@ -235,21 +235,25 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
"\014HasPartition\020\312\001\022\026\n\021DescribePartition\020\313\001"
"\022\023\n\016ShowPartitions\020\314\001\022\023\n\016LoadPartitions\020"
"\315\001\022\026\n\021ReleasePartitions\020\316\001\022\021\n\014ShowSegmen"
"ts\020\372\001\022\024\n\017DescribeSegment\020\373\001\022\020\n\013CreateInd"
"ex\020\254\002\022\022\n\rDescribeIndex\020\255\002\022\016\n\tDropIndex\020\256"
"\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005Flush\020\222\003\022"
"\013\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003\022\022\n\rGetIn"
"dexState\020\366\003\022\032\n\025GetIndexBuildProgress\020\367\003\022"
"\034\n\027GetCollectionStatistics\020\370\003\022\033\n\026GetPart"
"itionStatistics\020\371\003\022\r\n\010Retrieve\020\372\003\022\023\n\016Ret"
"rieveResult\020\373\003\022\020\n\013SegmentInfo\020\330\004\022\r\n\010Time"
"Tick\020\260\t\022\023\n\016QueryNodeStats\020\261\t\022\016\n\tLoadInde"
"x\020\262\t\022\016\n\tRequestID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n"
"\017AllocateSegment\020\265\t\022\026\n\021SegmentStatistics"
"\020\266\t\022\025\n\020SegmentFlushDone\020\267\t*\"\n\007DslType\022\007\n"
"\003Dsl\020\000\022\016\n\nBoolExprV1\020\001B5Z3github.com/mil"
"vus-io/milvus/internal/proto/commonpbb\006p"
"roto3"
"ts\020\372\001\022\024\n\017DescribeSegment\020\373\001\022\021\n\014LoadSegme"
"nts\020\374\001\022\024\n\017ReleaseSegments\020\375\001\022\024\n\017HandoffS"
"egments\020\376\001\022\030\n\023LoadBalanceSegments\020\377\001\022\020\n\013"
"CreateIndex\020\254\002\022\022\n\rDescribeIndex\020\255\002\022\016\n\tDr"
"opIndex\020\256\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005"
"Flush\020\222\003\022\013\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003"
"\022\022\n\rGetIndexState\020\366\003\022\032\n\025GetIndexBuildPro"
"gress\020\367\003\022\034\n\027GetCollectionStatistics\020\370\003\022\033"
"\n\026GetPartitionStatistics\020\371\003\022\r\n\010Retrieve\020"
"\372\003\022\023\n\016RetrieveResult\020\373\003\022\024\n\017WatchDmChanne"
"ls\020\374\003\022\025\n\020RemoveDmChannels\020\375\003\022\027\n\022WatchQue"
"ryChannels\020\376\003\022\030\n\023RemoveQueryChannels\020\377\003\022"
"\020\n\013SegmentInfo\020\330\004\022\r\n\010TimeTick\020\260\t\022\023\n\016Quer"
"yNodeStats\020\261\t\022\016\n\tLoadIndex\020\262\t\022\016\n\tRequest"
"ID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n\017AllocateSegmen"
"t\020\265\t\022\026\n\021SegmentStatistics\020\266\t\022\025\n\020SegmentF"
"lushDone\020\267\t*\"\n\007DslType\022\007\n\003Dsl\020\000\022\016\n\nBoolE"
"xprV1\020\001B5Z3github.com/milvus-io/milvus/i"
"nternal/proto/commonpbb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_common_2eproto_deps[1] = {
};
......@@ -264,7 +268,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_com
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_common_2eproto_once;
static bool descriptor_table_common_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_common_2eproto = {
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 2045,
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 2230,
&descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_sccs, descriptor_table_common_2eproto_deps, 6, 0,
schemas, file_default_instances, TableStruct_common_2eproto::offsets,
file_level_metadata_common_2eproto, 6, file_level_enum_descriptors_common_2eproto, file_level_service_descriptors_common_2eproto,
......@@ -371,6 +375,10 @@ bool MsgType_IsValid(int value) {
case 206:
case 250:
case 251:
case 252:
case 253:
case 254:
case 255:
case 300:
case 301:
case 302:
......@@ -385,6 +393,10 @@ bool MsgType_IsValid(int value) {
case 505:
case 506:
case 507:
case 508:
case 509:
case 510:
case 511:
case 600:
case 1200:
case 1201:
......
......@@ -215,6 +215,10 @@ enum MsgType : int {
ReleasePartitions = 206,
ShowSegments = 250,
DescribeSegment = 251,
LoadSegments = 252,
ReleaseSegments = 253,
HandoffSegments = 254,
LoadBalanceSegments = 255,
CreateIndex = 300,
DescribeIndex = 301,
DropIndex = 302,
......@@ -229,6 +233,10 @@ enum MsgType : int {
GetPartitionStatistics = 505,
Retrieve = 506,
RetrieveResult = 507,
WatchDmChannels = 508,
RemoveDmChannels = 509,
WatchQueryChannels = 510,
RemoveQueryChannels = 511,
SegmentInfo = 600,
TimeTick = 1200,
QueryNodeStats = 1201,
......
......@@ -246,9 +246,9 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
return ret.(*commonpb.Status), err
}
func (c *Client) CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error) {
func (c *Client) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
ret, err := c.recall(func() (interface{}, error) {
return c.grpcClient.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{})
return c.grpcClient.CreateQueryChannel(ctx, req)
})
return ret.(*querypb.CreateQueryChannelResponse), err
}
......
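For reference, the RPC now forwards the caller's request instead of constructing an empty CreateQueryChannelRequest internally. A minimal caller sketch, assuming the generated querypb types from this diff (the interface restates the method signature above; collection ID 1 is a made-up value):

type queryChannelCreator interface {
	CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error)
}

func createChannelFor(ctx context.Context, c queryChannelCreator) (string, string, error) {
	resp, err := c.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{
		CollectionID: 1, // hypothetical collection
	})
	if err != nil {
		return "", "", err
	}
	// request_channel and result_channel come from CreateQueryChannelResponse.
	return resp.GetRequestChannel(), resp.GetResultChannel(), nil
}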
......@@ -272,7 +272,7 @@ func (s *Server) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
}
func (s *Server) CreateQueryChannel(ctx context.Context, req *querypb.CreateQueryChannelRequest) (*querypb.CreateQueryChannelResponse, error) {
return s.queryservice.CreateQueryChannel(ctx)
return s.queryservice.CreateQueryChannel(ctx, req)
}
func (s *Server) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
......
......@@ -81,6 +81,10 @@ func NewMqMsgStream(ctx context.Context,
func (ms *mqMsgStream) AsProducer(channels []string) {
for _, channel := range channels {
if len(channel) == 0 {
log.Error("MsgStream asProducer's channel is a empty string")
break
}
fn := func() error {
pp, err := ms.client.CreateProducer(mqclient.ProducerOptions{Topic: channel})
if err != nil {
......
......@@ -16,9 +16,11 @@ import (
"errors"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
)
type MsgType = commonpb.MsgType
......@@ -882,3 +884,52 @@ func (sim *SegmentInfoMsg) Unmarshal(input MarshalType) (TsMsg, error) {
SegmentMsg: segMsg,
}, nil
}
/////////////////////////////////////////LoadBalanceSegments//////////////////////////////////////////
type LoadBalanceSegmentsMsg struct {
BaseMsg
querypb.LoadBalanceSegments
}
func (l *LoadBalanceSegmentsMsg) TraceCtx() context.Context {
return l.BaseMsg.Ctx
}
func (l *LoadBalanceSegmentsMsg) SetTraceCtx(ctx context.Context) {
l.BaseMsg.Ctx = ctx
}
func (l *LoadBalanceSegmentsMsg) ID() UniqueID {
return l.Base.MsgID
}
func (l *LoadBalanceSegmentsMsg) Type() MsgType {
return l.Base.MsgType
}
func (l *LoadBalanceSegmentsMsg) Marshal(input TsMsg) (MarshalType, error) {
load := input.(*LoadBalanceSegmentsMsg)
loadReq := &load.LoadBalanceSegments
mb, err := proto.Marshal(loadReq)
if err != nil {
return nil, err
}
return mb, nil
}
func (l *LoadBalanceSegmentsMsg) Unmarshal(input MarshalType) (TsMsg, error) {
loadReq := querypb.LoadBalanceSegments{}
in, err := ConvertToByteArray(input)
if err != nil {
return nil, err
}
err = proto.Unmarshal(in, &loadReq)
if err != nil {
return nil, err
}
loadMsg := &LoadBalanceSegmentsMsg{LoadBalanceSegments: loadReq}
loadMsg.BeginTimestamp = loadReq.Base.Timestamp
loadMsg.EndTimestamp = loadReq.Base.Timestamp
return loadMsg, nil
}
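A quick round-trip sketch for the new message type, written as if inside the msgstream package (the timestamp is illustrative). Note that Unmarshal derives BeginTimestamp and EndTimestamp from Base.Timestamp:

orig := &LoadBalanceSegmentsMsg{
	LoadBalanceSegments: querypb.LoadBalanceSegments{
		Base: &commonpb.MsgBase{
			MsgType:   commonpb.MsgType_LoadBalanceSegments,
			Timestamp: 100,
		},
	},
}
raw, err := orig.Marshal(orig) // proto-encodes the embedded request
if err != nil {
	panic(err)
}
tsMsg, err := (&LoadBalanceSegmentsMsg{}).Unmarshal(raw)
if err != nil {
	panic(err)
}
decoded := tsMsg.(*LoadBalanceSegmentsMsg)
_ = decoded // decoded.BeginTimestamp == 100 && decoded.EndTimestamp == 100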
......@@ -63,6 +63,7 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
flushCompletedMsg := FlushCompletedMsg{}
queryNodeSegStatsMsg := QueryNodeStatsMsg{}
segmentStatisticsMsg := SegmentStatisticsMsg{}
loadBalanceSegmentsMsg := LoadBalanceSegmentsMsg{}
p := &ProtoUnmarshalDispatcher{}
p.TempMap = make(map[commonpb.MsgType]UnmarshalFunc)
......@@ -80,6 +81,7 @@ func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
p.TempMap[commonpb.MsgType_SegmentInfo] = segmentInfoMsg.Unmarshal
p.TempMap[commonpb.MsgType_SegmentFlushDone] = flushCompletedMsg.Unmarshal
p.TempMap[commonpb.MsgType_SegmentStatistics] = segmentStatisticsMsg.Unmarshal
p.TempMap[commonpb.MsgType_LoadBalanceSegments] = loadBalanceSegmentsMsg.Unmarshal
return p
}
......
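With the registration above, a consumer can resolve the unmarshal function by message type. A brief sketch using the exported TempMap shown in this diff (raw is assumed to be bytes produced by LoadBalanceSegmentsMsg.Marshal):

dispatcher := (&ProtoUDFactory{}).NewUnmarshalDispatcher()
unmarshal := dispatcher.TempMap[commonpb.MsgType_LoadBalanceSegments]
tsMsg, err := unmarshal(raw)
if err == nil {
	loadMsg := tsMsg.(*LoadBalanceSegmentsMsg)
	_ = loadMsg
}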
......@@ -94,6 +94,10 @@ enum MsgType {
/* DEFINE REQUESTS: SEGMENT */
ShowSegments = 250;
DescribeSegment = 251;
LoadSegments = 252;
ReleaseSegments = 253;
HandoffSegments = 254;
LoadBalanceSegments = 255;
/* DEFINITION REQUESTS: INDEX */
CreateIndex = 300;
......@@ -114,6 +118,10 @@ enum MsgType {
GetPartitionStatistics = 505;
Retrieve = 506;
RetrieveResult = 507;
WatchDmChannels = 508;
RemoveDmChannels = 509;
WatchQueryChannels = 510;
RemoveQueryChannels = 511;
/* DATA SERVICE */
SegmentInfo = 600;
......
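The eight new values split into segment management (252 through 255) and channel management (508 through 511). A hedged routing sketch over the generated Go constants declared later in this diff:

func isSegmentManageMsg(t commonpb.MsgType) bool {
	switch t {
	case commonpb.MsgType_LoadSegments, commonpb.MsgType_ReleaseSegments,
		commonpb.MsgType_HandoffSegments, commonpb.MsgType_LoadBalanceSegments:
		return true
	}
	return false
}

func isChannelManageMsg(t commonpb.MsgType) bool {
	switch t {
	case commonpb.MsgType_WatchDmChannels, commonpb.MsgType_RemoveDmChannels,
		commonpb.MsgType_WatchQueryChannels, commonpb.MsgType_RemoveQueryChannels:
		return true
	}
	return false
}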
......@@ -208,8 +208,12 @@ const (
MsgType_LoadPartitions MsgType = 205
MsgType_ReleasePartitions MsgType = 206
// DEFINE REQUESTS: SEGMENT
MsgType_ShowSegments MsgType = 250
MsgType_DescribeSegment MsgType = 251
MsgType_ShowSegments MsgType = 250
MsgType_DescribeSegment MsgType = 251
MsgType_LoadSegments MsgType = 252
MsgType_ReleaseSegments MsgType = 253
MsgType_HandoffSegments MsgType = 254
MsgType_LoadBalanceSegments MsgType = 255
// DEFINITION REQUESTS: INDEX
MsgType_CreateIndex MsgType = 300
MsgType_DescribeIndex MsgType = 301
......@@ -227,6 +231,10 @@ const (
MsgType_GetPartitionStatistics MsgType = 505
MsgType_Retrieve MsgType = 506
MsgType_RetrieveResult MsgType = 507
MsgType_WatchDmChannels MsgType = 508
MsgType_RemoveDmChannels MsgType = 509
MsgType_WatchQueryChannels MsgType = 510
MsgType_RemoveQueryChannels MsgType = 511
// DATA SERVICE
MsgType_SegmentInfo MsgType = 600
// SYSTEM CONTROL
......@@ -259,6 +267,10 @@ var MsgType_name = map[int32]string{
206: "ReleasePartitions",
250: "ShowSegments",
251: "DescribeSegment",
252: "LoadSegments",
253: "ReleaseSegments",
254: "HandoffSegments",
255: "LoadBalanceSegments",
300: "CreateIndex",
301: "DescribeIndex",
302: "DropIndex",
......@@ -273,6 +285,10 @@ var MsgType_name = map[int32]string{
505: "GetPartitionStatistics",
506: "Retrieve",
507: "RetrieveResult",
508: "WatchDmChannels",
509: "RemoveDmChannels",
510: "WatchQueryChannels",
511: "RemoveQueryChannels",
600: "SegmentInfo",
1200: "TimeTick",
1201: "QueryNodeStats",
......@@ -303,6 +319,10 @@ var MsgType_value = map[string]int32{
"ReleasePartitions": 206,
"ShowSegments": 250,
"DescribeSegment": 251,
"LoadSegments": 252,
"ReleaseSegments": 253,
"HandoffSegments": 254,
"LoadBalanceSegments": 255,
"CreateIndex": 300,
"DescribeIndex": 301,
"DropIndex": 302,
......@@ -317,6 +337,10 @@ var MsgType_value = map[string]int32{
"GetPartitionStatistics": 505,
"Retrieve": 506,
"RetrieveResult": 507,
"WatchDmChannels": 508,
"RemoveDmChannels": 509,
"WatchQueryChannels": 510,
"RemoveQueryChannels": 511,
"SegmentInfo": 600,
"TimeTick": 1200,
"QueryNodeStats": 1201,
......@@ -661,79 +685,84 @@ func init() {
func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
var fileDescriptor_555bd8c177793206 = []byte{
// 1183 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xcb, 0x6e, 0x1b, 0xc7,
0x12, 0xd5, 0x90, 0x34, 0xa9, 0x29, 0x52, 0x54, 0xab, 0xf5, 0xb0, 0xec, 0x2b, 0x5c, 0x18, 0x5a,
0x19, 0x02, 0x2c, 0xdd, 0x7b, 0x8d, 0x9b, 0xac, 0xbc, 0xb0, 0x38, 0x96, 0x4c, 0xd8, 0x7a, 0x64,
0x28, 0x1b, 0x46, 0x36, 0xc6, 0x68, 0xa6, 0x44, 0x75, 0x3c, 0xd3, 0xcd, 0x74, 0xf7, 0xd8, 0xe2,
0x5f, 0x24, 0xfe, 0x87, 0xec, 0x92, 0x20, 0xef, 0x00, 0xf9, 0x82, 0xbc, 0xd7, 0xf9, 0x84, 0x7c,
0x40, 0x9e, 0xf6, 0x26, 0xa8, 0x9e, 0x21, 0x39, 0x01, 0x9c, 0x5d, 0xd7, 0xe9, 0xaa, 0xd3, 0xa7,
0xab, 0xfa, 0xcc, 0x40, 0x27, 0x56, 0x59, 0xa6, 0xe4, 0xf6, 0x48, 0x2b, 0xab, 0xf8, 0x72, 0x26,
0xd2, 0xa7, 0xb9, 0x29, 0xa2, 0xed, 0x62, 0x6b, 0xf3, 0x31, 0x34, 0x07, 0x36, 0xb2, 0xb9, 0xe1,
0xb7, 0x00, 0x50, 0x6b, 0xa5, 0x1f, 0xc7, 0x2a, 0xc1, 0x75, 0xef, 0x9a, 0x77, 0xbd, 0xfb, 0xbf,
0x7f, 0x6f, 0xbf, 0xa2, 0x66, 0xfb, 0x0e, 0xa5, 0xf5, 0x54, 0x82, 0xa1, 0x8f, 0x93, 0x25, 0x5f,
0x83, 0xa6, 0xc6, 0xc8, 0x28, 0xb9, 0x5e, 0xbb, 0xe6, 0x5d, 0xf7, 0xc3, 0x32, 0xda, 0x7c, 0x0d,
0x3a, 0xf7, 0x70, 0xfc, 0x30, 0x4a, 0x73, 0x3c, 0x8e, 0x84, 0xe6, 0x0c, 0xea, 0x4f, 0x70, 0xec,
0xf8, 0xfd, 0x90, 0x96, 0x7c, 0x05, 0x2e, 0x3d, 0xa5, 0xed, 0xb2, 0xb0, 0x08, 0x36, 0x37, 0xa0,
0xb1, 0x9b, 0xaa, 0xd3, 0xd9, 0x2e, 0x55, 0x74, 0x26, 0xbb, 0x37, 0xa0, 0x75, 0x3b, 0x49, 0x34,
0x1a, 0xc3, 0xbb, 0x50, 0x13, 0xa3, 0x92, 0xaf, 0x26, 0x46, 0x9c, 0x43, 0x63, 0xa4, 0xb4, 0x75,
0x6c, 0xf5, 0xd0, 0xad, 0x37, 0x9f, 0x7b, 0xd0, 0x3a, 0x30, 0xc3, 0xdd, 0xc8, 0x20, 0x7f, 0x1d,
0xe6, 0x33, 0x33, 0x7c, 0x6c, 0xc7, 0xa3, 0xc9, 0x2d, 0x37, 0x5e, 0x79, 0xcb, 0x03, 0x33, 0x3c,
0x19, 0x8f, 0x30, 0x6c, 0x65, 0xc5, 0x82, 0x94, 0x64, 0x66, 0xd8, 0x0f, 0x4a, 0xe6, 0x22, 0xe0,
0x1b, 0xe0, 0x5b, 0x91, 0xa1, 0xb1, 0x51, 0x36, 0x5a, 0xaf, 0x5f, 0xf3, 0xae, 0x37, 0xc2, 0x19,
0xc0, 0xaf, 0xc2, 0xbc, 0x51, 0xb9, 0x8e, 0xb1, 0x1f, 0xac, 0x37, 0x5c, 0xd9, 0x34, 0xde, 0xbc,
0x05, 0xfe, 0x81, 0x19, 0xde, 0xc5, 0x28, 0x41, 0xcd, 0xff, 0x03, 0x8d, 0xd3, 0xc8, 0x14, 0x8a,
0xda, 0xff, 0xac, 0x88, 0x6e, 0x10, 0xba, 0xcc, 0xad, 0xf7, 0x1a, 0xe0, 0x4f, 0x27, 0xc1, 0xdb,
0xd0, 0x1a, 0xe4, 0x71, 0x8c, 0xc6, 0xb0, 0x39, 0xbe, 0x0c, 0x8b, 0x0f, 0x24, 0x5e, 0x8c, 0x30,
0xb6, 0x98, 0xb8, 0x1c, 0xe6, 0xf1, 0x25, 0x58, 0xe8, 0x29, 0x29, 0x31, 0xb6, 0x7b, 0x91, 0x48,
0x31, 0x61, 0x35, 0xbe, 0x02, 0xec, 0x18, 0x75, 0x26, 0x8c, 0x11, 0x4a, 0x06, 0x28, 0x05, 0x26,
0xac, 0xce, 0x2f, 0xc3, 0x72, 0x4f, 0xa5, 0x29, 0xc6, 0x56, 0x28, 0x79, 0xa8, 0xec, 0x9d, 0x0b,
0x61, 0xac, 0x61, 0x0d, 0xa2, 0xed, 0xa7, 0x29, 0x0e, 0xa3, 0xf4, 0xb6, 0x1e, 0xe6, 0x19, 0x4a,
0xcb, 0x2e, 0x11, 0x47, 0x09, 0x06, 0x22, 0x43, 0x49, 0x4c, 0xac, 0x55, 0x41, 0xfb, 0x32, 0xc1,
0x0b, 0xea, 0x1f, 0x9b, 0xe7, 0x57, 0x60, 0xb5, 0x44, 0x2b, 0x07, 0x44, 0x19, 0x32, 0x9f, 0x2f,
0x42, 0xbb, 0xdc, 0x3a, 0x39, 0x3a, 0xbe, 0xc7, 0xa0, 0xc2, 0x10, 0xaa, 0x67, 0x21, 0xc6, 0x4a,
0x27, 0xac, 0x5d, 0x91, 0xf0, 0x10, 0x63, 0xab, 0x74, 0x3f, 0x60, 0x1d, 0x12, 0x5c, 0x82, 0x03,
0x8c, 0x74, 0x7c, 0x1e, 0xa2, 0xc9, 0x53, 0xcb, 0x16, 0x38, 0x83, 0xce, 0x9e, 0x48, 0xf1, 0x50,
0xd9, 0x3d, 0x95, 0xcb, 0x84, 0x75, 0x79, 0x17, 0xe0, 0x00, 0x6d, 0x54, 0x76, 0x60, 0x91, 0x8e,
0xed, 0x45, 0xf1, 0x39, 0x96, 0x00, 0xe3, 0x6b, 0xc0, 0x7b, 0x91, 0x94, 0xca, 0xf6, 0x34, 0x46,
0x16, 0xf7, 0x54, 0x9a, 0xa0, 0x66, 0x4b, 0x24, 0xe7, 0x6f, 0xb8, 0x48, 0x91, 0xf1, 0x59, 0x76,
0x80, 0x29, 0x4e, 0xb3, 0x97, 0x67, 0xd9, 0x25, 0x4e, 0xd9, 0x2b, 0x24, 0x7e, 0x37, 0x17, 0x69,
0xe2, 0x5a, 0x52, 0x8c, 0x65, 0x95, 0x34, 0x96, 0xe2, 0x0f, 0xef, 0xf7, 0x07, 0x27, 0x6c, 0x8d,
0xaf, 0xc2, 0x52, 0x89, 0x1c, 0xa0, 0xd5, 0x22, 0x76, 0xcd, 0xbb, 0x4c, 0x52, 0x8f, 0x72, 0x7b,
0x74, 0x76, 0x80, 0x99, 0xd2, 0x63, 0xb6, 0x4e, 0x03, 0x75, 0x4c, 0x93, 0x11, 0xb1, 0x2b, 0x9c,
0xc3, 0x42, 0x10, 0x84, 0xf8, 0x76, 0x8e, 0xc6, 0x86, 0x51, 0x8c, 0xec, 0xe7, 0xd6, 0xd6, 0x23,
0x00, 0x97, 0x46, 0x36, 0x47, 0xce, 0xa1, 0x3b, 0x8b, 0x0e, 0x95, 0x44, 0x36, 0xc7, 0x3b, 0x30,
0xff, 0x40, 0x0a, 0x63, 0x72, 0x4c, 0x98, 0x47, 0x2d, 0xea, 0xcb, 0x63, 0xad, 0x86, 0xe4, 0x2e,
0x56, 0xa3, 0xdd, 0x3d, 0x21, 0x85, 0x39, 0x77, 0x8f, 0x03, 0xa0, 0x59, 0xf6, 0xaa, 0xb1, 0xf5,
0x08, 0x3a, 0x03, 0x1c, 0xd2, 0x3b, 0x28, 0xb8, 0x57, 0x80, 0x55, 0xe3, 0x19, 0xfb, 0x54, 0xa1,
0x47, 0xef, 0x74, 0x5f, 0xab, 0x67, 0x42, 0x0e, 0x59, 0x8d, 0xc8, 0x06, 0x18, 0xa5, 0x8e, 0xb8,
0x0d, 0xad, 0xbd, 0x34, 0x77, 0xa7, 0x34, 0xb6, 0xbe, 0x6a, 0x3a, 0xbf, 0x3a, 0xdb, 0x2d, 0x80,
0xff, 0x40, 0x26, 0x78, 0x26, 0x24, 0x26, 0x6c, 0xce, 0xb5, 0xd6, 0x8d, 0x60, 0xf6, 0x84, 0x58,
0x42, 0xd7, 0x0a, 0xb4, 0x1a, 0x55, 0x30, 0xa4, 0xfe, 0xdc, 0x8d, 0x4c, 0x05, 0x3a, 0xa3, 0x79,
0x05, 0x68, 0x62, 0x2d, 0x4e, 0xab, 0xe5, 0x43, 0x9a, 0xcc, 0xe0, 0x5c, 0x3d, 0x9b, 0x61, 0x86,
0x9d, 0xd3, 0x49, 0xfb, 0x68, 0x07, 0x63, 0x63, 0x31, 0xeb, 0x29, 0x79, 0x26, 0x86, 0x86, 0x09,
0x3a, 0xe9, 0xbe, 0x8a, 0x92, 0x4a, 0xf9, 0x5b, 0x34, 0xb1, 0x10, 0x53, 0x8c, 0x4c, 0x95, 0xf5,
0x09, 0x5f, 0x81, 0xc5, 0x42, 0xea, 0x71, 0xa4, 0xad, 0x70, 0xe0, 0xd7, 0x9e, 0x9b, 0x91, 0x56,
0xa3, 0x19, 0xf6, 0x0d, 0x79, 0xb3, 0x73, 0x37, 0x32, 0x33, 0xe8, 0x5b, 0x8f, 0xaf, 0xc1, 0xd2,
0x44, 0xea, 0x0c, 0xff, 0xce, 0xe3, 0xcb, 0xd0, 0x25, 0xa9, 0x53, 0xcc, 0xb0, 0xef, 0x1d, 0x48,
0xa2, 0x2a, 0xe0, 0x0f, 0x8e, 0xa1, 0x54, 0x55, 0xc1, 0x7f, 0x74, 0x87, 0x11, 0x43, 0x39, 0x2a,
0xc3, 0x5e, 0x78, 0xa4, 0x74, 0x72, 0x58, 0x09, 0xb3, 0x97, 0x1e, 0x67, 0xd0, 0x2e, 0xf4, 0xbb,
0x17, 0xc3, 0xde, 0xaf, 0x39, 0xed, 0x65, 0x5e, 0x81, 0x7d, 0x50, 0xe3, 0x5d, 0xf0, 0xe9, 0x3e,
0x45, 0xfc, 0x61, 0x8d, 0xb7, 0xa1, 0xd9, 0x97, 0x06, 0xb5, 0x65, 0xef, 0xd0, 0x54, 0x9b, 0x85,
0x05, 0xd8, 0xbb, 0xf4, 0x76, 0x2e, 0xb9, 0x11, 0xb3, 0xe7, 0x6e, 0xa3, 0x30, 0x2b, 0xfb, 0xa5,
0xee, 0x14, 0x55, 0x9d, 0xfb, 0x6b, 0x9d, 0x4e, 0xda, 0x47, 0x3b, 0x7b, 0xaa, 0xec, 0xb7, 0x3a,
0xbf, 0x0a, 0xab, 0x13, 0xcc, 0xf9, 0x68, 0xfa, 0x48, 0x7f, 0xaf, 0xf3, 0x0d, 0xb8, 0xbc, 0x8f,
0x76, 0xd6, 0x7e, 0x2a, 0x12, 0xc6, 0x8a, 0xd8, 0xb0, 0x3f, 0xea, 0xfc, 0x5f, 0xb0, 0xb6, 0x8f,
0x76, 0xda, 0x86, 0xca, 0xe6, 0x9f, 0x75, 0xbe, 0x00, 0xf3, 0x21, 0x19, 0x0d, 0x9f, 0x22, 0x7b,
0x51, 0xa7, 0x5e, 0x4e, 0xc2, 0x52, 0xce, 0xcb, 0x3a, 0xb5, 0xa2, 0x6c, 0x4c, 0x5f, 0x9e, 0x29,
0xf6, 0x53, 0x83, 0xaa, 0x4e, 0x44, 0x86, 0x27, 0x22, 0x7e, 0xc2, 0x3e, 0xf2, 0xa9, 0xea, 0x8d,
0x1c, 0xf5, 0xf8, 0x50, 0x25, 0x48, 0xf4, 0x86, 0x7d, 0xec, 0x53, 0x6b, 0x68, 0x2c, 0x45, 0x6b,
0x3e, 0x71, 0x71, 0x69, 0xce, 0x7e, 0xc0, 0x3e, 0xa5, 0x8f, 0x1e, 0x94, 0xf1, 0xc9, 0xe0, 0x88,
0x7d, 0xe6, 0xd3, 0x1c, 0x6e, 0xa7, 0xa9, 0x8a, 0x23, 0x3b, 0x9d, 0xc3, 0xe7, 0x3e, 0x0d, 0xb2,
0xe2, 0xab, 0x52, 0xf8, 0x17, 0x3e, 0x5f, 0x9d, 0xfa, 0xcd, 0xb5, 0x35, 0x20, 0xbf, 0x7d, 0xe9,
0x6f, 0x6d, 0x42, 0x2b, 0x30, 0xa9, 0xf3, 0x4e, 0x0b, 0xea, 0x81, 0x49, 0xd9, 0x1c, 0x99, 0x7a,
0x57, 0xa9, 0xf4, 0xce, 0xc5, 0x48, 0x3f, 0xfc, 0x2f, 0xf3, 0x76, 0xff, 0xff, 0xe6, 0xcd, 0xa1,
0xb0, 0xe7, 0xf9, 0x29, 0xfd, 0x5b, 0x76, 0x8a, 0x9f, 0xcd, 0x0d, 0xa1, 0xca, 0xd5, 0x8e, 0x90,
0x16, 0xb5, 0x8c, 0xd2, 0x1d, 0xf7, 0xff, 0xd9, 0x29, 0xfe, 0x3f, 0xa3, 0xd3, 0xd3, 0xa6, 0x8b,
0x6f, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x77, 0x76, 0x64, 0xb2, 0x59, 0x08, 0x00, 0x00,
// 1263 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xcb, 0x72, 0xdb, 0x36,
0x17, 0x36, 0x45, 0xc5, 0x32, 0x61, 0xd9, 0x86, 0xe1, 0x4b, 0x9c, 0xfc, 0x9e, 0x7f, 0x32, 0x5e,
0x65, 0x3c, 0x13, 0xbb, 0x6d, 0xa6, 0xed, 0x2a, 0x8b, 0x58, 0x8c, 0x6d, 0x4d, 0xe2, 0x4b, 0x29,
0x27, 0xcd, 0x74, 0x93, 0x81, 0xc9, 0x63, 0x09, 0x0d, 0x09, 0xa8, 0x00, 0xe8, 0x58, 0x6f, 0xd1,
0xe6, 0x1d, 0xba, 0x6b, 0x3b, 0xbd, 0xf7, 0x15, 0x7a, 0x5f, 0xf7, 0x11, 0xba, 0xef, 0x3d, 0xd7,
0xce, 0x01, 0x29, 0x89, 0x99, 0x49, 0x77, 0x3c, 0xdf, 0xb9, 0xe0, 0xc3, 0x77, 0xce, 0x01, 0x49,
0x33, 0x56, 0x59, 0xa6, 0xe4, 0x46, 0x5f, 0x2b, 0xab, 0xd8, 0x42, 0x26, 0xd2, 0xd3, 0xdc, 0x14,
0xd6, 0x46, 0xe1, 0x5a, 0xbb, 0x47, 0x26, 0x3b, 0x96, 0xdb, 0xdc, 0xb0, 0x6b, 0x84, 0x80, 0xd6,
0x4a, 0xdf, 0x8b, 0x55, 0x02, 0x2b, 0xde, 0x25, 0xef, 0xf2, 0xec, 0x6b, 0xff, 0xdf, 0x78, 0x49,
0xce, 0xc6, 0x0d, 0x0c, 0x6b, 0xa9, 0x04, 0xa2, 0x00, 0x86, 0x9f, 0x6c, 0x99, 0x4c, 0x6a, 0xe0,
0x46, 0xc9, 0x95, 0xda, 0x25, 0xef, 0x72, 0x10, 0x95, 0xd6, 0xda, 0x1b, 0xa4, 0x79, 0x13, 0x06,
0x77, 0x78, 0x9a, 0xc3, 0x21, 0x17, 0x9a, 0x51, 0xe2, 0xdf, 0x87, 0x81, 0xab, 0x1f, 0x44, 0xf8,
0xc9, 0x16, 0xc9, 0xb9, 0x53, 0x74, 0x97, 0x89, 0x85, 0xb1, 0xb6, 0x4a, 0xea, 0x5b, 0xa9, 0x3a,
0x1e, 0x7b, 0x31, 0xa3, 0x39, 0xf4, 0x5e, 0x21, 0x8d, 0xeb, 0x49, 0xa2, 0xc1, 0x18, 0x36, 0x4b,
0x6a, 0xa2, 0x5f, 0xd6, 0xab, 0x89, 0x3e, 0x63, 0xa4, 0xde, 0x57, 0xda, 0xba, 0x6a, 0x7e, 0xe4,
0xbe, 0xd7, 0x1e, 0x7a, 0xa4, 0xb1, 0x67, 0xba, 0x5b, 0xdc, 0x00, 0x7b, 0x93, 0x4c, 0x65, 0xa6,
0x7b, 0xcf, 0x0e, 0xfa, 0xc3, 0x5b, 0xae, 0xbe, 0xf4, 0x96, 0x7b, 0xa6, 0x7b, 0x34, 0xe8, 0x43,
0xd4, 0xc8, 0x8a, 0x0f, 0x64, 0x92, 0x99, 0x6e, 0x3b, 0x2c, 0x2b, 0x17, 0x06, 0x5b, 0x25, 0x81,
0x15, 0x19, 0x18, 0xcb, 0xb3, 0xfe, 0x8a, 0x7f, 0xc9, 0xbb, 0x5c, 0x8f, 0xc6, 0x00, 0xbb, 0x48,
0xa6, 0x8c, 0xca, 0x75, 0x0c, 0xed, 0x70, 0xa5, 0xee, 0xd2, 0x46, 0xf6, 0xda, 0x35, 0x12, 0xec,
0x99, 0xee, 0x2e, 0xf0, 0x04, 0x34, 0x7b, 0x85, 0xd4, 0x8f, 0xb9, 0x29, 0x18, 0x4d, 0xff, 0x37,
0x23, 0xbc, 0x41, 0xe4, 0x22, 0xd7, 0x3f, 0xac, 0x93, 0x60, 0xd4, 0x09, 0x36, 0x4d, 0x1a, 0x9d,
0x3c, 0x8e, 0xc1, 0x18, 0x3a, 0xc1, 0x16, 0xc8, 0xdc, 0x6d, 0x09, 0x67, 0x7d, 0x88, 0x2d, 0x24,
0x2e, 0x86, 0x7a, 0x6c, 0x9e, 0xcc, 0xb4, 0x94, 0x94, 0x10, 0xdb, 0x6d, 0x2e, 0x52, 0x48, 0x68,
0x8d, 0x2d, 0x12, 0x7a, 0x08, 0x3a, 0x13, 0xc6, 0x08, 0x25, 0x43, 0x90, 0x02, 0x12, 0xea, 0xb3,
0xf3, 0x64, 0xa1, 0xa5, 0xd2, 0x14, 0x62, 0x2b, 0x94, 0xdc, 0x57, 0xf6, 0xc6, 0x99, 0x30, 0xd6,
0xd0, 0x3a, 0x96, 0x6d, 0xa7, 0x29, 0x74, 0x79, 0x7a, 0x5d, 0x77, 0xf3, 0x0c, 0xa4, 0xa5, 0xe7,
0xb0, 0x46, 0x09, 0x86, 0x22, 0x03, 0x89, 0x95, 0x68, 0xa3, 0x82, 0xb6, 0x65, 0x02, 0x67, 0xa8,
0x1f, 0x9d, 0x62, 0x17, 0xc8, 0x52, 0x89, 0x56, 0x0e, 0xe0, 0x19, 0xd0, 0x80, 0xcd, 0x91, 0xe9,
0xd2, 0x75, 0x74, 0x70, 0x78, 0x93, 0x92, 0x4a, 0x85, 0x48, 0x3d, 0x88, 0x20, 0x56, 0x3a, 0xa1,
0xd3, 0x15, 0x0a, 0x77, 0x20, 0xb6, 0x4a, 0xb7, 0x43, 0xda, 0x44, 0xc2, 0x25, 0xd8, 0x01, 0xae,
0xe3, 0x5e, 0x04, 0x26, 0x4f, 0x2d, 0x9d, 0x61, 0x94, 0x34, 0xb7, 0x45, 0x0a, 0xfb, 0xca, 0x6e,
0xab, 0x5c, 0x26, 0x74, 0x96, 0xcd, 0x12, 0xb2, 0x07, 0x96, 0x97, 0x0a, 0xcc, 0xe1, 0xb1, 0x2d,
0x1e, 0xf7, 0xa0, 0x04, 0x28, 0x5b, 0x26, 0xac, 0xc5, 0xa5, 0x54, 0xb6, 0xa5, 0x81, 0x5b, 0xd8,
0x56, 0x69, 0x02, 0x9a, 0xce, 0x23, 0x9d, 0x17, 0x70, 0x91, 0x02, 0x65, 0xe3, 0xe8, 0x10, 0x52,
0x18, 0x45, 0x2f, 0x8c, 0xa3, 0x4b, 0x1c, 0xa3, 0x17, 0x91, 0xfc, 0x56, 0x2e, 0xd2, 0xc4, 0x49,
0x52, 0xb4, 0x65, 0x09, 0x39, 0x96, 0xe4, 0xf7, 0x6f, 0xb5, 0x3b, 0x47, 0x74, 0x99, 0x2d, 0x91,
0xf9, 0x12, 0xd9, 0x03, 0xab, 0x45, 0xec, 0xc4, 0x3b, 0x8f, 0x54, 0x0f, 0x72, 0x7b, 0x70, 0xb2,
0x07, 0x99, 0xd2, 0x03, 0xba, 0x82, 0x0d, 0x75, 0x95, 0x86, 0x2d, 0xa2, 0x17, 0x18, 0x23, 0x33,
0x61, 0x18, 0xc1, 0x7b, 0x39, 0x18, 0x1b, 0xf1, 0x18, 0xe8, 0xaf, 0x8d, 0xf5, 0xbb, 0x84, 0xb8,
0x30, 0x5c, 0x73, 0x60, 0x8c, 0xcc, 0x8e, 0xad, 0x7d, 0x25, 0x81, 0x4e, 0xb0, 0x26, 0x99, 0xba,
0x2d, 0x85, 0x31, 0x39, 0x24, 0xd4, 0x43, 0x89, 0xda, 0xf2, 0x50, 0xab, 0x2e, 0x6e, 0x17, 0xad,
0xa1, 0x77, 0x5b, 0x48, 0x61, 0x7a, 0x6e, 0x38, 0x08, 0x99, 0x2c, 0xb5, 0xaa, 0xaf, 0xdf, 0x25,
0xcd, 0x0e, 0x74, 0x71, 0x0e, 0x8a, 0xda, 0x8b, 0x84, 0x56, 0xed, 0x71, 0xf5, 0x11, 0x43, 0x0f,
0xe7, 0x74, 0x47, 0xab, 0x07, 0x42, 0x76, 0x69, 0x0d, 0x8b, 0x75, 0x80, 0xa7, 0xae, 0xf0, 0x34,
0x69, 0x6c, 0xa7, 0xb9, 0x3b, 0xa5, 0xbe, 0xfe, 0x5b, 0xc3, 0xed, 0xab, 0x5b, 0xbb, 0x19, 0x12,
0xdc, 0x96, 0x09, 0x9c, 0x08, 0x09, 0x09, 0x9d, 0x70, 0xd2, 0xba, 0x16, 0x8c, 0x47, 0x88, 0x26,
0x78, 0xad, 0x50, 0xab, 0x7e, 0x05, 0x03, 0xd4, 0x67, 0x97, 0x9b, 0x0a, 0x74, 0x82, 0xfd, 0x0a,
0xc1, 0xc4, 0x5a, 0x1c, 0x57, 0xd3, 0xbb, 0xd8, 0x99, 0x4e, 0x4f, 0x3d, 0x18, 0x63, 0x86, 0xf6,
0xf0, 0xa4, 0x1d, 0xb0, 0x9d, 0x81, 0xb1, 0x90, 0xb5, 0x94, 0x3c, 0x11, 0x5d, 0x43, 0x05, 0x9e,
0x74, 0x4b, 0xf1, 0xa4, 0x92, 0xfe, 0x2e, 0x76, 0x2c, 0x82, 0x14, 0xb8, 0xa9, 0x56, 0xbd, 0xcf,
0x16, 0xc9, 0x5c, 0x41, 0xf5, 0x90, 0x6b, 0x2b, 0x1c, 0xf8, 0xad, 0xe7, 0x7a, 0xa4, 0x55, 0x7f,
0x8c, 0x7d, 0x87, 0xbb, 0xd9, 0xdc, 0xe5, 0x66, 0x0c, 0x7d, 0xef, 0xb1, 0x65, 0x32, 0x3f, 0xa4,
0x3a, 0xc6, 0x7f, 0xf0, 0xd8, 0x02, 0x99, 0x45, 0xaa, 0x23, 0xcc, 0xd0, 0x1f, 0x1d, 0x88, 0xa4,
0x2a, 0xe0, 0x4f, 0xae, 0x42, 0xc9, 0xaa, 0x82, 0xff, 0xec, 0x0e, 0xc3, 0x0a, 0x65, 0xab, 0x0c,
0x7d, 0xe4, 0x21, 0xd3, 0xe1, 0x61, 0x25, 0x4c, 0x1f, 0xbb, 0x40, 0xac, 0x3a, 0x0a, 0x7c, 0xe2,
0x02, 0xcb, 0x9a, 0x23, 0xf4, 0xa9, 0x43, 0x77, 0xb9, 0x4c, 0xd4, 0xc9, 0xc9, 0x08, 0x7d, 0xe6,
0xb1, 0x15, 0xb2, 0x80, 0xe9, 0x5b, 0x3c, 0xe5, 0x32, 0x1e, 0xc7, 0x3f, 0xf7, 0x18, 0x25, 0xd3,
0x85, 0x30, 0x6e, 0x14, 0xe9, 0x47, 0x35, 0x27, 0x4a, 0x49, 0xa0, 0xc0, 0x3e, 0xae, 0xb1, 0x59,
0x12, 0xa0, 0x50, 0x85, 0xfd, 0x49, 0x8d, 0x4d, 0x93, 0xc9, 0xb6, 0x34, 0xa0, 0x2d, 0x7d, 0x1f,
0xc7, 0x65, 0xb2, 0xd8, 0x2d, 0xfa, 0x01, 0x0e, 0xe5, 0x39, 0x37, 0x3b, 0xf4, 0xa1, 0x73, 0x14,
0xaf, 0x00, 0xfd, 0xdd, 0x77, 0x57, 0xad, 0x3e, 0x09, 0x7f, 0xf8, 0x78, 0xd2, 0x0e, 0xd8, 0xf1,
0x0e, 0xd0, 0x3f, 0x7d, 0x76, 0x91, 0x2c, 0x0d, 0x31, 0xb7, 0xa0, 0xa3, 0xe9, 0xff, 0xcb, 0x67,
0xab, 0xe4, 0xfc, 0x0e, 0xd8, 0x71, 0x5f, 0x31, 0x49, 0x18, 0x2b, 0x62, 0x43, 0xff, 0xf6, 0xd9,
0xff, 0xc8, 0xf2, 0x0e, 0xd8, 0x91, 0xbe, 0x15, 0xe7, 0x3f, 0x3e, 0x9b, 0x21, 0x53, 0x11, 0x6e,
0x30, 0x9c, 0x02, 0x7d, 0xe4, 0x63, 0x93, 0x86, 0x66, 0x49, 0xe7, 0xb1, 0x8f, 0xd2, 0xbd, 0xcd,
0x6d, 0xdc, 0x0b, 0xb3, 0x56, 0x8f, 0x4b, 0x09, 0xa9, 0xa1, 0x4f, 0x7c, 0xb6, 0x44, 0x68, 0x04,
0x99, 0x3a, 0x85, 0x0a, 0xfc, 0x14, 0x5f, 0x66, 0xe6, 0x82, 0xdf, 0xca, 0x41, 0x0f, 0x46, 0x8e,
0x67, 0x3e, 0x4a, 0x5d, 0xc4, 0xbf, 0xe8, 0x79, 0xee, 0xa3, 0xd4, 0xa5, 0xf2, 0x6d, 0x79, 0xa2,
0xe8, 0x2f, 0x75, 0x64, 0x75, 0x24, 0x32, 0x38, 0x12, 0xf1, 0x7d, 0xfa, 0x69, 0x80, 0xac, 0x5c,
0xd2, 0xbe, 0x4a, 0x00, 0xe9, 0x1b, 0xfa, 0x59, 0x80, 0xd2, 0x63, 0xeb, 0x0a, 0xe9, 0x3f, 0x77,
0x76, 0xf9, 0xaa, 0xb4, 0x43, 0xfa, 0x05, 0xbe, 0xd6, 0xa4, 0xb4, 0x8f, 0x3a, 0x07, 0xf4, 0xcb,
0x00, 0xaf, 0x71, 0x3d, 0x4d, 0x55, 0xcc, 0xed, 0x68, 0x80, 0xbe, 0x0a, 0x70, 0x02, 0x2b, 0x0f,
0x42, 0x29, 0xcc, 0xd7, 0x01, 0x5e, 0xaf, 0xc4, 0x5d, 0xdb, 0x42, 0x7c, 0x28, 0xbe, 0x09, 0xd6,
0xd7, 0x48, 0x23, 0x34, 0xa9, 0x5b, 0xfa, 0x06, 0xf1, 0x43, 0x93, 0xd2, 0x09, 0x7c, 0x8d, 0xb6,
0x94, 0x4a, 0x6f, 0x9c, 0xf5, 0xf5, 0x9d, 0x57, 0xa9, 0xb7, 0xf5, 0xfa, 0x3b, 0x57, 0xbb, 0xc2,
0xf6, 0xf2, 0x63, 0xfc, 0x29, 0x6e, 0x16, 0x7f, 0xc9, 0x2b, 0x42, 0x95, 0x5f, 0x9b, 0x42, 0x5a,
0xd0, 0x92, 0xa7, 0x9b, 0xee, 0xc7, 0xb9, 0x59, 0xfc, 0x38, 0xfb, 0xc7, 0xc7, 0x93, 0xce, 0xbe,
0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x16, 0x2d, 0x7a, 0x6b, 0x12, 0x09, 0x00, 0x00,
}
......@@ -7,7 +7,6 @@ option go_package = "github.com/milvus-io/milvus/internal/proto/querypb";
import "common.proto";
import "milvus.proto";
import "internal.proto";
import "data_service.proto";
import "schema.proto";
service QueryService {
......@@ -45,6 +44,7 @@ service QueryNode {
rpc GetSegmentInfo(GetSegmentInfoRequest) returns (GetSegmentInfoResponse) {}
}
//--------------------query service proto------------------
message RegisterNodeRequest {
common.MsgBase base = 1;
common.Address address = 2;
......@@ -65,6 +65,17 @@ message ShowCollectionsResponse {
repeated int64 collectionIDs = 2;
}
message ShowPartitionsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
}
message ShowPartitionsResponse {
common.Status status = 1;
repeated int64 partitionIDs = 2;
}
message LoadCollectionRequest {
common.MsgBase base = 1;
int64 dbID = 2;
......@@ -78,15 +89,36 @@ message ReleaseCollectionRequest {
int64 collectionID = 3;
}
message ShowPartitionsRequest {
message LoadPartitionsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
schema.CollectionSchema schema = 5;
}
message ShowPartitionsResponse {
message ReleasePartitionsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
}
message CreateQueryChannelRequest {
int64 collectionID = 1;
}
message CreateQueryChannelResponse {
common.Status status = 1;
repeated int64 partitionIDs = 2;
string request_channel = 2;
string result_channel = 3;
}
message GetPartitionStatesRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
}
enum PartitionState {
......@@ -104,103 +136,193 @@ message PartitionStates {
PartitionState state = 2;
}
message GetPartitionStatesRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
}
message GetPartitionStatesResponse {
common.Status status = 1;
repeated PartitionStates partition_descriptions = 2;
}
message LoadPartitionsRequest {
message GetSegmentInfoRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
schema.CollectionSchema schema = 5;
repeated int64 segmentIDs = 2;
}
message ReleasePartitionsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
message SegmentInfo {
int64 segmentID = 1;
int64 collectionID = 2;
int64 partitionID = 3;
int64 nodeID = 4;
int64 mem_size = 5;
int64 num_rows = 6;
string index_name = 7;
int64 indexID = 8;
string channelID = 9;
SegmentState segment_state = 10;
}
message CreateQueryChannelResponse {
message GetSegmentInfoResponse {
common.Status status = 1;
string request_channel = 2;
string result_channel = 3;
repeated SegmentInfo infos = 2;
}
//-----------------query node proto----------------
message AddQueryChannelRequest {
common.MsgBase base = 1;
string request_channelID = 2;
string result_channelID = 3;
int64 nodeID = 2;
int64 collectionID = 3;
string request_channelID = 4;
string result_channelID = 5;
}
message RemoveQueryChannelRequest {
common.Status status = 1;
common.MsgBase base = 2;
string request_channelID = 3;
string result_channelID = 4;
common.MsgBase base = 1;
int64 nodeID = 2;
int64 collectionID = 3;
string request_channelID = 4;
string result_channelID = 5;
}
message WatchDmChannelInfo {
string channelID = 1;
internal.MsgPosition pos = 2;
repeated int64 excluded_segments = 3;
}
//message excludedSegmentInfo {
// int64 segmentID = 1;
// internal.MsgPosition pos = 2;
//}
//message WatchDmChannelInfo {
// string channelID = 1;
// internal.MsgPosition pos = 2;
//}
message WatchDmChannelsRequest {
common.MsgBase base = 1;
int64 collectionID = 2;
repeated string channelIDs = 3;
repeated WatchDmChannelInfo infos = 4;
int64 nodeID = 2;
int64 collectionID = 3;
int64 partitionID = 4;
repeated VchannelInfo infos = 5;
schema.CollectionSchema schema = 6;
repeated CheckPoint exclude_infos = 7;
}
enum TriggerCondition {
handoff = 0;
loadBalance = 1;
grpcRequest = 2;
nodeDown = 3;
}
//message FieldBinlogPath {
// int64 filedID = 1;
// repeated string binlog_path = 2;
//}
// used for handoff task
message SegmentLoadInfo {
int64 segmentID = 1;
int64 partitionID = 2;
int64 collectionID = 3;
int64 dbID = 4;
int64 flush_time = 5;
repeated FieldBinlog binlog_paths = 6;
}
message LoadSegmentsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
int64 partitionID = 4;
repeated int64 segmentIDs = 5;
repeated int64 fieldIDs = 6;
repeated data.SegmentStateInfo segment_states = 7;
schema.CollectionSchema schema = 8;
int64 nodeID = 2;
repeated SegmentLoadInfo infos = 3;
schema.CollectionSchema schema = 4;
TriggerCondition load_condition = 5;
}
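Constructing the reshaped request, with field names following protoc-gen-go conventions for the message above (IDs and the trigger condition are illustrative; collectionSchema is assumed in scope):

req := &querypb.LoadSegmentsRequest{
	Base:   &commonpb.MsgBase{MsgType: commonpb.MsgType_LoadSegments},
	NodeID: 7, // hypothetical target query node
	Infos: []*querypb.SegmentLoadInfo{{
		SegmentID:    100,
		PartitionID:  10,
		CollectionID: 1,
		// BinlogPaths would carry per-field binlog locations in practice.
	}},
	Schema:        collectionSchema, // *schemapb.CollectionSchema
	LoadCondition: querypb.TriggerCondition_grpcRequest,
}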
message ReleaseSegmentsRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
repeated int64 segmentIDs = 5;
int64 nodeID = 2;
int64 dbID = 3;
int64 collectionID = 4;
repeated int64 partitionIDs = 5;
repeated int64 segmentIDs = 6;
}
message SegmentInfo {
//----------------etcd-----------------
enum SegmentState {
None = 0;
Growing = 1;
Frozen = 2;
sealing = 3;
sealed = 4;
}
message DmChannelInfo {
int64 nodeID_loaded = 1;
repeated string channelIDs = 2;
}
message QueryChannelInfo {
int64 collectionID = 1;
string query_channelID = 2;
string query_result_channelID = 3;
}
message CollectionInfo {
int64 collectionID = 1;
repeated int64 partitionIDs = 2;
repeated DmChannelInfo channel_infos = 3;
schema.CollectionSchema schema = 6;
}
message HandoffSegments {
common.MsgBase base = 1;
repeated SegmentLoadInfo infos = 2;
}
message LoadBalanceSegmentInfo {
int64 segmentID = 1;
int64 collectionID = 2;
int64 partitionID = 3;
int64 mem_size = 4;
int64 num_rows = 5;
string index_name = 6;
int64 indexID = 7;
int64 partitionID = 2;
int64 collectionID = 3;
int64 source_nodeID = 4;
int64 dst_nodeID = 5;
bool source_done = 6;
bool dst_done = 7;
bool valid_info = 8;
}
message GetSegmentInfoRequest {
message LoadBalanceSegments {
common.MsgBase base = 1;
repeated int64 segmentIDs = 2;
repeated LoadBalanceSegmentInfo infos = 2;
}
message GetSegmentInfoResponse {
common.Status status = 1;
repeated SegmentInfo infos = 2;
//-------------- temporary: delete after data_service.proto is updated -----
message CheckPoint {
int64 segmentID = 1;
internal.MsgPosition position = 2;
int64 num_of_rows = 3;
}
message CreateQueryChannelRequest {
message VchannelInfo {
int64 collectionID = 1;
string channelName = 2;
internal.MsgPosition seek_position = 3;
repeated CheckPoint checkPoints = 4;
repeated int64 flushedSegments = 5;
}
message SegmentBinlogs {
int64 segmentID = 1;
repeated FieldBinlog fieldBinlogs = 2;
}
message FieldBinlog {
int64 fieldID = 1;
repeated string binlogs = 2;
}
message GetRecoveryInfoResponse {
common.MsgBase base = 1;
repeated VchannelInfo channels = 2;
repeated SegmentBinlogs binlogs = 3;
}
message GetRecoveryInfoRequest {
common.MsgBase base = 1;
int64 collectionID = 2;
int64 partitionID = 3;
}
......@@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/sessionutil"
......@@ -159,7 +160,7 @@ func (node *ProxyNode) Init() error {
}
if node.queryService != nil {
resp, err := node.queryService.CreateQueryChannel(ctx)
resp, err := node.queryService.CreateQueryChannel(ctx, &querypb.CreateQueryChannelRequest{})
if err != nil {
log.Debug("ProxyNode CreateQueryChannel failed", zap.Error(err))
return err
......
......@@ -1142,8 +1142,9 @@ func (st *SearchTask) Execute(ctx context.Context) error {
err := st.queryMsgStream.Produce(&msgPack)
log.Debug("proxynode", zap.Int("length of searchMsg", len(msgPack.Msgs)))
log.Debug("proxy node sent one searchMsg",
zap.Any("collectionID", st.CollectionID),
zap.Any("msgID", tsMsg.ID()),
zap.Any("collectionID", st.CollectionID))
)
if err != nil {
log.Debug("proxynode", zap.String("send search request failed", err.Error()))
}
......
......@@ -23,6 +23,8 @@ package querynode
*/
import "C"
import (
"math"
"sync"
"unsafe"
"go.uber.org/zap"
......@@ -38,6 +40,9 @@ type Collection struct {
partitionIDs []UniqueID
schema *schemapb.CollectionSchema
watchedChannels []VChannel
releaseMu sync.RWMutex // guards releaseTime
releaseTime Timestamp
}
func (c *Collection) ID() UniqueID {
......@@ -73,6 +78,18 @@ func (c *Collection) getWatchedDmChannels() []VChannel {
return c.watchedChannels
}
func (c *Collection) setReleaseTime(t Timestamp) {
c.releaseMu.Lock()
defer c.releaseMu.Unlock()
c.releaseTime = t
}
func (c *Collection) getReleaseTime() Timestamp {
c.releaseMu.RLock()
defer c.releaseMu.RUnlock()
return c.releaseTime
}
func newCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) *Collection {
/*
CCollection
......@@ -93,6 +110,7 @@ func newCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) *Co
log.Debug("create collection", zap.Int64("collectionID", collectionID))
newCollection.setReleaseTime(Timestamp(math.MaxUint64))
return newCollection
}
......
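The release timestamp defaults to math.MaxUint64, i.e. never released. A sketch of the pattern this appears intended for; the serving-side check is an assumption, not code from this diff:

col := newCollection(collectionID, schema)

// On ReleaseCollection, stamp the release time:
col.setReleaseTime(releaseTs)

// Assumed semantics: data at or after releaseTime is no longer servable.
if queryTs >= col.getReleaseTime() {
	// skip serving this collection at queryTs
}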
......@@ -25,6 +25,7 @@ import "C"
import (
"errors"
"fmt"
"github.com/milvus-io/milvus/internal/proto/querypb"
"strconv"
"sync"
......@@ -52,9 +53,7 @@ type ReplicaInterface interface {
hasCollection(collectionID UniqueID) bool
getCollectionNum() int
getPartitionIDs(collectionID UniqueID) ([]UniqueID, error)
getVecFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error)
getFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error)
// partition
addPartition(collectionID UniqueID, partitionID UniqueID) error
......@@ -63,29 +62,23 @@ type ReplicaInterface interface {
hasPartition(partitionID UniqueID) bool
getPartitionNum() int
getSegmentIDs(partitionID UniqueID) ([]UniqueID, error)
enablePartition(partitionID UniqueID) error
disablePartition(partitionID UniqueID) error
getSegmentIDsByVChannel(partitionID UniqueID, vChannel VChannel) ([]UniqueID, error)
// segment
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error
addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID VChannel, segType segmentType, onService bool) error
setSegment(segment *Segment) error
removeSegment(segmentID UniqueID) error
getSegmentByID(segmentID UniqueID) (*Segment, error)
hasSegment(segmentID UniqueID) bool
getSegmentNum() int
setSegmentEnableIndex(segmentID UniqueID, enable bool) error
setSegmentEnableLoadBinLog(segmentID UniqueID, enable bool) error
getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
getSegmentStatistics() []*internalpb.SegmentStats
// excluded segments
initExcludedSegments(collectionID UniqueID)
removeExcludedSegments(collectionID UniqueID)
addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error
getExcludedSegments(collectionID UniqueID) ([]UniqueID, error)
addExcludedSegments(collectionID UniqueID, segmentInfos []*querypb.CheckPoint) error
getExcludedSegments(collectionID UniqueID) ([]*querypb.CheckPoint, error)
getEnabledSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
getSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
replaceGrowingSegmentBySealedSegment(segment *Segment) error
......@@ -98,7 +91,7 @@ type collectionReplica struct {
partitions map[UniqueID]*Partition
segments map[UniqueID]*Segment
excludedSegments map[UniqueID][]UniqueID // map[collectionID]segmentIDs
excludedSegments map[UniqueID][]*querypb.CheckPoint // map[collectionID]segmentIDs
}
//----------------------------------------------------------------------------------------------------- collection
......@@ -217,26 +210,6 @@ func (colReplica *collectionReplica) getVecFieldIDsByCollectionID(collectionID U
return vecFields, nil
}
func (colReplica *collectionReplica) getFieldIDsByCollectionID(collectionID UniqueID) ([]int64, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
fields, err := colReplica.getFieldsByCollectionIDPrivate(collectionID)
if err != nil {
return nil, err
}
targetFields := make([]int64, 0)
for _, field := range fields {
targetFields = append(targetFields, field.FieldID)
}
// add row id field
targetFields = append(targetFields, rowIDFieldID)
return targetFields, nil
}
func (colReplica *collectionReplica) getFieldsByCollectionIDPrivate(collectionID UniqueID) ([]*schemapb.FieldSchema, error) {
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
......@@ -336,60 +309,45 @@ func (colReplica *collectionReplica) getSegmentIDs(partitionID UniqueID) ([]Uniq
return colReplica.getSegmentIDsPrivate(partitionID)
}
func (colReplica *collectionReplica) getSegmentIDsPrivate(partitionID UniqueID) ([]UniqueID, error) {
partition, err2 := colReplica.getPartitionByIDPrivate(partitionID)
if err2 != nil {
return nil, err2
}
return partition.segmentIDs, nil
}
func (colReplica *collectionReplica) enablePartition(partitionID UniqueID) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
partition, err := colReplica.getPartitionByIDPrivate(partitionID)
func (colReplica *collectionReplica) getSegmentIDsByVChannel(partitionID UniqueID, vChannel VChannel) ([]UniqueID, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
segmentIDs, err := colReplica.getSegmentIDsPrivate(partitionID)
if err != nil {
return err
return nil, err
}
partition.enable = true
return nil
}
func (colReplica *collectionReplica) disablePartition(partitionID UniqueID) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
partition, err := colReplica.getPartitionByIDPrivate(partitionID)
if err != nil {
return err
segmentIDsTmp := make([]UniqueID, 0)
for _, segmentID := range segmentIDs {
segment, err := colReplica.getSegmentByIDPrivate(segmentID)
if err != nil {
return nil, err
}
if segment.vChannelID == vChannel {
segmentIDsTmp = append(segmentIDsTmp, segment.ID())
}
}
partition.enable = false
return nil
return segmentIDsTmp, nil
}
func (colReplica *collectionReplica) getEnabledPartitionIDsPrivate() []UniqueID {
partitionIDs := make([]UniqueID, 0)
for _, partition := range colReplica.partitions {
if partition.enable {
partitionIDs = append(partitionIDs, partition.partitionID)
}
func (colReplica *collectionReplica) getSegmentIDsPrivate(partitionID UniqueID) ([]UniqueID, error) {
partition, err2 := colReplica.getPartitionByIDPrivate(partitionID)
if err2 != nil {
return nil, err2
}
return partitionIDs
return partition.segmentIDs, nil
}
//----------------------------------------------------------------------------------------------------- segment
func (colReplica *collectionReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error {
func (colReplica *collectionReplica) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, vChannelID VChannel, segType segmentType, onService bool) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
if err != nil {
return err
}
var newSegment = newSegment(collection, segmentID, partitionID, collectionID, segType)
return colReplica.addSegmentPrivate(segmentID, partitionID, newSegment)
seg := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, onService)
return colReplica.addSegmentPrivate(segmentID, partitionID, seg)
}
func (colReplica *collectionReplica) addSegmentPrivate(segmentID UniqueID, partitionID UniqueID, segment *Segment) error {
......@@ -499,35 +457,6 @@ func (colReplica *collectionReplica) getSegmentStatistics() []*internalpb.Segmen
return statisticData
}
func (colReplica *collectionReplica) getEnabledSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
targetCollectionIDs := make([]UniqueID, 0)
targetPartitionIDs := make([]UniqueID, 0)
targetSegmentIDs := make([]UniqueID, 0)
for _, partitionID := range colReplica.getEnabledPartitionIDsPrivate() {
segmentIDs, err := colReplica.getSegmentIDsPrivate(partitionID)
if err != nil {
continue
}
for _, segmentID := range segmentIDs {
segment, err := colReplica.getSegmentByIDPrivate(segmentID)
if err != nil {
continue
}
if segment.getType() == segType {
targetCollectionIDs = append(targetCollectionIDs, segment.collectionID)
targetPartitionIDs = append(targetPartitionIDs, segment.partitionID)
targetSegmentIDs = append(targetSegmentIDs, segment.segmentID)
}
}
}
return targetCollectionIDs, targetPartitionIDs, targetSegmentIDs
}
func (colReplica *collectionReplica) getSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
......@@ -572,39 +501,11 @@ func (colReplica *collectionReplica) replaceGrowingSegmentBySealedSegment(segmen
return nil
}
func (colReplica *collectionReplica) setSegmentEnableIndex(segmentID UniqueID, enable bool) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
targetSegment, err := colReplica.getSegmentByIDPrivate(segmentID)
if targetSegment.segmentType != segmentTypeSealed {
return errors.New("unexpected segment type")
}
if err == nil && targetSegment != nil {
targetSegment.setEnableIndex(enable)
}
return nil
}
func (colReplica *collectionReplica) setSegmentEnableLoadBinLog(segmentID UniqueID, enable bool) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
targetSegment, err := colReplica.getSegmentByIDPrivate(segmentID)
if targetSegment.segmentType != segmentTypeGrowing {
return errors.New("unexpected segment type")
}
if err == nil && targetSegment != nil {
targetSegment.setLoadBinLogEnable(enable)
}
return nil
}
func (colReplica *collectionReplica) initExcludedSegments(collectionID UniqueID) {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
colReplica.excludedSegments[collectionID] = make([]UniqueID, 0)
colReplica.excludedSegments[collectionID] = make([]*querypb.CheckPoint, 0)
}
func (colReplica *collectionReplica) removeExcludedSegments(collectionID UniqueID) {
......@@ -614,7 +515,7 @@ func (colReplica *collectionReplica) removeExcludedSegments(collectionID UniqueI
delete(colReplica.excludedSegments, collectionID)
}
func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error {
func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID, segmentInfos []*querypb.CheckPoint) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
......@@ -622,11 +523,11 @@ func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID,
return errors.New("addExcludedSegments failed, cannot found collection, id =" + fmt.Sprintln(collectionID))
}
colReplica.excludedSegments[collectionID] = append(colReplica.excludedSegments[collectionID], segmentIDs...)
colReplica.excludedSegments[collectionID] = append(colReplica.excludedSegments[collectionID], segmentInfos...)
return nil
}
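Excluded segments now carry full checkpoints rather than bare IDs, so the streaming side can skip data up to a known position. A hedged sketch (CheckPoint fields are from the querypb message in this diff; the MsgPosition field name is assumed from internal.proto):

cp := &querypb.CheckPoint{
	SegmentID: 10,
	Position:  &internalpb.MsgPosition{ChannelName: "by-dev-dml-0"}, // hypothetical channel
	NumOfRows: 1024,
}
if err := replica.addExcludedSegments(collectionID, []*querypb.CheckPoint{cp}); err != nil {
	log.Warn(err.Error())
}
cps, _ := replica.getExcludedSegments(collectionID) // cps now includes cp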
func (colReplica *collectionReplica) getExcludedSegments(collectionID UniqueID) ([]UniqueID, error) {
func (colReplica *collectionReplica) getExcludedSegments(collectionID UniqueID) ([]*querypb.CheckPoint, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
......@@ -650,37 +551,11 @@ func (colReplica *collectionReplica) freeAll() {
colReplica.segments = make(map[UniqueID]*Segment)
}
func (colReplica *collectionReplica) getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
targetCollectionIDs := make([]UniqueID, 0)
targetPartitionIDs := make([]UniqueID, 0)
targetSegmentIDs := make([]UniqueID, 0)
for _, segment := range colReplica.segments {
if !segment.enableLoadBinLog {
continue
}
if segment.getType() == segType {
if segType == segmentTypeSealed && !segment.getEnableIndex() {
continue
}
targetCollectionIDs = append(targetCollectionIDs, segment.collectionID)
targetPartitionIDs = append(targetPartitionIDs, segment.partitionID)
targetSegmentIDs = append(targetSegmentIDs, segment.segmentID)
}
}
return targetCollectionIDs, targetPartitionIDs, targetSegmentIDs
}
func newCollectionReplica() ReplicaInterface {
collections := make(map[UniqueID]*Collection)
partitions := make(map[UniqueID]*Partition)
segments := make(map[UniqueID]*Segment)
excludedSegments := make(map[UniqueID][]UniqueID)
excludedSegments := make(map[UniqueID][]*querypb.CheckPoint)
var replica ReplicaInterface = &collectionReplica{
collections: collections,
......
......@@ -172,7 +172,7 @@ func TestCollectionReplica_addSegment(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -191,7 +191,7 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -212,7 +212,7 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -231,7 +231,7 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segmentTypeGrowing)
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -255,36 +255,34 @@ func TestCollectionReplica_freeAll(t *testing.T) {
assert.NoError(t, err)
}
func TestReplaceGrowingSegmentBySealedSegment(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
segmentID := UniqueID(520)
initTestMeta(t, node, collectionID, segmentID)
_, _, segIDs := node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
assert.Equal(t, len(segIDs), 1)
collection, err := node.historical.replica.getCollectionByID(collectionID)
assert.NoError(t, err)
ns := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeSealed)
err = node.historical.replica.replaceGrowingSegmentBySealedSegment(ns)
assert.NoError(t, err)
err = node.historical.replica.setSegmentEnableIndex(segmentID, true)
assert.NoError(t, err)
segmentNums := node.historical.replica.getSegmentNum()
assert.Equal(t, segmentNums, 1)
segment, err := node.historical.replica.getSegmentByID(segmentID)
assert.NoError(t, err)
assert.Equal(t, segment.getType(), segmentTypeSealed)
_, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
assert.Equal(t, len(segIDs), 0)
_, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeSealed)
assert.Equal(t, len(segIDs), 1)
err = node.Stop()
assert.NoError(t, err)
}
//func TestReplaceGrowingSegmentBySealedSegment(t *testing.T) {
// node := newQueryNodeMock()
// collectionID := UniqueID(0)
// segmentID := UniqueID(520)
// initTestMeta(t, node, collectionID, segmentID)
//
// _, _, segIDs := node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
// assert.Equal(t, len(segIDs), 1)
//
// collection, err := node.historical.replica.getCollectionByID(collectionID)
// assert.NoError(t, err)
// ns := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeSealed, true)
// err = node.historical.replica.replaceGrowingSegmentBySealedSegment(ns)
// assert.NoError(t, err)
//
// segmentNums := node.historical.replica.getSegmentNum()
// assert.Equal(t, segmentNums, 1)
//
// segment, err := node.historical.replica.getSegmentByID(segmentID)
// assert.NoError(t, err)
//
// assert.Equal(t, segment.getType(), segmentTypeSealed)
//
// _, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeGrowing)
// assert.Equal(t, len(segIDs), 0)
// _, _, segIDs = node.historical.replica.getSegmentsBySegmentType(segmentTypeSealed)
// assert.Equal(t, len(segIDs), 1)
//
// err = node.Stop()
// assert.NoError(t, err)
//}
......@@ -33,9 +33,9 @@ const (
type dataSyncService struct {
ctx context.Context
mu sync.Mutex // guards FlowGraphs
collectionFlowGraphs map[UniqueID][]*queryNodeFlowGraph // map[collectionID]flowGraphs
partitionFlowGraphs map[UniqueID][]*queryNodeFlowGraph // map[partitionID]flowGraphs
mu sync.Mutex // guards FlowGraphs
collectionFlowGraphs map[UniqueID]map[VChannel]*queryNodeFlowGraph // map[collectionID]flowGraphs
partitionFlowGraphs map[UniqueID]map[VChannel]*queryNodeFlowGraph // map[partitionID]flowGraphs
streamingReplica ReplicaInterface
tSafeReplica TSafeReplicaInterface
......@@ -47,10 +47,9 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.collectionFlowGraphs[collectionID]; ok {
return errors.New("collection flow graph has been existed, collectionID = " + fmt.Sprintln(collectionID))
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
dsService.collectionFlowGraphs[collectionID] = make(map[VChannel]*queryNodeFlowGraph)
}
dsService.collectionFlowGraphs[collectionID] = make([]*queryNodeFlowGraph, 0)
for _, vChannel := range vChannels {
// collection flow graph doesn't need partition id
partitionID := UniqueID(0)
......@@ -62,7 +61,7 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
dsService.tSafeReplica,
vChannel,
dsService.msFactory)
dsService.collectionFlowGraphs[collectionID] = append(dsService.collectionFlowGraphs[collectionID], newFlowGraph)
dsService.collectionFlowGraphs[collectionID][vChannel] = newFlowGraph
log.Debug("add collection flow graph",
zap.Any("collectionID", collectionID),
zap.Any("channel", vChannel))
......@@ -70,27 +69,37 @@ func (dsService *dataSyncService) addCollectionFlowGraph(collectionID UniqueID,
return nil
}
func (dsService *dataSyncService) getCollectionFlowGraphs(collectionID UniqueID) ([]*queryNodeFlowGraph, error) {
func (dsService *dataSyncService) getCollectionFlowGraphs(collectionID UniqueID, vChannels []string) (map[VChannel]*queryNodeFlowGraph, error) {
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
return nil, errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
}
return dsService.collectionFlowGraphs[collectionID], nil
tmpFGs := make(map[VChannel]*queryNodeFlowGraph)
for _, channel := range vChannels {
if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
tmpFGs[channel] = dsService.collectionFlowGraphs[collectionID][channel]
}
}
return tmpFGs, nil
}
func (dsService *dataSyncService) startCollectionFlowGraph(collectionID UniqueID) error {
func (dsService *dataSyncService) startCollectionFlowGraph(collectionID UniqueID, vChannels []string) error {
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.collectionFlowGraphs[collectionID]; !ok {
return errors.New("collection flow graph doesn't existed, collectionID = " + fmt.Sprintln(collectionID))
}
for _, fg := range dsService.collectionFlowGraphs[collectionID] {
// start flow graph
log.Debug("start flow graph", zap.Any("channel", fg.channel))
go fg.flowGraph.Start()
for _, channel := range vChannels {
if _, ok := dsService.collectionFlowGraphs[collectionID][channel]; ok {
// start flow graph
log.Debug("start collection flow graph", zap.Any("channel", channel))
go dsService.collectionFlowGraphs[collectionID][channel].flowGraph.Start()
}
}
return nil
}
......@@ -114,10 +123,9 @@ func (dsService *dataSyncService) addPartitionFlowGraph(collectionID UniqueID, p
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.partitionFlowGraphs[partitionID]; ok {
return errors.New("partition flow graph has been existed, partitionID = " + fmt.Sprintln(partitionID))
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
dsService.partitionFlowGraphs[partitionID] = make(map[VChannel]*queryNodeFlowGraph)
}
dsService.partitionFlowGraphs[partitionID] = make([]*queryNodeFlowGraph, 0)
for _, vChannel := range vChannels {
newFlowGraph := newQueryNodeFlowGraph(dsService.ctx,
flowGraphTypePartition,
......@@ -127,31 +135,42 @@ func (dsService *dataSyncService) addPartitionFlowGraph(collectionID UniqueID, p
dsService.tSafeReplica,
vChannel,
dsService.msFactory)
dsService.partitionFlowGraphs[partitionID] = append(dsService.partitionFlowGraphs[partitionID], newFlowGraph)
dsService.partitionFlowGraphs[partitionID][vChannel] = newFlowGraph
}
return nil
}
func (dsService *dataSyncService) getPartitionFlowGraphs(partitionID UniqueID) ([]*queryNodeFlowGraph, error) {
func (dsService *dataSyncService) getPartitionFlowGraphs(partitionID UniqueID, vChannels []string) (map[VChannel]*queryNodeFlowGraph, error) {
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
return nil, errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
}
return dsService.partitionFlowGraphs[partitionID], nil
tmpFGs := make(map[VChannel]*queryNodeFlowGraph)
for _, channel := range vChannels {
if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
tmpFGs[channel] = dsService.partitionFlowGraphs[partitionID][channel]
}
}
return tmpFGs, nil
}
func (dsService *dataSyncService) startPartitionFlowGraph(partitionID UniqueID) error {
func (dsService *dataSyncService) startPartitionFlowGraph(partitionID UniqueID, vChannels []string) error {
dsService.mu.Lock()
defer dsService.mu.Unlock()
if _, ok := dsService.partitionFlowGraphs[partitionID]; !ok {
return errors.New("partition flow graph doesn't existed, partitionID = " + fmt.Sprintln(partitionID))
}
for _, fg := range dsService.partitionFlowGraphs[partitionID] {
// start flow graph
go fg.flowGraph.Start()
for _, channel := range vChannels {
if _, ok := dsService.partitionFlowGraphs[partitionID][channel]; ok {
// start flow graph
log.Debug("start partition flow graph", zap.Any("channel", channel))
go dsService.partitionFlowGraphs[partitionID][channel].flowGraph.Start()
}
}
return nil
}
......@@ -177,8 +196,8 @@ func newDataSyncService(ctx context.Context,
return &dataSyncService{
ctx: ctx,
collectionFlowGraphs: make(map[UniqueID][]*queryNodeFlowGraph),
partitionFlowGraphs: make(map[UniqueID][]*queryNodeFlowGraph),
collectionFlowGraphs: make(map[UniqueID]map[VChannel]*queryNodeFlowGraph),
partitionFlowGraphs: make(map[UniqueID]map[VChannel]*queryNodeFlowGraph),
streamingReplica: streamingReplica,
tSafeReplica: tSafeReplica,
msFactory: factory,
......@@ -200,6 +219,6 @@ func (dsService *dataSyncService) close() {
}
}
}
dsService.collectionFlowGraphs = make(map[UniqueID][]*queryNodeFlowGraph)
dsService.partitionFlowGraphs = make(map[UniqueID][]*queryNodeFlowGraph)
dsService.collectionFlowGraphs = make(map[UniqueID]map[VChannel]*queryNodeFlowGraph)
dsService.partitionFlowGraphs = make(map[UniqueID]map[VChannel]*queryNodeFlowGraph)
}
......@@ -118,7 +118,7 @@ func TestDataSyncService_Start(t *testing.T) {
channels := []VChannel{"0"}
err = node.streaming.dataSyncService.addCollectionFlowGraph(collectionID, channels)
assert.NoError(t, err)
err = node.streaming.dataSyncService.startCollectionFlowGraph(collectionID)
err = node.streaming.dataSyncService.startCollectionFlowGraph(collectionID, channels)
assert.NoError(t, err)
<-node.queryNodeLoopCtx.Done()
......
......@@ -94,29 +94,49 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
// check if collection and partition exist
collection := fdmNode.replica.hasCollection(msg.CollectionID)
partition := fdmNode.replica.hasPartition(msg.PartitionID)
if !collection || !partition {
if fdmNode.graphType == flowGraphTypeCollection && !collection {
log.Debug("filter invalid insert message, collection dose not exist",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
if fdmNode.graphType == flowGraphTypePartition && !partition {
log.Debug("filter invalid insert message, partition dose not exist",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
// check if the collection from message is target collection
if msg.CollectionID != fdmNode.collectionID {
log.Debug("filter invalid insert message, collection is not the target collection",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
// if the flow graph type is partition, check if the partition is target partition
if fdmNode.graphType == flowGraphTypePartition && msg.PartitionID != fdmNode.partitionID {
log.Debug("filter invalid insert message, partition is not the target partition",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
// check if the segment is in excluded segments
// Check if the segment is in the excluded segments:
// messages after the seek position may duplicate data already persisted in the flushed segment,
// so compare each received message's end timestamp with the excluded position's timestamp.
excludedSegments, err := fdmNode.replica.getExcludedSegments(fdmNode.collectionID)
//log.Debug("excluded segments", zap.String("segmentIDs", fmt.Sprintln(excludedSegments)))
if err != nil {
log.Error(err.Error())
return nil
}
for _, id := range excludedSegments {
if msg.SegmentID == id {
for _, segmentInfo := range excludedSegments {
if msg.SegmentID == segmentInfo.SegmentID && msg.EndTs() < segmentInfo.Position.Timestamp {
log.Debug("filter invalid insert message, segments are excluded segments",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
}
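The timestamp comparison introduced above is the heart of the exclusion check: data at or before an excluded segment's seek position already lives in the flushed segment, so only messages ending after that position carry new rows. A hedged sketch of the predicate with simplified stand-in types:

package sketch

type segmentPosition struct{ Timestamp uint64 }

type excludedSegmentInfo struct {
	SegmentID int64
	Position  segmentPosition
}

// shouldFilter reports whether an insert message only repeats data that the
// flushed (excluded) segment already persisted: messages ending strictly
// before the seek-position timestamp are redundant and can be dropped.
func shouldFilter(msgSegmentID int64, msgEndTs uint64, excluded []excludedSegmentInfo) bool {
	for _, info := range excluded {
		if msgSegmentID == info.SegmentID && msgEndTs < info.Position.Timestamp {
			return true
		}
	}
	return false
}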
......@@ -128,6 +148,9 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
}
if len(msg.Timestamps) <= 0 {
log.Debug("filter invalid insert message, no message",
zap.Any("collectionID", msg.CollectionID),
zap.Any("partitionID", msg.PartitionID))
return nil
}
......
......@@ -75,9 +75,18 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
// 1. hash insertMessages to insertData
for _, task := range iMsg.insertMessages {
// check if partition exists, if not, create partition
if hasPartition := iNode.replica.hasPartition(task.PartitionID); !hasPartition {
err := iNode.replica.addPartition(task.CollectionID, task.PartitionID)
if err != nil {
log.Error(err.Error())
continue
}
}
// check if segment exists, if not, create this segment
if !iNode.replica.hasSegment(task.SegmentID) {
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, segmentTypeGrowing)
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, task.ChannelID, segmentTypeGrowing, true)
if err != nil {
log.Error(err.Error())
continue
......
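The new block above makes insertNode self-healing: an insert message whose partition or segment is unknown triggers on-the-fly creation instead of being dropped. A condensed get-or-create sketch (the replica interface here is a stand-in for the real ReplicaInterface, and the channel parameter mirrors the new addSegment argument):

package sketch

type replica interface {
	hasPartition(partitionID int64) bool
	addPartition(collectionID, partitionID int64) error
	hasSegment(segmentID int64) bool
	addSegment(segmentID, partitionID, collectionID int64, channel string) error
}

// ensureTargets creates the partition and then the segment for an insert
// task if either is missing, mirroring the flow in insertNode.Operate.
func ensureTargets(r replica, collectionID, partitionID, segmentID int64, channel string) error {
	if !r.hasPartition(partitionID) {
		if err := r.addPartition(collectionID, partitionID); err != nil {
			return err
		}
	}
	if !r.hasSegment(segmentID) {
		if err := r.addSegment(segmentID, partitionID, collectionID, channel); err != nil {
			return err
		}
	}
	return nil
}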
......@@ -56,7 +56,7 @@ func newQueryNodeFlowGraph(ctx context.Context,
var dmStreamNode node = q.newDmInputNode(ctx1, factory)
var filterDmNode node = newFilteredDmNode(streamingReplica, flowGraphType, collectionID, partitionID)
var insertNode node = newInsertNode(streamingReplica)
var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, collectionID, channel, factory)
var serviceTimeNode node = newServiceTimeNode(ctx1, tSafeReplica, flowGraphType, collectionID, partitionID, channel, factory)
q.flowGraph.AddNode(dmStreamNode)
q.flowGraph.AddNode(filterDmNode)
......
......@@ -24,10 +24,12 @@ import (
type serviceTimeNode struct {
baseNode
collectionID UniqueID
vChannel VChannel
tSafeReplica TSafeReplicaInterface
//timeTickMsgStream msgstream.MsgStream
graphType flowGraphType
collectionID UniqueID
partitionID UniqueID
vChannel VChannel
tSafeReplica TSafeReplicaInterface
timeTickMsgStream msgstream.MsgStream
}
func (stNode *serviceTimeNode) Name() string {
......@@ -57,11 +59,19 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
}
// update service time
channel := stNode.vChannel + strconv.FormatInt(stNode.collectionID, 10)
stNode.tSafeReplica.setTSafe(channel, serviceTimeMsg.timeRange.timestampMax)
var id UniqueID
if stNode.graphType == flowGraphTypePartition {
id = stNode.partitionID
} else {
id = stNode.collectionID
}
channelTmp := stNode.vChannel + strconv.FormatInt(stNode.collectionID, 10)
stNode.tSafeReplica.setTSafe(channelTmp, id, serviceTimeMsg.timeRange.timestampMax)
//log.Debug("update tSafe:",
// zap.Int64("tSafe", int64(serviceTimeMsg.timeRange.timestampMax)),
// zap.Any("collectionID", stNode.collectionID),
// zap.Any("id", id),
// zap.Any("channel", channelTmp),
//)
//if err := stNode.sendTimeTick(serviceTimeMsg.timeRange.timestampMax); err != nil {
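With graphType threaded through, the service-time node now publishes tSafe under the partition ID for partition graphs and under the collection ID otherwise, while the channel key appends the collection ID so two collections consuming the same physical channel stay separate. A sketch of that keying, assuming flowGraphType is a small integer enum:

package sketch

import "strconv"

type flowGraphType int

const (
	flowGraphTypeCollection flowGraphType = iota
	flowGraphTypePartition
)

// tSafeTarget picks the ID a flow graph publishes its tSafe under.
func tSafeTarget(t flowGraphType, collectionID, partitionID int64) int64 {
	if t == flowGraphTypePartition {
		return partitionID
	}
	return collectionID
}

// tSafeChannelKey namespaces a virtual channel by collection ID.
func tSafeChannelKey(vChannel string, collectionID int64) string {
	return vChannel + strconv.FormatInt(collectionID, 10)
}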
......@@ -98,7 +108,9 @@ func (stNode *serviceTimeNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
func newServiceTimeNode(ctx context.Context,
tSafeReplica TSafeReplicaInterface,
graphType flowGraphType,
collectionID UniqueID,
partitionID UniqueID,
channel VChannel,
factory msgstream.Factory) *serviceTimeNode {
......@@ -109,19 +121,23 @@ func newServiceTimeNode(ctx context.Context,
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
//timeTimeMsgStream, err := factory.NewMsgStream(ctx)
//if err != nil {
// log.Error(err.Error())
//} else {
// timeTimeMsgStream.AsProducer([]string{Params.QueryTimeTickChannelName})
// log.Debug("query node AsProducer: " + Params.QueryTimeTickChannelName)
//}
timeTimeMsgStream, err := factory.NewMsgStream(ctx)
if err != nil {
log.Error(err.Error())
} else {
// TODO: use param table
timeTickChannel := "query-node-time-tick-0"
timeTimeMsgStream.AsProducer([]string{timeTickChannel})
log.Debug("query node AsProducer: " + timeTickChannel)
}
return &serviceTimeNode{
baseNode: baseNode,
collectionID: collectionID,
vChannel: channel,
tSafeReplica: tSafeReplica,
//timeTickMsgStream: timeTimeMsgStream,
baseNode: baseNode,
graphType: graphType,
collectionID: collectionID,
partitionID: partitionID,
vChannel: channel,
tSafeReplica: tSafeReplica,
timeTickMsgStream: timeTimeMsgStream,
}
}
......@@ -13,6 +13,8 @@ package querynode
import (
"context"
"errors"
"fmt"
"github.com/milvus-io/milvus/internal/msgstream"
"github.com/milvus-io/milvus/internal/types"
......@@ -20,7 +22,7 @@ import (
type historical struct {
replica ReplicaInterface
loadService *loadService
loader *segmentLoader
statsService *statsService
}
......@@ -30,25 +32,86 @@ func newHistorical(ctx context.Context,
indexService types.IndexService,
factory msgstream.Factory) *historical {
replica := newCollectionReplica()
ls := newLoadService(ctx, masterService, dataService, indexService, replica)
ss := newStatsService(ctx, replica, ls.segLoader.indexLoader.fieldStatsChan, factory)
loader := newSegmentLoader(ctx, masterService, indexService, dataService, replica)
ss := newStatsService(ctx, replica, loader.indexLoader.fieldStatsChan, factory)
return &historical{
replica: replica,
loadService: ls,
loader: loader,
statsService: ss,
}
}
func (h *historical) start() {
h.loadService.start()
h.statsService.start()
}
func (h *historical) close() {
h.loadService.close()
h.statsService.close()
// free collectionReplica
h.replica.freeAll()
}
func (h *historical) search(searchReqs []*searchRequest,
collID UniqueID,
partIDs []UniqueID,
plan *Plan,
searchTs Timestamp) ([]*SearchResult, []*Segment, error) {
searchResults := make([]*SearchResult, 0)
segmentResults := make([]*Segment, 0)
// get historical partition ids
var searchPartIDs []UniqueID
if len(partIDs) == 0 {
hisPartIDs, err := h.replica.getPartitionIDs(collID)
if err != nil {
return searchResults, segmentResults, err
}
if len(hisPartIDs) == 0 {
// no partitions in collection, do empty search
return nil, nil, nil
}
searchPartIDs = hisPartIDs
} else {
for _, id := range partIDs {
_, err := h.replica.getPartitionByID(id)
if err == nil {
searchPartIDs = append(searchPartIDs, id)
}
}
}
// all partitions have been released
if len(searchPartIDs) == 0 {
return nil, nil, errors.New("partitions have been released , collectionID = " +
fmt.Sprintln(collID) +
"target partitionIDs = " +
fmt.Sprintln(partIDs))
}
for _, partID := range searchPartIDs {
segIDs, err := h.replica.getSegmentIDs(partID)
if err != nil {
return searchResults, segmentResults, err
}
for _, segID := range segIDs {
seg, err := h.replica.getSegmentByID(segID)
if err != nil {
return searchResults, segmentResults, err
}
if !seg.getOnService() {
continue
}
searchResult, err := seg.segmentSearch(plan, searchReqs, []Timestamp{searchTs})
if err != nil {
return searchResults, segmentResults, err
}
searchResults = append(searchResults, searchResult)
segmentResults = append(segmentResults, seg)
}
}
return searchResults, segmentResults, nil
}
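historical.search resolves its partition set before touching any segment: an empty request means all loaded partitions (and an empty result if none are loaded), an explicit list is intersected with what is actually loaded, and only an empty intersection is an error. A condensed sketch of that resolution logic, with plain int64 slices standing in for the replica calls:

package sketch

import "errors"

// resolveSearchPartitions mirrors the selection at the top of
// historical.search: loaded is what the replica holds, requested comes
// from the search request.
func resolveSearchPartitions(loaded, requested []int64) ([]int64, error) {
	if len(requested) == 0 {
		return loaded, nil // empty slice => empty search, not an error
	}
	loadedSet := make(map[int64]struct{}, len(loaded))
	for _, id := range loaded {
		loadedSet[id] = struct{}{}
	}
	var out []int64
	for _, id := range requested {
		if _, ok := loadedSet[id]; ok {
			out = append(out, id)
		}
	}
	if len(out) == 0 {
		return nil, errors.New("partitions have been released")
	}
	return out, nil
}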
......@@ -14,6 +14,10 @@ package querynode
import (
"context"
"errors"
"fmt"
"math/rand"
"strconv"
"strings"
"go.uber.org/zap"
......@@ -70,30 +74,54 @@ func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.Stri
}
func (node *QueryNode) AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelRequest) (*commonpb.Status, error) {
//if node.searchService == nil || node.searchService.searchMsgStream == nil {
// errMsg := "null search service or null search message stream"
collectionID := in.CollectionID
if node.searchService == nil {
errMsg := "null search service, collectionID = " + fmt.Sprintln(collectionID)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
//if _, ok := node.searchService.searchCollections[in.CollectionID]; !ok {
// errMsg := "null search collection, collectionID = " + fmt.Sprintln(collectionID)
// status := &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UnexpectedError,
// Reason: errMsg,
// }
//
// return status, errors.New(errMsg)
//}
//
//// add request channel
//consumeChannels := []string{in.RequestChannelID}
// add search collection
if !node.searchService.hasSearchCollection(collectionID) {
node.searchService.addSearchCollection(collectionID)
log.Debug("add search collection", zap.Any("collectionID", collectionID))
}
// add request channel
sc := node.searchService.searchCollections[in.CollectionID]
consumeChannels := []string{in.RequestChannelID}
//consumeSubName := Params.MsgChannelSubName
//node.searchService.searchMsgStream.AsConsumer(consumeChannels, consumeSubName)
//node.retrieveService.retrieveMsgStream.AsConsumer(consumeChannels, "RetrieveSubName")
//log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
//
//// add result channel
//producerChannels := []string{in.ResultChannelID}
//node.searchService.searchResultMsgStream.AsProducer(producerChannels)
//node.retrieveService.retrieveResultMsgStream.AsProducer(producerChannels)
//log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))
// Do nothing
consumeSubName := Params.MsgChannelSubName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
sc.searchMsgStream.AsConsumer(consumeChannels, consumeSubName)
node.retrieveService.retrieveMsgStream.AsConsumer(consumeChannels, "RetrieveSubName")
log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
// add result channel
producerChannels := []string{in.ResultChannelID}
sc.searchResultMsgStream.AsProducer(producerChannels)
node.retrieveService.retrieveResultMsgStream.AsProducer(producerChannels)
log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))
// the message streams need AsConsumer to be called before start
// add search collection
if !node.searchService.hasSearchCollection(collectionID) {
node.searchService.addSearchCollection(collectionID)
log.Debug("add search collection", zap.Any("collectionID", collectionID))
}
sc.start()
log.Debug("start search collection", zap.Any("collectionID", collectionID))
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
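The consumer subscription name built above is unique per collection and per registration: the random suffix (rand.Int, as in the patch) keeps a re-added query channel from colliding with a stale subscription on the message stream. A sketch of the naming scheme, assuming the prefix is an ordinary string parameter:

package sketch

import (
	"math/rand"
	"strconv"
)

// querySubName builds a per-collection, per-call subscription name such as
// "byNode-42-8674665223082153551"; uniqueness comes from the random suffix.
func querySubName(prefix string, collectionID int64) string {
	return prefix + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
}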
......@@ -206,7 +234,11 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
log.Error(err.Error())
return status, err
}
log.Debug("loadSegmentsTask Enqueue done", zap.Any("collectionID", in.CollectionID))
segmentIDs := make([]UniqueID, 0)
for _, info := range in.Infos {
segmentIDs = append(segmentIDs, info.SegmentID)
}
log.Debug("loadSegmentsTask Enqueue done", zap.Int64s("segmentIDs", segmentIDs))
func() {
err = dct.WaitToFinish()
......@@ -214,7 +246,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
log.Error(err.Error())
return
}
log.Debug("loadSegmentsTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
log.Debug("loadSegmentsTask WaitToFinish done", zap.Int64s("segmentIDs", segmentIDs))
}()
status := &commonpb.Status{
......@@ -301,11 +333,17 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *queryPb.ReleaseS
ErrorCode: commonpb.ErrorCode_Success,
}
for _, id := range in.SegmentIDs {
err2 := node.historical.loadService.segLoader.replica.removeSegment(id)
if err2 != nil {
err := node.historical.replica.removeSegment(id)
if err != nil {
// not return, try to release all segments
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err.Error()
}
err = node.streaming.replica.removeSegment(id)
if err != nil {
// not return, try to release all segments
status.ErrorCode = commonpb.ErrorCode_UnexpectedError
status.Reason = err2.Error()
status.Reason = err.Error()
}
}
return status, nil
......
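ReleaseSegments now deliberately keeps going after a failure: each segment is removed from both the historical and the streaming replica, and the status carries whichever error happened last rather than aborting the loop. A small sketch of that best-effort pattern with function values standing in for the replica methods:

package sketch

// releaseAll attempts every removal and reports the last failure instead of
// stopping at the first, mirroring ReleaseSegments.
func releaseAll(ids []int64, removeHistorical, removeStreaming func(int64) error) error {
	var lastErr error
	for _, id := range ids {
		if err := removeHistorical(id); err != nil {
			lastErr = err // keep going: release as many segments as possible
		}
		if err := removeStreaming(id); err != nil {
			lastErr = err
		}
	}
	return lastErr
}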
......@@ -297,7 +297,6 @@ func (loader *indexLoader) setIndexInfo(collectionID UniqueID, segment *Segment,
if err != nil {
return err
}
log.Debug("QueryNode IndexLoader setIndexInfo", zap.Any("Req", req), zap.Any("response", response))
if response.Status.ErrorCode != commonpb.ErrorCode_Success {
return errors.New(response.Status.Reason)
}
......@@ -377,7 +376,7 @@ func newIndexLoader(ctx context.Context, masterService types.MasterService, inde
replica: replica,
fieldIndexes: make(map[string][]*internalpb.IndexStats),
fieldStatsChan: make(chan []*internalpb.FieldStats, 1),
fieldStatsChan: make(chan []*internalpb.FieldStats, 1024),
masterService: masterService,
indexService: indexService,
......
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querynode
import (
"context"
"errors"
"fmt"
"sync"
"time"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/types"
)
const loadingCheckInterval = 3
type loadService struct {
ctx context.Context
cancel context.CancelFunc
segLoader *segmentLoader
}
// -------------------------------------------- load index -------------------------------------------- //
func (s *loadService) start() {
wg := &sync.WaitGroup{}
for {
select {
case <-s.ctx.Done():
return
case <-time.After(loadingCheckInterval * time.Second):
wg.Add(2)
//go s.segLoader.indexLoader.doLoadIndex(wg)
go s.loadSegmentActively(wg)
wg.Wait()
}
}
}
func (s *loadService) close() {
s.cancel()
}
func (s *loadService) loadSegmentActively(wg *sync.WaitGroup) {
collectionIDs, partitionIDs, segmentIDs := s.segLoader.replica.getSegmentsToLoadBySegmentType(segmentTypeGrowing)
if len(collectionIDs) <= 0 {
wg.Done()
return
}
log.Debug("do load segment for growing segments:", zap.String("segmentIDs", fmt.Sprintln(segmentIDs)))
for i := range collectionIDs {
collection, err := s.segLoader.replica.getCollectionByID(collectionIDs[i])
if err != nil {
log.Warn(err.Error())
continue
}
fieldIDs, err := s.segLoader.replica.getFieldIDsByCollectionID(collectionIDs[i])
if err != nil {
log.Error(err.Error())
continue
}
segment := newSegment(collection, segmentIDs[i], partitionIDs[i], collectionIDs[i], segmentTypeSealed)
segment.setLoadBinLogEnable(true)
err = s.loadSegmentInternal(collectionIDs[i], segment, fieldIDs)
if err == nil {
// replace segment
err = s.segLoader.replica.replaceGrowingSegmentBySealedSegment(segment)
}
if err != nil {
deleteSegment(segment)
log.Error(err.Error())
}
}
// sendQueryNodeStats
err := s.segLoader.indexLoader.sendQueryNodeStats()
if err != nil {
log.Error(err.Error())
wg.Done()
return
}
wg.Done()
}
// load segment passively
func (s *loadService) loadSegmentPassively(collectionID UniqueID, partitionID UniqueID, segmentIDs []UniqueID, fieldIDs []int64) error {
// TODO: interim solution
if len(fieldIDs) == 0 {
var err error
fieldIDs, err = s.segLoader.replica.getFieldIDsByCollectionID(collectionID)
if err != nil {
return err
}
}
for _, segmentID := range segmentIDs {
collection, err := s.segLoader.replica.getCollectionByID(collectionID)
if err != nil {
return err
}
_, err = s.segLoader.replica.getPartitionByID(partitionID)
if err != nil {
return err
}
segment := newSegment(collection, segmentID, partitionID, collectionID, segmentTypeSealed)
segment.setLoadBinLogEnable(true)
err = s.loadSegmentInternal(collectionID, segment, fieldIDs)
if err == nil {
err = s.segLoader.replica.setSegment(segment)
}
if err != nil {
log.Warn(err.Error())
err = s.addSegmentToLoadBuffer(segment)
if err != nil {
log.Warn(err.Error())
}
}
}
return nil
}
func (s *loadService) addSegmentToLoadBuffer(segment *Segment) error {
segmentID := segment.segmentID
partitionID := segment.partitionID
collectionID := segment.collectionID
deleteSegment(segment)
err := s.segLoader.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeGrowing)
if err != nil {
return err
}
err = s.segLoader.replica.setSegmentEnableLoadBinLog(segmentID, true)
if err != nil {
s.segLoader.replica.removeSegment(segmentID)
}
return err
}
func (s *loadService) loadSegmentInternal(collectionID UniqueID, segment *Segment, fieldIDs []int64) error {
// check the segment state before loading
statesResp, err := s.segLoader.GetSegmentStates(segment.segmentID)
if err != nil {
return err
}
if statesResp.States[0].State != commonpb.SegmentState_Flushed {
return errors.New("segment not flush done")
}
insertBinlogPaths, srcFieldIDs, err := s.segLoader.getInsertBinlogPaths(segment.segmentID)
if err != nil {
return err
}
vectorFieldIDs, err := s.segLoader.replica.getVecFieldIDsByCollectionID(collectionID)
if err != nil {
return err
}
loadIndexFieldIDs := make([]int64, 0)
for _, vecFieldID := range vectorFieldIDs {
err = s.segLoader.indexLoader.setIndexInfo(collectionID, segment, vecFieldID)
if err != nil {
log.Warn("QueryNode load_service", zap.Any("SegmentID", segment.segmentID), zap.Error(err))
continue
}
loadIndexFieldIDs = append(loadIndexFieldIDs, vecFieldID)
}
// no need to load vector fields whose index will be loaded
fieldIDs = s.segLoader.filterOutVectorFields(fieldIDs, loadIndexFieldIDs)
//log.Debug("srcFieldIDs in internal:", srcFieldIDs)
//log.Debug("dstFieldIDs in internal:", fieldIDs)
targetFields, err := s.segLoader.checkTargetFields(insertBinlogPaths, srcFieldIDs, fieldIDs)
if err != nil {
return err
}
log.Debug("loading insert...")
err = s.segLoader.loadSegmentFieldsData(segment, targetFields)
if err != nil {
return err
}
for _, id := range loadIndexFieldIDs {
log.Debug("loading index...")
err = s.segLoader.indexLoader.loadIndex(segment, id)
if err != nil {
return err
}
}
return nil
}
func newLoadService(ctx context.Context, masterService types.MasterService, dataService types.DataService, indexService types.IndexService, replica ReplicaInterface) *loadService {
ctx1, cancel := context.WithCancel(ctx)
segLoader := newSegmentLoader(ctx1, masterService, indexService, dataService, replica)
return &loadService{
ctx: ctx1,
cancel: cancel,
segLoader: segLoader,
}
}
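loadSegmentInternal above runs a fixed pipeline: confirm the segment is flushed, fetch its insert binlog paths, attach index info for each vector field, drop the indexed vector fields from the field list, load the remaining field data, and finally load the indexes. A condensed sketch of that ordering, with closures standing in for the segmentLoader and indexLoader calls:

package sketch

import "errors"

var errNotFlushed = errors.New("segment is not flushed")

// loadSealedSegment sketches the step ordering in loadSegmentInternal; each
// parameter is a stand-in for the corresponding loader method.
func loadSealedSegment(
	isFlushed func() (bool, error),
	setIndexInfo func(vecField int64) error,
	vectorFields []int64,
	loadFields func(skipIndexed []int64) error,
	loadIndex func(vecField int64) error,
) error {
	flushed, err := isFlushed()
	if err != nil {
		return err
	}
	if !flushed {
		return errNotFlushed
	}
	var indexed []int64
	for _, f := range vectorFields {
		if err := setIndexInfo(f); err != nil {
			continue // no index yet: the raw vector field is loaded instead
		}
		indexed = append(indexed, f)
	}
	if err := loadFields(indexed); err != nil { // skip fields whose index loads below
		return err
	}
	for _, f := range indexed {
		if err := loadIndex(f); err != nil {
			return err
		}
	}
	return nil
}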
......@@ -19,11 +19,6 @@ import (
"math/rand"
"path"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/indexnode"
minioKV "github.com/milvus-io/milvus/internal/kv/minio"
......@@ -31,7 +26,6 @@ import (
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/storage"
)
......@@ -1025,96 +1019,97 @@ func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID,
return nil
}
func TestSegmentLoad_Search_Vector(t *testing.T) {
collectionID := UniqueID(0)
partitionID := UniqueID(1)
segmentID := UniqueID(2)
fieldIDs := []int64{0, 101}
// mock write insert bin log
keyPrefix := path.Join("query-node-seg-manager-test-minio-prefix", strconv.FormatInt(collectionID, 10), strconv.FormatInt(partitionID, 10))
node := newQueryNodeMock()
defer node.Stop()
ctx := node.queryNodeLoopCtx
node.historical.loadService = newLoadService(ctx, nil, nil, nil, node.historical.replica)
initTestMeta(t, node, collectionID, 0)
err := node.historical.replica.addPartition(collectionID, partitionID)
assert.NoError(t, err)
err = node.historical.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeSealed)
assert.NoError(t, err)
paths, srcFieldIDs, err := generateInsertBinLog(collectionID, partitionID, segmentID, keyPrefix)
assert.NoError(t, err)
fieldsMap, _ := node.historical.loadService.segLoader.checkTargetFields(paths, srcFieldIDs, fieldIDs)
assert.Equal(t, len(fieldsMap), 4)
segment, err := node.historical.replica.getSegmentByID(segmentID)
assert.NoError(t, err)
err = node.historical.loadService.segLoader.loadSegmentFieldsData(segment, fieldsMap)
assert.NoError(t, err)
indexPaths, err := generateIndex(segmentID)
assert.NoError(t, err)
indexInfo := &indexInfo{
indexPaths: indexPaths,
readyLoad: true,
}
err = segment.setIndexInfo(100, indexInfo)
assert.NoError(t, err)
err = node.historical.loadService.segLoader.indexLoader.loadIndex(segment, 100)
assert.NoError(t, err)
// do search
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
const DIM = 16
var searchRawData []byte
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
for _, ele := range vec {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
searchRawData = append(searchRawData, buf...)
}
placeholderValue := milvuspb.PlaceholderValue{
Tag: "$0",
Type: milvuspb.PlaceholderType_FloatVector,
Values: [][]byte{searchRawData},
}
placeholderGroup := milvuspb.PlaceholderGroup{
Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
}
placeHolderGroupBlob, err := proto.Marshal(&placeholderGroup)
assert.NoError(t, err)
searchTimestamp := Timestamp(1020)
collection, err := node.historical.replica.getCollectionByID(collectionID)
assert.NoError(t, err)
plan, err := createPlan(*collection, dslString)
assert.NoError(t, err)
holder, err := parseSearchRequest(plan, placeHolderGroupBlob)
assert.NoError(t, err)
placeholderGroups := make([]*searchRequest, 0)
placeholderGroups = append(placeholderGroups, holder)
// wait for segment building index
time.Sleep(1 * time.Second)
_, err = segment.segmentSearch(plan, placeholderGroups, []Timestamp{searchTimestamp})
assert.Nil(t, err)
plan.delete()
holder.delete()
<-ctx.Done()
}
//
//func TestSegmentLoad_Search_Vector(t *testing.T) {
// collectionID := UniqueID(0)
// partitionID := UniqueID(1)
// segmentID := UniqueID(2)
// fieldIDs := []int64{0, 101}
//
// // mock write insert bin log
// keyPrefix := path.Join("query-node-seg-manager-test-minio-prefix", strconv.FormatInt(collectionID, 10), strconv.FormatInt(partitionID, 10))
//
// node := newQueryNodeMock()
// defer node.Stop()
//
// ctx := node.queryNodeLoopCtx
// node.historical.loadService = newLoadService(ctx, nil, nil, nil, node.historical.replica)
//
// initTestMeta(t, node, collectionID, 0)
//
// err := node.historical.replica.addPartition(collectionID, partitionID)
// assert.NoError(t, err)
//
// err = node.historical.replica.addSegment(segmentID, partitionID, collectionID, segmentTypeSealed)
// assert.NoError(t, err)
//
// paths, srcFieldIDs, err := generateInsertBinLog(collectionID, partitionID, segmentID, keyPrefix)
// assert.NoError(t, err)
//
// fieldsMap, _ := node.historical.loadService.segLoader.checkTargetFields(paths, srcFieldIDs, fieldIDs)
// assert.Equal(t, len(fieldsMap), 4)
//
// segment, err := node.historical.replica.getSegmentByID(segmentID)
// assert.NoError(t, err)
//
// err = node.historical.loadService.segLoader.loadSegmentFieldsData(segment, fieldsMap)
// assert.NoError(t, err)
//
// indexPaths, err := generateIndex(segmentID)
// assert.NoError(t, err)
//
// indexInfo := &indexInfo{
// indexPaths: indexPaths,
// readyLoad: true,
// }
// err = segment.setIndexInfo(100, indexInfo)
// assert.NoError(t, err)
//
// err = node.historical.loadService.segLoader.indexLoader.loadIndex(segment, 100)
// assert.NoError(t, err)
//
// // do search
// dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
//
// const DIM = 16
// var searchRawData []byte
// var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
// for _, ele := range vec {
// buf := make([]byte, 4)
// binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
// searchRawData = append(searchRawData, buf...)
// }
// placeholderValue := milvuspb.PlaceholderValue{
// Tag: "$0",
// Type: milvuspb.PlaceholderType_FloatVector,
// Values: [][]byte{searchRawData},
// }
//
// placeholderGroup := milvuspb.PlaceholderGroup{
// Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
// }
//
// placeHolderGroupBlob, err := proto.Marshal(&placeholderGroup)
// assert.NoError(t, err)
//
// searchTimestamp := Timestamp(1020)
// collection, err := node.historical.replica.getCollectionByID(collectionID)
// assert.NoError(t, err)
// plan, err := createPlan(*collection, dslString)
// assert.NoError(t, err)
// holder, err := parseSearchRequest(plan, placeHolderGroupBlob)
// assert.NoError(t, err)
// placeholderGroups := make([]*searchRequest, 0)
// placeholderGroups = append(placeholderGroups, holder)
//
// // wait for segment building index
// time.Sleep(1 * time.Second)
//
// _, err = segment.segmentSearch(plan, placeholderGroups, []Timestamp{searchTimestamp})
// assert.Nil(t, err)
//
// plan.delete()
// holder.delete()
//
// <-ctx.Done()
//}
......@@ -122,7 +122,7 @@ func (p *ParamTable) Init() {
p.initSearchResultReceiveBufSize()
p.initStatsPublishInterval()
//p.initStatsChannelName()
p.initStatsChannelName()
p.initLogCfg()
})
......
......@@ -32,7 +32,6 @@ type Partition struct {
collectionID UniqueID
partitionID UniqueID
segmentIDs []UniqueID
enable bool
}
func (p *Partition) ID() UniqueID {
......@@ -57,7 +56,6 @@ func newPartition(collectionID UniqueID, partitionID UniqueID) *Partition {
var newPartition = &Partition{
collectionID: collectionID,
partitionID: partitionID,
enable: false,
}
log.Debug("create partition", zap.Int64("partitionID", partitionID))
......
......@@ -31,7 +31,7 @@ type Plan struct {
cPlan C.CPlan
}
func createPlan(col Collection, dsl string) (*Plan, error) {
func createPlan(col *Collection, dsl string) (*Plan, error) {
cDsl := C.CString(dsl)
defer C.free(unsafe.Pointer(cDsl))
var cPlan C.CPlan
......@@ -46,7 +46,7 @@ func createPlan(col Collection, dsl string) (*Plan, error) {
return newPlan, nil
}
func createPlanByExpr(col Collection, expr []byte) (*Plan, error) {
func createPlanByExpr(col *Collection, expr []byte) (*Plan, error) {
var cPlan C.CPlan
status := C.CreatePlanByExpr(col.collectionPtr, (*C.char)(unsafe.Pointer(&expr[0])), (C.int64_t)(len(expr)), &cPlan)
......
......@@ -30,7 +30,7 @@ func TestPlan_Plan(t *testing.T) {
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
plan, err := createPlan(*collection, dslString)
plan, err := createPlan(collection, dslString)
assert.NoError(t, err)
assert.NotEqual(t, plan, nil)
topk := plan.getTopK()
......@@ -49,7 +49,7 @@ func TestPlan_PlaceholderGroup(t *testing.T) {
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
plan, err := createPlan(*collection, dslString)
plan, err := createPlan(collection, dslString)
assert.NoError(t, err)
assert.NotNil(t, plan)
......
......@@ -192,15 +192,13 @@ func (node *QueryNode) Start() error {
// init services and manager
// TODO: pass node.streaming.replica to search service
node.searchService = newSearchService(node.queryNodeLoopCtx,
node.historical.replica,
node.streaming.replica,
node.streaming.tSafeReplica,
node.historical,
node.streaming,
node.msFactory)
node.retrieveService = newRetrieveService(node.queryNodeLoopCtx,
node.historical.replica,
node.streaming.replica,
node.streaming.tSafeReplica,
node.historical,
node.streaming,
node.msFactory,
)
......@@ -208,7 +206,6 @@ func (node *QueryNode) Start() error {
go node.scheduler.Start()
// start services
go node.searchService.start()
go node.retrieveService.start()
go node.historical.start()
node.UpdateStateCode(internalpb.StateCode_Healthy)
......
......@@ -132,20 +132,10 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
err = node.historical.replica.addPartition(collection.ID(), collectionMeta.PartitionIDs[0])
assert.NoError(t, err)
err = node.historical.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, segmentTypeGrowing)
err = node.historical.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
}
func initDmChannel(ctx context.Context, insertChannels []string, node *QueryNode) {
watchReq := &querypb.WatchDmChannelsRequest{
ChannelIDs: insertChannels,
}
_, err := node.WatchDmChannels(ctx, watchReq)
if err != nil {
panic(err)
}
}
func initSearchChannel(ctx context.Context, searchChan string, resultChan string, node *QueryNode) {
searchReq := &querypb.AddQueryChannelRequest{
RequestChannelID: searchChan,
......
......@@ -29,7 +29,7 @@ func TestReduce_AllFunc(t *testing.T) {
collectionMeta := genTestCollectionMeta(collectionID, false)
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segmentTypeGrowing)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
const DIM = 16
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
......@@ -63,7 +63,7 @@ func TestReduce_AllFunc(t *testing.T) {
log.Print("marshal placeholderGroup failed")
}
plan, err := createPlan(*collection, dslString)
plan, err := createPlan(collection, dslString)
assert.NoError(t, err)
holder, err := parseSearchRequest(plan, placeGroupByte)
assert.NoError(t, err)
......
......@@ -36,10 +36,9 @@ type retrieveCollection struct {
releaseCtx context.Context
cancel context.CancelFunc
collectionID UniqueID
historicalReplica ReplicaInterface
streamingReplica ReplicaInterface
tSafeReplica TSafeReplicaInterface
collectionID UniqueID
historical *historical
streaming *streaming
msgBuffer chan *msgstream.RetrieveMsg
unsolvedMsgMu sync.Mutex
......@@ -57,9 +56,8 @@ type retrieveCollection struct {
func newRetrieveCollection(releaseCtx context.Context,
cancel context.CancelFunc,
collectionID UniqueID,
historicalReplica ReplicaInterface,
streamingReplica ReplicaInterface,
tSafeReplica TSafeReplicaInterface,
historical *historical,
streaming *streaming,
retrieveResultStream msgstream.MsgStream) *retrieveCollection {
receiveBufSize := Params.RetrieveReceiveBufSize
msgBuffer := make(chan *msgstream.RetrieveMsg, receiveBufSize)
......@@ -69,10 +67,9 @@ func newRetrieveCollection(releaseCtx context.Context,
releaseCtx: releaseCtx,
cancel: cancel,
collectionID: collectionID,
historicalReplica: historicalReplica,
streamingReplica: streamingReplica,
tSafeReplica: tSafeReplica,
collectionID: collectionID,
historical: historical,
streaming: streaming,
tSafeWatchers: make(map[VChannel]*tSafeWatcher),
......@@ -118,7 +115,7 @@ func (rc *retrieveCollection) waitNewTSafe() Timestamp {
}
t := Timestamp(math.MaxInt64)
for channel := range rc.tSafeWatchers {
ts := rc.tSafeReplica.getTSafe(channel)
ts := rc.streaming.tSafeReplica.getTSafe(channel)
if ts <= t {
t = ts
}
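The loop above is a min-reduction: the serviceable time of a collection is the smallest tSafe across all watched channels, because a query at timestamp t is only safe once every channel has consumed past t. A sketch of the reduction over a plain map:

package sketch

import "math"

// minTSafe returns the smallest timestamp across channels; a request at ts
// is serviceable only once ts <= minTSafe, i.e. every channel caught up.
func minTSafe(tsafe map[string]uint64) uint64 {
	t := uint64(math.MaxUint64)
	for _, ts := range tsafe {
		if ts < t {
			t = ts
		}
	}
	return t
}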
......@@ -133,7 +130,7 @@ func (rc *retrieveCollection) start() {
func (rc *retrieveCollection) register() {
// register tSafe watcher and init watcher select case
collection, err := rc.streamingReplica.getCollectionByID(rc.collectionID)
collection, err := rc.streaming.replica.getCollectionByID(rc.collectionID)
if err != nil {
log.Error(err.Error())
return
......@@ -141,9 +138,9 @@ func (rc *retrieveCollection) register() {
rc.watcherSelectCase = make([]reflect.SelectCase, 0)
for _, channel := range collection.getWatchedDmChannels() {
rc.tSafeReplica.addTSafe(channel)
rc.streaming.tSafeReplica.addTSafe(channel)
rc.tSafeWatchers[channel] = newTSafeWatcher()
rc.tSafeReplica.registerTSafeWatcher(channel, rc.tSafeWatchers[channel])
rc.streaming.tSafeReplica.registerTSafeWatcher(channel, rc.tSafeWatchers[channel])
rc.watcherSelectCase = append(rc.watcherSelectCase, reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(rc.tSafeWatchers[channel].watcherChan()),
......@@ -358,7 +355,7 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
timestamp := retrieveMsg.Base.Timestamp
collectionID := retrieveMsg.CollectionID
collection, err := rc.historicalReplica.getCollectionByID(collectionID)
collection, err := rc.historical.replica.getCollectionByID(collectionID)
if err != nil {
return err
}
......@@ -378,8 +375,8 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
var partitionIDsInStreaming []UniqueID
partitionIDsInQuery := retrieveMsg.PartitionIDs
if len(partitionIDsInQuery) == 0 {
partitionIDsInHistoricalCol, err1 := rc.historicalReplica.getPartitionIDs(collectionID)
partitionIDsInStreamingCol, err2 := rc.streamingReplica.getPartitionIDs(collectionID)
partitionIDsInHistoricalCol, err1 := rc.historical.replica.getPartitionIDs(collectionID)
partitionIDsInStreamingCol, err2 := rc.streaming.replica.getPartitionIDs(collectionID)
if err1 != nil && err2 != nil {
return err2
}
......@@ -390,11 +387,11 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
partitionIDsInStreaming = partitionIDsInStreamingCol
} else {
for _, id := range partitionIDsInQuery {
_, err1 := rc.historicalReplica.getPartitionByID(id)
_, err1 := rc.historical.replica.getPartitionByID(id)
if err1 == nil {
partitionIDsInHistorical = append(partitionIDsInHistorical, id)
}
_, err2 := rc.streamingReplica.getPartitionByID(id)
_, err2 := rc.streaming.replica.getPartitionByID(id)
if err2 == nil {
partitionIDsInStreaming = append(partitionIDsInStreaming, id)
}
......@@ -406,12 +403,12 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
var mergeList []*planpb.RetrieveResults
for _, partitionID := range partitionIDsInHistorical {
segmentIDs, err := rc.historicalReplica.getSegmentIDs(partitionID)
segmentIDs, err := rc.historical.replica.getSegmentIDs(partitionID)
if err != nil {
return err
}
for _, segmentID := range segmentIDs {
segment, err := rc.historicalReplica.getSegmentByID(segmentID)
segment, err := rc.historical.replica.getSegmentByID(segmentID)
if err != nil {
return err
}
......@@ -424,12 +421,12 @@ func (rc *retrieveCollection) retrieve(retrieveMsg *msgstream.RetrieveMsg) error
}
for _, partitionID := range partitionIDsInStreaming {
segmentIDs, err := rc.streamingReplica.getSegmentIDs(partitionID)
segmentIDs, err := rc.streaming.replica.getSegmentIDs(partitionID)
if err != nil {
return err
}
for _, segmentID := range segmentIDs {
segment, err := rc.streamingReplica.getSegmentByID(segmentID)
segment, err := rc.streaming.replica.getSegmentByID(segmentID)
if err != nil {
return err
}
......
......@@ -65,6 +65,7 @@ func (sService *statsService) start() {
for {
select {
case <-sService.ctx.Done():
log.Debug("stats service closed")
return
case <-time.After(time.Duration(sleepTimeInterval) * time.Millisecond):
sService.publicStatistic(nil)
......
......@@ -31,7 +31,8 @@ func TestQueryService_Init(t *testing.T) {
service.Start()
t.Run("Test create channel", func(t *testing.T) {
response, err := service.CreateQueryChannel(ctx)
request := &querypb.CreateQueryChannelRequest{}
response, err := service.CreateQueryChannel(ctx, request)
assert.Nil(t, err)
assert.Equal(t, response.RequestChannel, "query-0")
assert.Equal(t, response.ResultChannel, "queryResult-0")
......