提交 0b478b61 编写于 作者: cai.zhang 提交者: yefu.chen

Change Id to ID in go files according to golint check

Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
上级 e541041b
......@@ -6,7 +6,6 @@ run:
- docs
- scripts
- internal/core
- internal/proto
linters-settings:
golint:
......@@ -34,7 +33,6 @@ issues:
- should be of the form
- should not use dot imports
- which can be annoying to use
- AllocId
service:
golangci-lint-version: 1.27.0 # use the fixed version to not introduce new linters unexpectedly
......@@ -36,19 +36,19 @@ func NewIDAllocator(ctx context.Context) (*IDAllocator, error) {
func (ta *IDAllocator) syncID() {
fmt.Println("syncID")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
req := &internalpb.IdRequest{
PeerId: 1,
req := &internalpb.IDRequest{
PeerID: 1,
Role: internalpb.PeerRole_Proxy,
Count: ta.countPerRPC,
}
resp, err := ta.masterClient.AllocId(ctx, req)
resp, err := ta.masterClient.AllocID(ctx, req)
cancel()
if err != nil {
log.Panic("syncID Failed!!!!!")
return
}
ta.idStart = resp.GetId()
ta.idStart = resp.GetID()
ta.idEnd = ta.idStart + int64(resp.GetCount())
}
......
......@@ -44,7 +44,7 @@ func (ta *TimestampAllocator) syncTs() {
fmt.Println("sync TS")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
req := &internalpb.TsoRequest{
PeerId: 1,
PeerID: 1,
Role: internalpb.PeerRole_Proxy,
Count: ta.countPerRPC,
}
......
此差异已折叠。
......@@ -83,7 +83,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, description_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, auto_id_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, autoid_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, fields_),
};
static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
......@@ -103,16 +103,16 @@ const char descriptor_table_protodef_schema_2eproto[] PROTOBUF_SECTION_VARIABLE(
"\035.milvus.proto.schema.DataType\0226\n\013type_p"
"arams\030\004 \003(\0132!.milvus.proto.common.KeyVal"
"uePair\0227\n\014index_params\030\005 \003(\0132!.milvus.pr"
"oto.common.KeyValuePair\"x\n\020CollectionSch"
"ema\022\014\n\004name\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\017"
"\n\007auto_id\030\003 \001(\010\0220\n\006fields\030\004 \003(\0132 .milvus"
".proto.schema.FieldSchema*\221\001\n\010DataType\022\010"
"\n\004NONE\020\000\022\010\n\004BOOL\020\001\022\010\n\004INT8\020\002\022\t\n\005INT16\020\003\022"
"\t\n\005INT32\020\004\022\t\n\005INT64\020\005\022\t\n\005FLOAT\020\n\022\n\n\006DOUB"
"LE\020\013\022\n\n\006STRING\020\024\022\021\n\rVECTOR_BINARY\020d\022\020\n\014V"
"ECTOR_FLOAT\020eBBZ@github.com/zilliztech/m"
"ilvus-distributed/internal/proto/schemap"
"bb\006proto3"
"oto.common.KeyValuePair\"w\n\020CollectionSch"
"ema\022\014\n\004name\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\016"
"\n\006autoID\030\003 \001(\010\0220\n\006fields\030\004 \003(\0132 .milvus."
"proto.schema.FieldSchema*\221\001\n\010DataType\022\010\n"
"\004NONE\020\000\022\010\n\004BOOL\020\001\022\010\n\004INT8\020\002\022\t\n\005INT16\020\003\022\t"
"\n\005INT32\020\004\022\t\n\005INT64\020\005\022\t\n\005FLOAT\020\n\022\n\n\006DOUBL"
"E\020\013\022\n\n\006STRING\020\024\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VE"
"CTOR_FLOAT\020eBBZ@github.com/zilliztech/mi"
"lvus-distributed/internal/proto/schemapb"
"b\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_schema_2eproto_deps[1] = {
&::descriptor_table_common_2eproto,
......@@ -124,7 +124,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_sch
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_schema_2eproto_once;
static bool descriptor_table_schema_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_schema_2eproto = {
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 609,
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 608,
&descriptor_table_schema_2eproto_once, descriptor_table_schema_2eproto_sccs, descriptor_table_schema_2eproto_deps, 2, 1,
schemas, file_default_instances, TableStruct_schema_2eproto::offsets,
file_level_metadata_schema_2eproto, 2, file_level_enum_descriptors_schema_2eproto, file_level_service_descriptors_schema_2eproto,
......@@ -680,7 +680,7 @@ CollectionSchema::CollectionSchema(const CollectionSchema& from)
if (!from.description().empty()) {
description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_);
}
auto_id_ = from.auto_id_;
autoid_ = from.autoid_;
// @@protoc_insertion_point(copy_constructor:milvus.proto.schema.CollectionSchema)
}
......@@ -688,7 +688,7 @@ void CollectionSchema::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_CollectionSchema_schema_2eproto.base);
name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
auto_id_ = false;
autoid_ = false;
}
CollectionSchema::~CollectionSchema() {
......@@ -719,7 +719,7 @@ void CollectionSchema::Clear() {
fields_.Clear();
name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
auto_id_ = false;
autoid_ = false;
_internal_metadata_.Clear();
}
......@@ -745,10 +745,10 @@ const char* CollectionSchema::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPA
CHK_(ptr);
} else goto handle_unusual;
continue;
// bool auto_id = 3;
// bool autoID = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
auto_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
autoid_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
......@@ -824,13 +824,13 @@ bool CollectionSchema::MergePartialFromCodedStream(
break;
}
// bool auto_id = 3;
// bool autoID = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
input, &auto_id_)));
input, &autoid_)));
} else {
goto handle_unusual;
}
......@@ -895,9 +895,9 @@ void CollectionSchema::SerializeWithCachedSizes(
2, this->description(), output);
}
// bool auto_id = 3;
if (this->auto_id() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(3, this->auto_id(), output);
// bool autoID = 3;
if (this->autoid() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(3, this->autoid(), output);
}
// repeated .milvus.proto.schema.FieldSchema fields = 4;
......@@ -944,9 +944,9 @@ void CollectionSchema::SerializeWithCachedSizes(
2, this->description(), target);
}
// bool auto_id = 3;
if (this->auto_id() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->auto_id(), target);
// bool autoID = 3;
if (this->autoid() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->autoid(), target);
}
// repeated .milvus.proto.schema.FieldSchema fields = 4;
......@@ -1003,8 +1003,8 @@ size_t CollectionSchema::ByteSizeLong() const {
this->description());
}
// bool auto_id = 3;
if (this->auto_id() != 0) {
// bool autoID = 3;
if (this->autoid() != 0) {
total_size += 1 + 1;
}
......@@ -1044,8 +1044,8 @@ void CollectionSchema::MergeFrom(const CollectionSchema& from) {
description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_);
}
if (from.auto_id() != 0) {
set_auto_id(from.auto_id());
if (from.autoid() != 0) {
set_autoid(from.autoid());
}
}
......@@ -1075,7 +1075,7 @@ void CollectionSchema::InternalSwap(CollectionSchema* other) {
GetArenaNoVirtual());
description_.Swap(&other->description_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
swap(auto_id_, other->auto_id_);
swap(autoid_, other->autoid_);
}
::PROTOBUF_NAMESPACE_ID::Metadata CollectionSchema::GetMetadata() const {
......
......@@ -411,7 +411,7 @@ class CollectionSchema :
kFieldsFieldNumber = 4,
kNameFieldNumber = 1,
kDescriptionFieldNumber = 2,
kAutoIdFieldNumber = 3,
kAutoIDFieldNumber = 3,
};
// repeated .milvus.proto.schema.FieldSchema fields = 4;
int fields_size() const;
......@@ -446,10 +446,10 @@ class CollectionSchema :
std::string* release_description();
void set_allocated_description(std::string* description);
// bool auto_id = 3;
void clear_auto_id();
bool auto_id() const;
void set_auto_id(bool value);
// bool autoID = 3;
void clear_autoid();
bool autoid() const;
void set_autoid(bool value);
// @@protoc_insertion_point(class_scope:milvus.proto.schema.CollectionSchema)
private:
......@@ -459,7 +459,7 @@ class CollectionSchema :
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::proto::schema::FieldSchema > fields_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr description_;
bool auto_id_;
bool autoid_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_schema_2eproto;
};
......@@ -750,18 +750,18 @@ inline void CollectionSchema::set_allocated_description(std::string* description
// @@protoc_insertion_point(field_set_allocated:milvus.proto.schema.CollectionSchema.description)
}
// bool auto_id = 3;
inline void CollectionSchema::clear_auto_id() {
auto_id_ = false;
// bool autoID = 3;
inline void CollectionSchema::clear_autoid() {
autoid_ = false;
}
inline bool CollectionSchema::auto_id() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.CollectionSchema.auto_id)
return auto_id_;
inline bool CollectionSchema::autoid() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.CollectionSchema.autoID)
return autoid_;
}
inline void CollectionSchema::set_auto_id(bool value) {
inline void CollectionSchema::set_autoid(bool value) {
auto_id_ = value;
// @@protoc_insertion_point(field_set:milvus.proto.schema.CollectionSchema.auto_id)
autoid_ = value;
// @@protoc_insertion_point(field_set:milvus.proto.schema.CollectionSchema.autoID)
}
// repeated .milvus.proto.schema.FieldSchema fields = 4;
......
......@@ -532,7 +532,7 @@ const char descriptor_table_protodef_service_5fmsg_2eproto[] PROTOBUF_SECTION_VA
"us\0221\n\004name\030\002 \001(\0132#.milvus.proto.service."
"PartitionName\0225\n\nstatistics\030\003 \003(\0132!.milv"
"us.proto.common.KeyValuePair\"$\n\005Score\022\013\n"
"\003tag\030\001 \001(\t\022\016\n\006values\030\002 \003(\002\"m\n\004Hits\022\013\n\003id"
"\003tag\030\001 \001(\t\022\016\n\006values\030\002 \003(\002\"m\n\004Hits\022\013\n\003ID"
"s\030\001 \003(\003\022+\n\010row_data\030\002 \003(\0132\031.milvus.proto"
".common.Blob\022+\n\006scores\030\003 \003(\0132\033.milvus.pr"
"oto.service.Score\"d\n\013QueryResult\022+\n\006stat"
......@@ -5598,7 +5598,7 @@ const char* Hits::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::inter
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
CHK_(ptr);
switch (tag >> 3) {
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(mutable_ids(), ptr, ctx);
......@@ -5662,7 +5662,7 @@ bool Hits::MergePartialFromCodedStream(
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
case 1: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
......@@ -5727,7 +5727,7 @@ void Hits::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
if (this->ids_size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(1, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
output->WriteVarint32(_ids_cached_byte_size_.load(
......@@ -5769,7 +5769,7 @@ void Hits::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
if (this->ids_size() > 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
1,
......@@ -5819,7 +5819,7 @@ size_t Hits::ByteSizeLong() const {
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
{
size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
Int64Size(this->ids_);
......
......@@ -2422,11 +2422,11 @@ class Hits :
// accessors -------------------------------------------------------
enum : int {
kIdsFieldNumber = 1,
kIDsFieldNumber = 1,
kRowDataFieldNumber = 2,
kScoresFieldNumber = 3,
};
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
int ids_size() const;
void clear_ids();
::PROTOBUF_NAMESPACE_ID::int64 ids(int index) const;
......@@ -4109,7 +4109,7 @@ Score::mutable_values() {
// Hits
// repeated int64 ids = 1;
// repeated int64 IDs = 1;
inline int Hits::ids_size() const {
return ids_.size();
}
......@@ -4117,25 +4117,25 @@ inline void Hits::clear_ids() {
ids_.Clear();
}
inline ::PROTOBUF_NAMESPACE_ID::int64 Hits::ids(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.Hits.ids)
// @@protoc_insertion_point(field_get:milvus.proto.service.Hits.IDs)
return ids_.Get(index);
}
inline void Hits::set_ids(int index, ::PROTOBUF_NAMESPACE_ID::int64 value) {
ids_.Set(index, value);
// @@protoc_insertion_point(field_set:milvus.proto.service.Hits.ids)
// @@protoc_insertion_point(field_set:milvus.proto.service.Hits.IDs)
}
inline void Hits::add_ids(::PROTOBUF_NAMESPACE_ID::int64 value) {
ids_.Add(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.Hits.ids)
// @@protoc_insertion_point(field_add:milvus.proto.service.Hits.IDs)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >&
Hits::ids() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.Hits.ids)
// @@protoc_insertion_point(field_list:milvus.proto.service.Hits.IDs)
return ids_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 >*
Hits::mutable_ids() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.Hits.ids)
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.Hits.IDs)
return &ids_;
}
......
......@@ -50,10 +50,10 @@ func GrpcMarshal(c *Collection) *Collection {
}
pbSchema.Fields = schemaSlice
grpcCollection := &etcdpb.CollectionMeta{
Id: c.ID,
ID: c.ID,
Schema: pbSchema,
CreateTime: c.CreateTime,
SegmentIds: c.SegmentIDs,
SegmentIDs: c.SegmentIDs,
PartitionTags: c.PartitionTags,
}
out := proto.MarshalTextString(grpcCollection)
......
......@@ -84,11 +84,11 @@ func (t *createCollectionTask) Execute() error {
}
collection := etcdpb.CollectionMeta{
Id: collectionID,
ID: collectionID,
Schema: &schema,
CreateTime: ts,
// TODO: initial segment?
SegmentIds: make([]UniqueID, 0),
SegmentIDs: make([]UniqueID, 0),
// TODO: initial partition?
PartitionTags: make([]string, 0),
}
......@@ -123,7 +123,7 @@ func (t *dropCollectionTask) Execute() error {
return err
}
collectionID := collectionMeta.Id
collectionID := collectionMeta.ID
return t.mt.DeleteCollection(collectionID)
}
......
......@@ -44,7 +44,7 @@ func TestMaster_CreateCollectionTask(t *testing.T) {
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoId: false,
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
Name: "col1_f1",
......@@ -103,9 +103,9 @@ func TestMaster_CreateCollectionTask(t *testing.T) {
req := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqId: 1,
ReqID: 1,
Timestamp: 11,
ProxyId: 1,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
log.Printf("... [Create] collection col1\n")
......@@ -115,10 +115,10 @@ func TestMaster_CreateCollectionTask(t *testing.T) {
collMeta, err := svr.mt.GetCollectionByName(sch.Name)
assert.Nil(t, err)
t.Logf("collection id = %d", collMeta.Id)
t.Logf("collection id = %d", collMeta.ID)
assert.Equal(t, collMeta.CreateTime, uint64(11))
assert.Equal(t, collMeta.Schema.Name, "col1")
assert.Equal(t, collMeta.Schema.AutoId, false)
assert.Equal(t, collMeta.Schema.AutoID, false)
assert.Equal(t, len(collMeta.Schema.Fields), 2)
assert.Equal(t, collMeta.Schema.Fields[0].Name, "col1_f1")
assert.Equal(t, collMeta.Schema.Fields[1].Name, "col1_f2")
......@@ -157,9 +157,9 @@ func TestMaster_CreateCollectionTask(t *testing.T) {
reqDrop := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqId: 1,
ReqID: 1,
Timestamp: 11,
ProxyId: 1,
ProxyID: 1,
CollectionName: &ser,
}
......
......@@ -23,7 +23,7 @@ func ComputeCloseTime(ss internalpb.SegmentStats, kvbase *kv.EtcdKV) error {
memRate = 1
}
sec := int(conf.Config.Master.SegmentThreshole*0.2) / memRate
data, err := kvbase.Load("segment/" + strconv.Itoa(int(ss.SegmentId)))
data, err := kvbase.Load("segment/" + strconv.Itoa(int(ss.SegmentID)))
if err != nil {
return err
}
......@@ -37,7 +37,7 @@ func ComputeCloseTime(ss internalpb.SegmentStats, kvbase *kv.EtcdKV) error {
if err != nil {
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(ss.SegmentId)), updateData)
kvbase.Save("segment/"+strconv.Itoa(int(ss.SegmentID)), updateData)
//create new segment
newSegID, _ := id.AllocOne()
newSeg := segment.NewSegment(newSegID, seg.CollectionID, seg.CollectionName, "default", seg.ChannelStart, seg.ChannelEnd, currentTime, time.Unix(1<<36-1, 0))
......
......@@ -29,7 +29,7 @@ func TestComputeClosetTime(t *testing.T) {
var news internalpb.SegmentStats
for i := 0; i < 10; i++ {
news = internalpb.SegmentStats{
SegmentId: UniqueID(6875940398055133887),
SegmentID: UniqueID(6875940398055133887),
MemorySize: int64(i * 1000),
}
ComputeCloseTime(news, kvbase)
......
......@@ -347,19 +347,19 @@ func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequ
return response, nil
}
func (s *Master) AllocId(ctx context.Context, request *internalpb.IdRequest) (*internalpb.IdResponse, error) {
func (s *Master) AllocID(ctx context.Context, request *internalpb.IDRequest) (*internalpb.IDResponse, error) {
count := request.GetCount()
ts, err := id.AllocOne()
if err != nil {
return &internalpb.IdResponse{
return &internalpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
}, err
}
response := &internalpb.IdResponse{
response := &internalpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
Id: ts,
ID: ts,
Count: count,
}
......
......@@ -42,7 +42,7 @@ func TestMaster_CreateCollection(t *testing.T) {
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoId: false,
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
Name: "col1_f1",
......@@ -101,9 +101,9 @@ func TestMaster_CreateCollection(t *testing.T) {
req := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqId: 1,
ReqID: 1,
Timestamp: 11,
ProxyId: 1,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err := cli.CreateCollection(ctx, &req)
......@@ -112,10 +112,10 @@ func TestMaster_CreateCollection(t *testing.T) {
collMeta, err := svr.mt.GetCollectionByName(sch.Name)
assert.Nil(t, err)
t.Logf("collection id = %d", collMeta.Id)
t.Logf("collection id = %d", collMeta.ID)
assert.Equal(t, collMeta.CreateTime, uint64(11))
assert.Equal(t, collMeta.Schema.Name, "col1")
assert.Equal(t, collMeta.Schema.AutoId, false)
assert.Equal(t, collMeta.Schema.AutoID, false)
assert.Equal(t, len(collMeta.Schema.Fields), 2)
assert.Equal(t, collMeta.Schema.Fields[0].Name, "col1_f1")
assert.Equal(t, collMeta.Schema.Fields[1].Name, "col1_f2")
......
......@@ -252,7 +252,7 @@ func (s *Master) pulsarLoop() {
var m internalpb.SegmentStats
proto.Unmarshal(msg.Payload(), &m)
fmt.Printf("Received message msgId: %#v -- content: '%d'\n",
msg.ID(), m.SegmentId)
msg.ID(), m.SegmentID)
s.ssChan <- m
consumer.Ack(msg)
case <-ctx.Done():
......
......@@ -60,7 +60,7 @@ func (mt *metaTable) reloadFromKV() error {
if err != nil {
return err
}
mt.tenantID2Meta[tenantMeta.Id] = tenantMeta
mt.tenantID2Meta[tenantMeta.ID] = tenantMeta
}
_, values, err = mt.client.LoadWithPrefix("proxy")
......@@ -74,7 +74,7 @@ func (mt *metaTable) reloadFromKV() error {
if err != nil {
return err
}
mt.proxyID2Meta[proxyMeta.Id] = proxyMeta
mt.proxyID2Meta[proxyMeta.ID] = proxyMeta
}
_, values, err = mt.client.LoadWithPrefix("collection")
......@@ -88,8 +88,8 @@ func (mt *metaTable) reloadFromKV() error {
if err != nil {
return err
}
mt.collID2Meta[collectionMeta.Id] = collectionMeta
mt.collName2ID[collectionMeta.Schema.Name] = collectionMeta.Id
mt.collID2Meta[collectionMeta.ID] = collectionMeta
mt.collName2ID[collectionMeta.Schema.Name] = collectionMeta.ID
}
_, values, err = mt.client.LoadWithPrefix("segment")
......@@ -103,7 +103,7 @@ func (mt *metaTable) reloadFromKV() error {
if err != nil {
return err
}
mt.segID2Meta[segmentMeta.SegmentId] = segmentMeta
mt.segID2Meta[segmentMeta.SegmentID] = segmentMeta
}
return nil
......@@ -115,9 +115,9 @@ func (mt *metaTable) saveCollectionMeta(coll *pb.CollectionMeta) error {
if err != nil {
return err
}
mt.collID2Meta[coll.Id] = *coll
mt.collName2ID[coll.Schema.Name] = coll.Id
return mt.client.Save("/collection/"+strconv.FormatInt(coll.Id, 10), string(collBytes))
mt.collID2Meta[coll.ID] = *coll
mt.collName2ID[coll.Schema.Name] = coll.ID
return mt.client.Save("/collection/"+strconv.FormatInt(coll.ID, 10), string(collBytes))
}
// mt.ddLock.Lock() before call this function
......@@ -127,9 +127,9 @@ func (mt *metaTable) saveSegmentMeta(seg *pb.SegmentMeta) error {
return err
}
mt.segID2Meta[seg.SegmentId] = *seg
mt.segID2Meta[seg.SegmentID] = *seg
return mt.client.Save("/segment/"+strconv.FormatInt(seg.SegmentId, 10), string(segBytes))
return mt.client.Save("/segment/"+strconv.FormatInt(seg.SegmentID, 10), string(segBytes))
}
// mt.ddLock.Lock() before call this function
......@@ -156,7 +156,7 @@ func (mt *metaTable) saveCollectionAndDeleteSegmentsMeta(coll *pb.CollectionMeta
return err
}
kvs["/collection/"+strconv.FormatInt(coll.Id, 10)] = string(collStrs)
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = string(collStrs)
for _, segID := range segIDs {
_, ok := mt.segID2Meta[segID]
......@@ -166,7 +166,7 @@ func (mt *metaTable) saveCollectionAndDeleteSegmentsMeta(coll *pb.CollectionMeta
}
}
mt.collID2Meta[coll.Id] = *coll
mt.collID2Meta[coll.ID] = *coll
return mt.client.MultiSaveAndRemove(kvs, segIDStrs)
}
......@@ -178,18 +178,18 @@ func (mt *metaTable) saveCollectionsAndSegmentsMeta(coll *pb.CollectionMeta, seg
if err != nil {
return err
}
kvs["/collection/"+strconv.FormatInt(coll.Id, 10)] = string(collBytes)
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = string(collBytes)
mt.collID2Meta[coll.Id] = *coll
mt.collName2ID[coll.Schema.Name] = coll.Id
mt.collID2Meta[coll.ID] = *coll
mt.collName2ID[coll.Schema.Name] = coll.ID
segBytes, err := proto.Marshal(seg)
if err != nil {
return err
}
kvs["/segment/"+strconv.FormatInt(seg.SegmentId, 10)] = string(segBytes)
kvs["/segment/"+strconv.FormatInt(seg.SegmentID, 10)] = string(segBytes)
mt.segID2Meta[seg.SegmentId] = *seg
mt.segID2Meta[seg.SegmentID] = *seg
return mt.client.MultiSave(kvs)
}
......@@ -230,7 +230,7 @@ func (mt *metaTable) deleteCollectionsAndSegmentsMeta(collID UniqueID, segIDs []
func (mt *metaTable) AddCollection(coll *pb.CollectionMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
if len(coll.SegmentIds) != 0 {
if len(coll.SegmentIDs) != 0 {
return errors.Errorf("segment should be empty when creating collection")
}
if len(coll.PartitionTags) != 0 {
......@@ -257,7 +257,7 @@ func (mt *metaTable) DeleteCollection(collID UniqueID) error {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
}
err := mt.deleteCollectionsAndSegmentsMeta(collID, collMeta.SegmentIds)
err := mt.deleteCollectionsAndSegmentsMeta(collID, collMeta.SegmentIDs)
if err != nil {
_ = mt.reloadFromKV()
return err
......@@ -344,9 +344,9 @@ func (mt *metaTable) DeletePartition(collID UniqueID, tag string) error {
return nil
}
toDeleteSeg := make([]UniqueID, 0, len(collMeta.SegmentIds))
seg := make([]UniqueID, 0, len(collMeta.SegmentIds))
for _, s := range collMeta.SegmentIds {
toDeleteSeg := make([]UniqueID, 0, len(collMeta.SegmentIDs))
seg := make([]UniqueID, 0, len(collMeta.SegmentIDs))
for _, s := range collMeta.SegmentIDs {
sm, ok := mt.segID2Meta[s]
if !ok {
return errors.Errorf("can't find segment id = %d", s)
......@@ -358,7 +358,7 @@ func (mt *metaTable) DeletePartition(collID UniqueID, tag string) error {
}
}
collMeta.PartitionTags = pt
collMeta.SegmentIds = seg
collMeta.SegmentIDs = seg
err := mt.saveCollectionAndDeleteSegmentsMeta(&collMeta, toDeleteSeg)
if err != nil {
......@@ -371,9 +371,9 @@ func (mt *metaTable) DeletePartition(collID UniqueID, tag string) error {
func (mt *metaTable) AddSegment(seg *pb.SegmentMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
collID := seg.CollectionId
collID := seg.CollectionID
collMeta := mt.collID2Meta[collID]
collMeta.SegmentIds = append(collMeta.SegmentIds, seg.SegmentId)
collMeta.SegmentIDs = append(collMeta.SegmentIDs, seg.SegmentID)
err := mt.saveCollectionsAndSegmentsMeta(&collMeta, seg)
if err != nil {
_ = mt.reloadFromKV()
......@@ -402,14 +402,14 @@ func (mt *metaTable) DeleteSegment(segID UniqueID) error {
return errors.Errorf("can't find segment. id = " + strconv.FormatInt(segID, 10))
}
collMeta, ok := mt.collID2Meta[segMeta.CollectionId]
collMeta, ok := mt.collID2Meta[segMeta.CollectionID]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(segMeta.CollectionId, 10))
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(segMeta.CollectionID, 10))
}
for i := 0; i < len(collMeta.SegmentIds); i++ {
if collMeta.SegmentIds[i] == segID {
collMeta.SegmentIds = append(collMeta.SegmentIds[:i], collMeta.SegmentIds[i+1:]...)
for i := 0; i < len(collMeta.SegmentIDs); i++ {
if collMeta.SegmentIDs[i] == segID {
collMeta.SegmentIDs = append(collMeta.SegmentIDs[:i], collMeta.SegmentIDs[i+1:]...)
}
}
......
......@@ -28,63 +28,63 @@ func TestMetaTable_Collection(t *testing.T) {
defer meta.client.Close()
colMeta := pb.CollectionMeta{
Id: 100,
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []UniqueID{},
SegmentIDs: []UniqueID{},
PartitionTags: []string{},
}
colMeta2 := pb.CollectionMeta{
Id: 50,
ID: 50,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []UniqueID{},
SegmentIDs: []UniqueID{},
PartitionTags: []string{},
}
colMeta3 := pb.CollectionMeta{
Id: 30,
ID: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []UniqueID{},
SegmentIDs: []UniqueID{},
PartitionTags: []string{},
}
colMeta4 := pb.CollectionMeta{
Id: 30,
ID: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []UniqueID{1},
SegmentIDs: []UniqueID{1},
PartitionTags: []string{},
}
colMeta5 := pb.CollectionMeta{
Id: 30,
ID: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []UniqueID{1},
SegmentIDs: []UniqueID{1},
PartitionTags: []string{"1"},
}
segID1 := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
SegmentID: 200,
CollectionID: 100,
PartitionTag: "p1",
}
segID2 := pb.SegmentMeta{
SegmentId: 300,
CollectionId: 100,
SegmentID: 300,
CollectionID: 100,
PartitionTag: "p1",
}
segID3 := pb.SegmentMeta{
SegmentId: 400,
CollectionId: 100,
SegmentID: 400,
CollectionID: 100,
PartitionTag: "p2",
}
err = meta.AddCollection(&colMeta)
......@@ -97,11 +97,11 @@ func TestMetaTable_Collection(t *testing.T) {
assert.NotNil(t, err)
err = meta.AddCollection(&colMeta5)
assert.NotNil(t, err)
hasCollection := meta.HasCollection(colMeta.Id)
hasCollection := meta.HasCollection(colMeta.ID)
assert.True(t, hasCollection)
err = meta.AddPartition(colMeta.Id, "p1")
err = meta.AddPartition(colMeta.ID, "p1")
assert.Nil(t, err)
err = meta.AddPartition(colMeta.Id, "p2")
err = meta.AddPartition(colMeta.ID, "p2")
assert.Nil(t, err)
err = meta.AddSegment(&segID1)
assert.Nil(t, err)
......@@ -111,16 +111,16 @@ func TestMetaTable_Collection(t *testing.T) {
assert.Nil(t, err)
getColMeta, err := meta.GetCollectionByName(colMeta.Schema.Name)
assert.Nil(t, err)
assert.Equal(t, 3, len(getColMeta.SegmentIds))
err = meta.DeleteCollection(colMeta.Id)
assert.Equal(t, 3, len(getColMeta.SegmentIDs))
err = meta.DeleteCollection(colMeta.ID)
assert.Nil(t, err)
hasCollection = meta.HasCollection(colMeta.Id)
hasCollection = meta.HasCollection(colMeta.ID)
assert.False(t, hasCollection)
_, err = meta.GetSegmentByID(segID1.SegmentId)
_, err = meta.GetSegmentByID(segID1.SegmentID)
assert.NotNil(t, err)
_, err = meta.GetSegmentByID(segID2.SegmentId)
_, err = meta.GetSegmentByID(segID2.SegmentID)
assert.NotNil(t, err)
_, err = meta.GetSegmentByID(segID3.SegmentId)
_, err = meta.GetSegmentByID(segID3.SegmentID)
assert.NotNil(t, err)
err = meta.reloadFromKV()
......@@ -132,7 +132,7 @@ func TestMetaTable_Collection(t *testing.T) {
assert.Equal(t, 1, len(meta.collID2Meta))
assert.Equal(t, 0, len(meta.segID2Meta))
err = meta.DeleteCollection(colMeta3.Id)
err = meta.DeleteCollection(colMeta3.ID)
assert.Nil(t, err)
}
......@@ -151,34 +151,34 @@ func TestMetaTable_DeletePartition(t *testing.T) {
defer meta.client.Close()
colMeta := pb.CollectionMeta{
Id: 100,
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []UniqueID{},
SegmentIDs: []UniqueID{},
PartitionTags: []string{},
}
segID1 := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
SegmentID: 200,
CollectionID: 100,
PartitionTag: "p1",
}
segID2 := pb.SegmentMeta{
SegmentId: 300,
CollectionId: 100,
SegmentID: 300,
CollectionID: 100,
PartitionTag: "p1",
}
segID3 := pb.SegmentMeta{
SegmentId: 400,
CollectionId: 100,
SegmentID: 400,
CollectionID: 100,
PartitionTag: "p2",
}
err = meta.AddCollection(&colMeta)
assert.Nil(t, err)
err = meta.AddPartition(colMeta.Id, "p1")
err = meta.AddPartition(colMeta.ID, "p1")
assert.Nil(t, err)
err = meta.AddPartition(colMeta.Id, "p2")
err = meta.AddPartition(colMeta.ID, "p2")
assert.Nil(t, err)
err = meta.AddSegment(&segID1)
assert.Nil(t, err)
......@@ -189,22 +189,22 @@ func TestMetaTable_DeletePartition(t *testing.T) {
afterCollMeta, err := meta.GetCollectionByName("coll1")
assert.Nil(t, err)
assert.Equal(t, 2, len(afterCollMeta.PartitionTags))
assert.Equal(t, 3, len(afterCollMeta.SegmentIds))
assert.Equal(t, 3, len(afterCollMeta.SegmentIDs))
err = meta.DeletePartition(100, "p1")
assert.Nil(t, err)
afterCollMeta, err = meta.GetCollectionByName("coll1")
assert.Nil(t, err)
assert.Equal(t, 1, len(afterCollMeta.PartitionTags))
assert.Equal(t, 1, len(afterCollMeta.SegmentIds))
hasPartition := meta.HasPartition(colMeta.Id, "p1")
assert.Equal(t, 1, len(afterCollMeta.SegmentIDs))
hasPartition := meta.HasPartition(colMeta.ID, "p1")
assert.False(t, hasPartition)
hasPartition = meta.HasPartition(colMeta.Id, "p2")
hasPartition = meta.HasPartition(colMeta.ID, "p2")
assert.True(t, hasPartition)
_, err = meta.GetSegmentByID(segID1.SegmentId)
_, err = meta.GetSegmentByID(segID1.SegmentID)
assert.NotNil(t, err)
_, err = meta.GetSegmentByID(segID2.SegmentId)
_, err = meta.GetSegmentByID(segID2.SegmentID)
assert.NotNil(t, err)
_, err = meta.GetSegmentByID(segID3.SegmentId)
_, err = meta.GetSegmentByID(segID3.SegmentID)
assert.Nil(t, err)
afterCollMeta, err = meta.GetCollectionByName("coll1")
assert.Nil(t, err)
......@@ -239,42 +239,42 @@ func TestMetaTable_Segment(t *testing.T) {
assert.Nil(t, err)
colMeta := pb.CollectionMeta{
Id: 100,
ID: 100,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []UniqueID{},
SegmentIDs: []UniqueID{},
PartitionTags: []string{},
}
segMeta := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
SegmentID: 200,
CollectionID: 100,
PartitionTag: "p1",
}
err = meta.AddCollection(&colMeta)
assert.Nil(t, err)
err = meta.AddPartition(colMeta.Id, "p1")
err = meta.AddPartition(colMeta.ID, "p1")
assert.Nil(t, err)
err = meta.AddSegment(&segMeta)
assert.Nil(t, err)
getSegMeta, err := meta.GetSegmentByID(segMeta.SegmentId)
getSegMeta, err := meta.GetSegmentByID(segMeta.SegmentID)
assert.Nil(t, err)
assert.Equal(t, &segMeta, getSegMeta)
err = meta.CloseSegment(segMeta.SegmentId, Timestamp(11), 111)
err = meta.CloseSegment(segMeta.SegmentID, Timestamp(11), 111)
assert.Nil(t, err)
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentId)
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentID)
assert.Nil(t, err)
assert.Equal(t, getSegMeta.NumRows, int64(111))
assert.Equal(t, getSegMeta.CloseTime, uint64(11))
err = meta.DeleteSegment(segMeta.SegmentId)
err = meta.DeleteSegment(segMeta.SegmentID)
assert.Nil(t, err)
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentId)
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentID)
assert.Nil(t, getSegMeta)
assert.NotNil(t, err)
getColMeta, err := meta.GetCollectionByName(colMeta.Schema.Name)
assert.Nil(t, err)
assert.Equal(t, 0, len(getColMeta.SegmentIds))
assert.Equal(t, 0, len(getColMeta.SegmentIDs))
meta.tenantID2Meta = make(map[UniqueID]pb.TenantMeta)
meta.proxyID2Meta = make(map[UniqueID]pb.ProxyMeta)
......
......@@ -90,19 +90,19 @@ func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequ
return response, nil
}
func (s *Master) AllocId(ctx context.Context, request *internalpb.IdRequest) (*internalpb.IdResponse, error) {
func (s *Master) AllocID(ctx context.Context, request *internalpb.IDRequest) (*internalpb.IDResponse, error) {
count := request.GetCount()
ts, err := id.AllocOne()
if err != nil {
return &internalpb.IdResponse{
return &internalpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
}, err
}
response := &internalpb.IdResponse{
response := &internalpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
Id: ts,
ID: ts,
Count: count,
}
......
......@@ -76,7 +76,7 @@ func (t *createPartitionTask) Execute() error {
return err
}
collectionID := collectionMeta.Id
collectionID := collectionMeta.ID
err = (*t.kvBase).Save(partitionMetaPrefix+strconv.FormatInt(collectionID, 10), string(collectionJSON))
if err != nil {
return err
......@@ -113,7 +113,7 @@ func (t *dropPartitionTask) Execute() error {
return err
}
err = t.mt.DeletePartition(collectionMeta.Id, partitionName.Tag)
err = t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
......@@ -123,7 +123,7 @@ func (t *dropPartitionTask) Execute() error {
return err
}
collectionID := collectionMeta.Id
collectionID := collectionMeta.ID
err = (*t.kvBase).Save(partitionMetaPrefix+strconv.FormatInt(collectionID, 10), string(collectionJSON))
if err != nil {
return err
......@@ -160,7 +160,7 @@ func (t *hasPartitionTask) Execute() error {
return err
}
t.hasPartition = t.mt.HasPartition(collectionMeta.Id, partitionName.Tag)
t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName.Tag)
return nil
}
......
......@@ -64,7 +64,7 @@ func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier
}
timeTickResult := internalPb.TimeTickMsg{
MsgType: internalPb.MsgType_kTimeTick,
PeerId: 0,
PeerID: 0,
Timestamp: timetick,
}
timeTickMsg := &ms.TimeTickMsg{
......
......@@ -72,16 +72,16 @@ func (ttBarrier *softTimeTickBarrier) Start() error {
if len(ttmsgs.Msgs) > 0 {
for _, timetickmsg := range ttmsgs.Msgs {
ttmsg := (*timetickmsg).(*ms.TimeTickMsg)
oldT, ok := ttBarrier.peer2LastTt[ttmsg.PeerId]
log.Printf("[softTimeTickBarrier] peer(%d)=%d\n", ttmsg.PeerId, ttmsg.Timestamp)
oldT, ok := ttBarrier.peer2LastTt[ttmsg.PeerID]
log.Printf("[softTimeTickBarrier] peer(%d)=%d\n", ttmsg.PeerID, ttmsg.Timestamp)
if !ok {
log.Printf("[softTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.PeerId)
log.Printf("[softTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.PeerID)
continue
}
if ttmsg.Timestamp > oldT {
ttBarrier.peer2LastTt[ttmsg.PeerId] = ttmsg.Timestamp
ttBarrier.peer2LastTt[ttmsg.PeerID] = ttmsg.Timestamp
// get a legal Timestamp
ts := ttBarrier.minTimestamp()
......@@ -189,20 +189,20 @@ func (ttBarrier *hardTimeTickBarrier) Start() error {
// Suppose ttmsg.Timestamp from stream is always larger than the previous one,
// that `ttmsg.Timestamp > oldT`
ttmsg := (*timetickmsg).(*ms.TimeTickMsg)
log.Printf("[hardTimeTickBarrier] peer(%d)=%d\n", ttmsg.PeerId, ttmsg.Timestamp)
log.Printf("[hardTimeTickBarrier] peer(%d)=%d\n", ttmsg.PeerID, ttmsg.Timestamp)
oldT, ok := ttBarrier.peer2Tt[ttmsg.PeerId]
oldT, ok := ttBarrier.peer2Tt[ttmsg.PeerID]
if !ok {
log.Printf("[hardTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.PeerId)
log.Printf("[hardTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.PeerID)
continue
}
if oldT > state {
log.Printf("[hardTimeTickBarrier] Warning: peer(%d) timestamp(%d) ahead\n",
ttmsg.PeerId, ttmsg.Timestamp)
ttmsg.PeerID, ttmsg.Timestamp)
}
ttBarrier.peer2Tt[ttmsg.PeerId] = ttmsg.Timestamp
ttBarrier.peer2Tt[ttmsg.PeerID] = ttmsg.Timestamp
newState := ttBarrier.minTimestamp()
if newState > state {
......
......@@ -21,7 +21,7 @@ func getTtMsg(msgType internalPb.MsgType, peerID UniqueID, timeStamp uint64) *ms
}
timeTickResult := internalPb.TimeTickMsg{
MsgType: internalPb.MsgType_kTimeTick,
PeerId: peerID,
PeerID: peerID,
Timestamp: timeStamp,
}
timeTickMsg := &ms.TimeTickMsg{
......
......@@ -395,7 +395,7 @@ func insertRepackFunc(tsMsgs []*TsMsg, hashKeys [][]int32) (map[int32]*MsgPack,
keys := hashKeys[i]
timestampLen := len(insertRequest.Timestamps)
rowIDLen := len(insertRequest.RowIds)
rowIDLen := len(insertRequest.RowIDs)
rowDataLen := len(insertRequest.RowData)
keysLen := len(keys)
......@@ -411,14 +411,14 @@ func insertRepackFunc(tsMsgs []*TsMsg, hashKeys [][]int32) (map[int32]*MsgPack,
sliceRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqId: insertRequest.ReqId,
ReqID: insertRequest.ReqID,
CollectionName: insertRequest.CollectionName,
PartitionTag: insertRequest.PartitionTag,
SegmentId: insertRequest.SegmentId,
ChannelId: insertRequest.ChannelId,
ProxyId: insertRequest.ProxyId,
SegmentID: insertRequest.SegmentID,
ChannelID: insertRequest.ChannelID,
ProxyID: insertRequest.ProxyID,
Timestamps: []uint64{insertRequest.Timestamps[index]},
RowIds: []int64{insertRequest.RowIds[index]},
RowIDs: []int64{insertRequest.RowIDs[index]},
RowData: []*commonPb.Blob{insertRequest.RowData[index]},
}
......@@ -458,10 +458,10 @@ func deleteRepackFunc(tsMsgs []*TsMsg, hashKeys [][]int32) (map[int32]*MsgPack,
sliceRequest := internalPb.DeleteRequest{
MsgType: internalPb.MsgType_kDelete,
ReqId: deleteRequest.ReqId,
ReqID: deleteRequest.ReqID,
CollectionName: deleteRequest.CollectionName,
ChannelId: deleteRequest.ChannelId,
ProxyId: deleteRequest.ProxyId,
ChannelID: deleteRequest.ChannelID,
ProxyID: deleteRequest.ProxyID,
Timestamps: []uint64{deleteRequest.Timestamps[index]},
PrimaryKeys: []int64{deleteRequest.PrimaryKeys[index]},
}
......
......@@ -37,14 +37,14 @@ func getTsMsg(msgType MsgType, reqID UniqueID, hashValue int32) *TsMsg {
case internalPb.MsgType_kInsert:
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqId: reqID,
ReqID: reqID,
CollectionName: "Collection",
PartitionTag: "Partition",
SegmentId: 1,
ChannelId: 1,
ProxyId: 1,
SegmentID: 1,
ChannelID: 1,
ProxyID: 1,
Timestamps: []Timestamp{1},
RowIds: []int64{1},
RowIDs: []int64{1},
RowData: []*commonPb.Blob{{}},
}
insertMsg := &InsertMsg{
......@@ -55,10 +55,10 @@ func getTsMsg(msgType MsgType, reqID UniqueID, hashValue int32) *TsMsg {
case internalPb.MsgType_kDelete:
deleteRequest := internalPb.DeleteRequest{
MsgType: internalPb.MsgType_kDelete,
ReqId: reqID,
ReqID: reqID,
CollectionName: "Collection",
ChannelId: 1,
ProxyId: 1,
ChannelID: 1,
ProxyID: 1,
Timestamps: []Timestamp{1},
PrimaryKeys: []IntPrimaryKey{1},
}
......@@ -70,10 +70,10 @@ func getTsMsg(msgType MsgType, reqID UniqueID, hashValue int32) *TsMsg {
case internalPb.MsgType_kSearch:
searchRequest := internalPb.SearchRequest{
MsgType: internalPb.MsgType_kSearch,
ReqId: reqID,
ProxyId: 1,
ReqID: reqID,
ProxyID: 1,
Timestamp: 1,
ResultChannelId: 1,
ResultChannelID: 1,
}
searchMsg := &SearchMsg{
BaseMsg: baseMsg,
......@@ -84,11 +84,11 @@ func getTsMsg(msgType MsgType, reqID UniqueID, hashValue int32) *TsMsg {
searchResult := internalPb.SearchResult{
MsgType: internalPb.MsgType_kSearchResult,
Status: &commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS},
ReqId: reqID,
ProxyId: 1,
QueryNodeId: 1,
ReqID: reqID,
ProxyID: 1,
QueryNodeID: 1,
Timestamp: 1,
ResultChannelId: 1,
ResultChannelID: 1,
}
searchResultMsg := &SearchResultMsg{
BaseMsg: baseMsg,
......@@ -98,7 +98,7 @@ func getTsMsg(msgType MsgType, reqID UniqueID, hashValue int32) *TsMsg {
case internalPb.MsgType_kTimeTick:
timeTickResult := internalPb.TimeTickMsg{
MsgType: internalPb.MsgType_kTimeTick,
PeerId: reqID,
PeerID: reqID,
Timestamp: 1,
}
timeTickMsg := &TimeTickMsg{
......@@ -119,7 +119,7 @@ func getTimeTickMsg(reqID UniqueID, hashValue int32, time uint64) *TsMsg {
}
timeTickResult := internalPb.TimeTickMsg{
MsgType: internalPb.MsgType_kTimeTick,
PeerId: reqID,
PeerID: reqID,
Timestamp: time,
}
timeTickMsg := &TimeTickMsg{
......@@ -357,14 +357,14 @@ func TestStream_PulsarMsgStream_InsertRepackFunc(t *testing.T) {
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqId: 1,
ReqID: 1,
CollectionName: "Collection",
PartitionTag: "Partition",
SegmentId: 1,
ChannelId: 1,
ProxyId: 1,
SegmentID: 1,
ChannelID: 1,
ProxyID: 1,
Timestamps: []Timestamp{1, 1},
RowIds: []int64{1, 3},
RowIDs: []int64{1, 3},
RowData: []*commonPb.Blob{{}, {}},
}
insertMsg := &InsertMsg{
......@@ -410,10 +410,10 @@ func TestStream_PulsarMsgStream_DeleteRepackFunc(t *testing.T) {
deleteRequest := internalPb.DeleteRequest{
MsgType: internalPb.MsgType_kDelete,
ReqId: 1,
ReqID: 1,
CollectionName: "Collection",
ChannelId: 1,
ProxyId: 1,
ChannelID: 1,
ProxyID: 1,
Timestamps: []Timestamp{1, 1},
PrimaryKeys: []int64{1, 3},
}
......@@ -460,7 +460,7 @@ func TestStream_PulsarMsgStream_DefaultRepackFunc(t *testing.T) {
timeTickRequest := internalPb.TimeTickMsg{
MsgType: internalPb.MsgType_kTimeTick,
PeerId: int64(1),
PeerID: int64(1),
Timestamp: uint64(1),
}
timeTick := &TimeTickMsg{
......
......@@ -51,7 +51,7 @@ func newRepackFunc(tsMsgs []*TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, err
keys := hashKeys[i]
timestampLen := len(insertRequest.Timestamps)
rowIDLen := len(insertRequest.RowIds)
rowIDLen := len(insertRequest.RowIDs)
rowDataLen := len(insertRequest.RowData)
keysLen := len(keys)
......@@ -67,14 +67,14 @@ func newRepackFunc(tsMsgs []*TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, err
sliceRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqId: insertRequest.ReqId,
ReqID: insertRequest.ReqID,
CollectionName: insertRequest.CollectionName,
PartitionTag: insertRequest.PartitionTag,
SegmentId: insertRequest.SegmentId,
ChannelId: insertRequest.ChannelId,
ProxyId: insertRequest.ProxyId,
SegmentID: insertRequest.SegmentID,
ChannelID: insertRequest.ChannelID,
ProxyID: insertRequest.ProxyID,
Timestamps: []uint64{insertRequest.Timestamps[index]},
RowIds: []int64{insertRequest.RowIds[index]},
RowIDs: []int64{insertRequest.RowIDs[index]},
RowData: []*commonPb.Blob{insertRequest.RowData[index]},
}
......@@ -97,14 +97,14 @@ func getMsg(reqID UniqueID, hashValue int32) *TsMsg {
}
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqId: reqID,
ReqID: reqID,
CollectionName: "Collection",
PartitionTag: "Partition",
SegmentId: 1,
ChannelId: 1,
ProxyId: 1,
SegmentID: 1,
ChannelID: 1,
ProxyID: 1,
Timestamps: []Timestamp{1},
RowIds: []int64{1},
RowIDs: []int64{1},
RowData: []*commonPb.Blob{{}},
}
insertMsg := InsertMsg{
......
......@@ -6,32 +6,32 @@ import "common.proto";
import "schema.proto";
message TenantMeta {
int64 id = 1;
int64 ID = 1;
int64 num_query_nodes = 2;
repeated string insert_channel_ids = 3;
string query_channel_id = 4;
repeated string insert_channelIDs = 3;
string query_channelID = 4;
}
message ProxyMeta {
int64 id = 1;
int64 ID = 1;
common.Address address = 2;
repeated string result_channel_ids = 3;
repeated string result_channelIDs = 3;
}
message CollectionMeta {
int64 id=1;
int64 ID=1;
schema.CollectionSchema schema=2;
uint64 create_time=3;
repeated int64 segment_ids=4;
repeated int64 segmentIDs=4;
repeated string partition_tags=5;
}
message SegmentMeta {
int64 segment_id=1;
int64 collection_id =2;
int64 segmentID=1;
int64 collectionID =2;
string partition_tag=3;
int32 channel_start=4;
int32 channel_end=5;
......
......@@ -23,10 +23,10 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type TenantMeta struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
NumQueryNodes int64 `protobuf:"varint,2,opt,name=num_query_nodes,json=numQueryNodes,proto3" json:"num_query_nodes,omitempty"`
InsertChannelIds []string `protobuf:"bytes,3,rep,name=insert_channel_ids,json=insertChannelIds,proto3" json:"insert_channel_ids,omitempty"`
QueryChannelId string `protobuf:"bytes,4,opt,name=query_channel_id,json=queryChannelId,proto3" json:"query_channel_id,omitempty"`
InsertChannelIDs []string `protobuf:"bytes,3,rep,name=insert_channelIDs,json=insertChannelIDs,proto3" json:"insert_channelIDs,omitempty"`
QueryChannelID string `protobuf:"bytes,4,opt,name=query_channelID,json=queryChannelID,proto3" json:"query_channelID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
......@@ -57,9 +57,9 @@ func (m *TenantMeta) XXX_DiscardUnknown() {
var xxx_messageInfo_TenantMeta proto.InternalMessageInfo
func (m *TenantMeta) GetId() int64 {
func (m *TenantMeta) GetID() int64 {
if m != nil {
return m.Id
return m.ID
}
return 0
}
......@@ -71,24 +71,24 @@ func (m *TenantMeta) GetNumQueryNodes() int64 {
return 0
}
func (m *TenantMeta) GetInsertChannelIds() []string {
func (m *TenantMeta) GetInsertChannelIDs() []string {
if m != nil {
return m.InsertChannelIds
return m.InsertChannelIDs
}
return nil
}
func (m *TenantMeta) GetQueryChannelId() string {
func (m *TenantMeta) GetQueryChannelID() string {
if m != nil {
return m.QueryChannelId
return m.QueryChannelID
}
return ""
}
type ProxyMeta struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Address *commonpb.Address `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
ResultChannelIds []string `protobuf:"bytes,3,rep,name=result_channel_ids,json=resultChannelIds,proto3" json:"result_channel_ids,omitempty"`
ResultChannelIDs []string `protobuf:"bytes,3,rep,name=result_channelIDs,json=resultChannelIDs,proto3" json:"result_channelIDs,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
......@@ -119,9 +119,9 @@ func (m *ProxyMeta) XXX_DiscardUnknown() {
var xxx_messageInfo_ProxyMeta proto.InternalMessageInfo
func (m *ProxyMeta) GetId() int64 {
func (m *ProxyMeta) GetID() int64 {
if m != nil {
return m.Id
return m.ID
}
return 0
}
......@@ -133,18 +133,18 @@ func (m *ProxyMeta) GetAddress() *commonpb.Address {
return nil
}
func (m *ProxyMeta) GetResultChannelIds() []string {
func (m *ProxyMeta) GetResultChannelIDs() []string {
if m != nil {
return m.ResultChannelIds
return m.ResultChannelIDs
}
return nil
}
type CollectionMeta struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
CreateTime uint64 `protobuf:"varint,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
SegmentIds []int64 `protobuf:"varint,4,rep,packed,name=segment_ids,json=segmentIds,proto3" json:"segment_ids,omitempty"`
SegmentIDs []int64 `protobuf:"varint,4,rep,packed,name=segmentIDs,proto3" json:"segmentIDs,omitempty"`
PartitionTags []string `protobuf:"bytes,5,rep,name=partition_tags,json=partitionTags,proto3" json:"partition_tags,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
......@@ -176,9 +176,9 @@ func (m *CollectionMeta) XXX_DiscardUnknown() {
var xxx_messageInfo_CollectionMeta proto.InternalMessageInfo
func (m *CollectionMeta) GetId() int64 {
func (m *CollectionMeta) GetID() int64 {
if m != nil {
return m.Id
return m.ID
}
return 0
}
......@@ -197,9 +197,9 @@ func (m *CollectionMeta) GetCreateTime() uint64 {
return 0
}
func (m *CollectionMeta) GetSegmentIds() []int64 {
func (m *CollectionMeta) GetSegmentIDs() []int64 {
if m != nil {
return m.SegmentIds
return m.SegmentIDs
}
return nil
}
......@@ -212,8 +212,8 @@ func (m *CollectionMeta) GetPartitionTags() []string {
}
type SegmentMeta struct {
SegmentId int64 `protobuf:"varint,1,opt,name=segment_id,json=segmentId,proto3" json:"segment_id,omitempty"`
CollectionId int64 `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionTag string `protobuf:"bytes,3,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`
ChannelStart int32 `protobuf:"varint,4,opt,name=channel_start,json=channelStart,proto3" json:"channel_start,omitempty"`
ChannelEnd int32 `protobuf:"varint,5,opt,name=channel_end,json=channelEnd,proto3" json:"channel_end,omitempty"`
......@@ -250,16 +250,16 @@ func (m *SegmentMeta) XXX_DiscardUnknown() {
var xxx_messageInfo_SegmentMeta proto.InternalMessageInfo
func (m *SegmentMeta) GetSegmentId() int64 {
func (m *SegmentMeta) GetSegmentID() int64 {
if m != nil {
return m.SegmentId
return m.SegmentID
}
return 0
}
func (m *SegmentMeta) GetCollectionId() int64 {
func (m *SegmentMeta) GetCollectionID() int64 {
if m != nil {
return m.CollectionId
return m.CollectionID
}
return 0
}
......@@ -316,37 +316,37 @@ func init() {
func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) }
var fileDescriptor_975d306d62b73e88 = []byte{
// 505 bytes of a gzipped FileDescriptorProto
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0x4d, 0x8f, 0xd3, 0x30,
0x10, 0x86, 0x95, 0xa6, 0x5f, 0x99, 0x7e, 0xec, 0x92, 0x53, 0x58, 0x58, 0x51, 0x15, 0x2d, 0xca,
0x01, 0x5a, 0x09, 0x24, 0x6e, 0x20, 0x60, 0xc5, 0x81, 0x03, 0x08, 0xd2, 0x9e, 0xb8, 0x44, 0x6e,
0x3c, 0x6a, 0x2d, 0xc5, 0x76, 0xb1, 0x1d, 0x96, 0xdd, 0x1b, 0xbf, 0x81, 0x2b, 0x3f, 0x88, 0x9f,
0x85, 0xfc, 0x41, 0x43, 0xa5, 0x1e, 0xf3, 0xcc, 0xeb, 0xf1, 0x3b, 0xef, 0x38, 0x70, 0x86, 0xa6,
0xa2, 0x25, 0x47, 0x43, 0x16, 0x7b, 0x25, 0x8d, 0x4c, 0xef, 0x71, 0x56, 0x7f, 0x6f, 0xb4, 0xff,
0x5a, 0xd8, 0xea, 0xc5, 0xb8, 0x92, 0x9c, 0x4b, 0xe1, 0xd1, 0xc5, 0x58, 0x57, 0x3b, 0xe4, 0x41,
0x3e, 0xff, 0x1d, 0x01, 0xac, 0x51, 0x10, 0x61, 0x3e, 0xa2, 0x21, 0xe9, 0x14, 0x3a, 0x8c, 0x66,
0xd1, 0x2c, 0xca, 0xe3, 0xa2, 0xc3, 0x68, 0xfa, 0x04, 0xce, 0x44, 0xc3, 0xcb, 0x6f, 0x0d, 0xaa,
0xdb, 0x52, 0x48, 0x8a, 0x3a, 0xeb, 0xb8, 0xe2, 0x44, 0x34, 0xfc, 0x8b, 0xa5, 0x9f, 0x2c, 0x4c,
0x9f, 0x42, 0xca, 0x84, 0x46, 0x65, 0xca, 0x6a, 0x47, 0x84, 0xc0, 0xba, 0x64, 0x54, 0x67, 0xf1,
0x2c, 0xce, 0x93, 0xe2, 0xdc, 0x57, 0xae, 0x7d, 0xe1, 0x03, 0xd5, 0x69, 0x0e, 0xe7, 0xbe, 0x63,
0x2b, 0xce, 0xba, 0xb3, 0x28, 0x4f, 0x8a, 0xa9, 0xe3, 0x07, 0xe9, 0xfc, 0x67, 0x04, 0xc9, 0x67,
0x25, 0x7f, 0xdc, 0x9e, 0x74, 0xf7, 0x12, 0x06, 0x84, 0x52, 0x85, 0xda, 0xbb, 0x1a, 0x3d, 0x7f,
0xb8, 0x38, 0x9a, 0x3e, 0xcc, 0xfd, 0xd6, 0x6b, 0x8a, 0x7f, 0x62, 0xeb, 0x56, 0xa1, 0x6e, 0xea,
0x93, 0x6e, 0x7d, 0xa5, 0x75, 0x3b, 0xff, 0x13, 0xc1, 0xf4, 0x5a, 0xd6, 0x35, 0x56, 0x86, 0x49,
0x71, 0xd2, 0xc8, 0x2b, 0xe8, 0xfb, 0x54, 0x83, 0x8f, 0xab, 0x63, 0x1f, 0x21, 0xf1, 0xb6, 0xc9,
0xca, 0x81, 0x22, 0x1c, 0x4a, 0x1f, 0xc1, 0xa8, 0x52, 0x48, 0x0c, 0x96, 0x86, 0x71, 0xcc, 0xe2,
0x59, 0x94, 0x77, 0x0b, 0xf0, 0x68, 0xcd, 0x38, 0x5a, 0x81, 0xc6, 0x2d, 0x47, 0x61, 0x9c, 0xd3,
0xee, 0x2c, 0xce, 0xe3, 0x02, 0x02, 0xb2, 0x89, 0x5e, 0xc1, 0x74, 0x4f, 0x94, 0x61, 0xb6, 0x79,
0x69, 0xc8, 0x56, 0x67, 0x3d, 0x37, 0xcd, 0xe4, 0x40, 0xd7, 0x64, 0xab, 0xe7, 0xbf, 0x3a, 0x30,
0x5a, 0xf9, 0x53, 0x6e, 0x8e, 0x4b, 0x80, 0xb6, 0x6f, 0x98, 0x27, 0x39, 0xb4, 0x4d, 0x1f, 0xc3,
0xa4, 0x3a, 0x78, 0xb6, 0x0a, 0xbf, 0xfb, 0x71, 0x0b, 0xbd, 0xe8, 0xe8, 0x6a, 0x67, 0x3f, 0x29,
0xc6, 0xff, 0xdf, 0xec, 0x3a, 0x85, 0xa8, 0xb5, 0x21, 0xca, 0xb8, 0x75, 0xf7, 0x8a, 0x71, 0x80,
0x2b, 0xcb, 0x5c, 0x0c, 0x41, 0x84, 0x82, 0x66, 0x3d, 0x27, 0x81, 0x80, 0xde, 0x0b, 0x9a, 0x3e,
0x80, 0x44, 0xee, 0x51, 0xf8, 0x94, 0xfa, 0x2e, 0xa5, 0xa1, 0x05, 0x2e, 0xa3, 0x4b, 0x80, 0xaa,
0x96, 0x3a, 0x64, 0x38, 0x70, 0xd5, 0xc4, 0x11, 0x57, 0xbe, 0x0f, 0x43, 0xfb, 0x92, 0x95, 0xbc,
0xd1, 0xd9, 0xd0, 0x8d, 0x31, 0x10, 0x0d, 0x2f, 0xe4, 0x8d, 0x7e, 0xf7, 0xe6, 0xeb, 0xeb, 0x2d,
0x33, 0xbb, 0x66, 0x63, 0x1f, 0xcc, 0xf2, 0x8e, 0xd5, 0x35, 0xbb, 0x33, 0x58, 0xed, 0x96, 0x7e,
0x89, 0xcf, 0x28, 0xd3, 0x46, 0xb1, 0x4d, 0x63, 0x90, 0x2e, 0x99, 0x30, 0xa8, 0x04, 0xa9, 0x97,
0x6e, 0xb3, 0x4b, 0xfb, 0x7f, 0xed, 0x37, 0x9b, 0xbe, 0xfb, 0x7a, 0xf1, 0x37, 0x00, 0x00, 0xff,
0xff, 0xfe, 0x37, 0xae, 0xbe, 0x8e, 0x03, 0x00, 0x00,
0x10, 0x86, 0x95, 0xa6, 0x5f, 0x99, 0x7e, 0xb1, 0x39, 0x85, 0x65, 0x81, 0xaa, 0x68, 0xa1, 0x12,
0xa2, 0x95, 0x40, 0xe2, 0x06, 0x02, 0xb6, 0x1c, 0x7a, 0x00, 0x41, 0xda, 0x13, 0x97, 0xc8, 0x4d,
0x46, 0xad, 0xa5, 0xd8, 0x2e, 0xb6, 0xc3, 0xb2, 0x7b, 0xe2, 0x2f, 0x70, 0xe5, 0x0f, 0xf1, 0xb7,
0x90, 0x3f, 0x48, 0xb7, 0x52, 0x8f, 0x79, 0xe6, 0xf5, 0xf8, 0x9d, 0x77, 0x1c, 0x18, 0xa1, 0xce,
0x8b, 0x8c, 0xa1, 0x26, 0xb3, 0xbd, 0x14, 0x5a, 0xc4, 0x67, 0x8c, 0x96, 0x3f, 0x2a, 0xe5, 0xbe,
0x66, 0xa6, 0x7a, 0xde, 0xcf, 0x05, 0x63, 0x82, 0x3b, 0x74, 0xde, 0x57, 0xf9, 0x0e, 0x99, 0x97,
0x4f, 0xfe, 0x04, 0x00, 0x6b, 0xe4, 0x84, 0xeb, 0x4f, 0xa8, 0x49, 0x3c, 0x84, 0xc6, 0x72, 0x91,
0x04, 0xe3, 0x60, 0x1a, 0xa6, 0x8d, 0xe5, 0x22, 0x7e, 0x0a, 0x23, 0x5e, 0xb1, 0xec, 0x7b, 0x85,
0xf2, 0x26, 0xe3, 0xa2, 0x40, 0x95, 0x34, 0x6c, 0x71, 0xc0, 0x2b, 0xf6, 0xd5, 0xd0, 0xcf, 0x06,
0xc6, 0xcf, 0xe1, 0x8c, 0x72, 0x85, 0x52, 0x67, 0xf9, 0x8e, 0x70, 0x8e, 0xe5, 0x72, 0xa1, 0x92,
0x70, 0x1c, 0x4e, 0xa3, 0xf4, 0x9e, 0x2b, 0x5c, 0xd5, 0x3c, 0x7e, 0x06, 0x23, 0xd7, 0xb0, 0xd6,
0x26, 0xcd, 0x71, 0x30, 0x8d, 0xd2, 0xa1, 0xc5, 0xb5, 0x72, 0xf2, 0x2b, 0x80, 0xe8, 0x8b, 0x14,
0x3f, 0x6f, 0x4e, 0x7a, 0x7b, 0x0d, 0x1d, 0x52, 0x14, 0x12, 0x95, 0xf3, 0xd4, 0x7b, 0x79, 0x31,
0x3b, 0x9a, 0xdd, 0x4f, 0xfd, 0xde, 0x69, 0xd2, 0xff, 0x62, 0xe3, 0x55, 0xa2, 0xaa, 0xca, 0x53,
0x5e, 0x5d, 0xe1, 0xe0, 0x75, 0xf2, 0x37, 0x80, 0xe1, 0x95, 0x28, 0x4b, 0xcc, 0x35, 0x15, 0xfc,
0xa4, 0x8f, 0x37, 0xd0, 0x76, 0x91, 0x7a, 0x1b, 0x97, 0xc7, 0x36, 0x7c, 0xdc, 0x87, 0x26, 0x2b,
0x0b, 0x52, 0x7f, 0x28, 0x7e, 0x0c, 0xbd, 0x5c, 0x22, 0xd1, 0x98, 0x69, 0xca, 0x30, 0x09, 0xc7,
0xc1, 0xb4, 0x99, 0x82, 0x43, 0x6b, 0xca, 0x30, 0x7e, 0x04, 0xa0, 0x70, 0xcb, 0x90, 0x6b, 0x63,
0xb4, 0x39, 0x0e, 0xa7, 0x61, 0x7a, 0x87, 0xc4, 0x97, 0x30, 0xdc, 0x13, 0xa9, 0xa9, 0xe9, 0x9d,
0x69, 0xb2, 0x55, 0x49, 0xcb, 0x0e, 0x33, 0xa8, 0xe9, 0x9a, 0x6c, 0xd5, 0xe4, 0x77, 0x03, 0x7a,
0x2b, 0x77, 0xca, 0x8e, 0x71, 0x01, 0x51, 0xdd, 0xc4, 0x4f, 0x73, 0x00, 0xf1, 0x04, 0xfa, 0x79,
0xed, 0x78, 0xb9, 0xf0, 0x5b, 0x3f, 0x62, 0xf1, 0x13, 0x18, 0x1c, 0x5d, 0x6c, 0xbd, 0x47, 0x69,
0xff, 0xee, 0xbd, 0x46, 0xe4, 0x63, 0xce, 0x94, 0x26, 0x52, 0xdb, 0x55, 0xb7, 0xd2, 0xbe, 0x87,
0x2b, 0xc3, 0x6c, 0x06, 0x5e, 0x84, 0xbc, 0x48, 0x5a, 0x56, 0x02, 0x1e, 0x7d, 0xe4, 0x45, 0xfc,
0x00, 0x22, 0xb1, 0x47, 0xee, 0x22, 0x6a, 0xdb, 0x88, 0xba, 0x06, 0xd8, 0x80, 0x1e, 0x02, 0xe4,
0xa5, 0x50, 0x3e, 0xc0, 0x8e, 0xad, 0x46, 0x96, 0xd8, 0xf2, 0x7d, 0xe8, 0x9a, 0x37, 0x2c, 0xc5,
0xb5, 0x4a, 0xba, 0x76, 0x8c, 0x0e, 0xaf, 0x58, 0x2a, 0xae, 0xd5, 0x87, 0x77, 0xdf, 0xde, 0x6e,
0xa9, 0xde, 0x55, 0x1b, 0xf3, 0x58, 0xe6, 0xb7, 0xb4, 0x2c, 0xe9, 0xad, 0xc6, 0x7c, 0x37, 0x77,
0x1b, 0x7c, 0x51, 0x50, 0xa5, 0x25, 0xdd, 0x54, 0x1a, 0x8b, 0x39, 0xe5, 0x1a, 0x25, 0x27, 0xe5,
0xdc, 0xae, 0x75, 0x6e, 0xfe, 0xac, 0xfd, 0x66, 0xd3, 0xb6, 0x5f, 0xaf, 0xfe, 0x05, 0x00, 0x00,
0xff, 0xff, 0xc2, 0xde, 0x28, 0x4b, 0x88, 0x03, 0x00, 0x00,
}
......@@ -47,20 +47,20 @@ enum PeerRole {
}
message IdRequest {
int64 peer_id = 1;
message IDRequest {
int64 peerID = 1;
PeerRole role = 2;
uint32 count = 3;
}
message IdResponse {
message IDResponse {
common.Status status = 1;
int64 id = 2;
int64 ID = 2;
uint32 count = 3;
}
message TsoRequest {
int64 peer_id = 1;
int64 peerID = 1;
PeerRole role = 2;
uint32 count = 3;
}
......@@ -74,113 +74,113 @@ message TsoResponse {
message CreateCollectionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
common.Blob schema = 5;
}
message DropCollectionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.CollectionName collection_name = 5;
}
message HasCollectionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.CollectionName collection_name = 5;
}
message DescribeCollectionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.CollectionName collection_name = 5;
}
message ShowCollectionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
}
message CreatePartitionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.PartitionName partition_name = 5;
}
message DropPartitionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.PartitionName partition_name = 5;
}
message HasPartitionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.PartitionName partition_name = 5;
}
message DescribePartitionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.PartitionName partition_name = 5;
}
message ShowPartitionRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
int64 proxyID = 4;
service.CollectionName collection_name = 5;
}
message InsertRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
string collection_name = 3;
string partition_tag = 4;
int64 segment_id = 5;
int64 channel_id = 6;
int64 proxy_id = 7;
int64 segmentID = 5;
int64 channelID = 6;
int64 proxyID = 7;
repeated uint64 timestamps = 8;
repeated int64 row_ids = 9;
repeated int64 rowIDs = 9;
repeated common.Blob row_data = 10;
}
message DeleteRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 reqID = 2;
string collection_name = 3;
int64 channel_id = 4;
int64 proxy_id = 5;
int64 channelID = 4;
int64 proxyID = 5;
repeated uint64 timestamps = 6;
repeated int64 primary_keys = 7;
}
......@@ -188,10 +188,10 @@ message DeleteRequest {
message SearchRequest {
MsgType msg_type = 1;
int64 req_id = 2;
int64 proxy_id = 3;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
int64 result_channel_id = 5;
int64 result_channelID = 5;
common.Blob query = 6;
}
......@@ -199,37 +199,37 @@ message SearchRequest {
message SearchResult {
MsgType msg_type = 1;
common.Status status = 2;
int64 req_id = 3;
int64 proxy_id = 4;
int64 query_node_id = 5;
int64 reqID = 3;
int64 proxyID = 4;
int64 query_nodeID = 5;
uint64 timestamp = 6;
int64 result_channel_id = 7;
int64 result_channelID = 7;
repeated service.Hits hits = 8;
}
message TimeTickMsg {
MsgType msg_type = 1;
int64 peer_id = 2;
int64 peerID = 2;
uint64 timestamp = 3;
}
message Key2Seg {
int64 row_id = 1;
int64 rowID = 1;
int64 primary_key = 2;
uint64 timestamp = 3;
bool is_valid = 4;
repeated int64 segment_ids = 5;
repeated int64 segmentIDs = 5;
}
message Key2SegMsg {
int64 req_id = 1;
int64 reqID = 1;
repeated Key2Seg key2seg = 2;
}
message SegmentStats {
int64 segment_id = 1;
int64 segmentID = 1;
int64 memory_size = 2;
int64 num_rows = 3;
bool recently_modified = 4;
......@@ -237,6 +237,6 @@ message SegmentStats {
message QueryNodeSegStats {
MsgType msg_type = 1;
int64 peer_id = 2;
int64 peerID = 2;
repeated SegmentStats seg_stats = 3;
}
\ No newline at end of file
......@@ -90,5 +90,5 @@ service Master {
rpc AllocTimestamp(internal.TsoRequest) returns (internal.TsoResponse) {}
rpc AllocId(internal.IdRequest) returns (internal.IdResponse) {}
rpc AllocID(internal.IDRequest) returns (internal.IDResponse) {}
}
......@@ -33,31 +33,31 @@ var fileDescriptor_f9c348dec43a6705 = []byte{
// 432 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x51, 0xaf, 0xd2, 0x30,
0x14, 0xc7, 0x79, 0xba, 0xc6, 0x86, 0xcb, 0xf5, 0xd6, 0x37, 0x7c, 0xf1, 0xee, 0xc9, 0x80, 0x6c,
0x46, 0xbf, 0x80, 0x02, 0x0f, 0x90, 0x68, 0x42, 0x80, 0x17, 0x35, 0x06, 0xbb, 0xad, 0x81, 0xc6,
0x46, 0xbf, 0x80, 0xc2, 0x1e, 0x20, 0xd1, 0x84, 0x00, 0x2f, 0x6a, 0x0c, 0x76, 0xa3, 0x81, 0xc6,
0x6e, 0x9d, 0x3d, 0x67, 0x98, 0xf0, 0xe1, 0xfc, 0x6c, 0x66, 0x1b, 0xdd, 0x56, 0xa1, 0x88, 0xf7,
0x8d, 0xb6, 0xff, 0xf3, 0xfb, 0x73, 0xce, 0xf9, 0x67, 0xa4, 0x9b, 0x30, 0x40, 0xae, 0xfd, 0x4c,
0x2b, 0x54, 0xf4, 0x79, 0x22, 0xe4, 0x3e, 0x87, 0xea, 0xe4, 0x57, 0x4f, 0xfd, 0x6e, 0xa4, 0x92,
0x44, 0xa5, 0xd5, 0x65, 0x9f, 0x8a, 0x14, 0xb9, 0x4e, 0x99, 0xdc, 0x24, 0xb0, 0x3d, 0xde, 0xdd,
0x03, 0xd7, 0x7b, 0x11, 0xf1, 0xe6, 0xea, 0xed, 0xef, 0xa7, 0xe4, 0xe6, 0x53, 0x59, 0x4f, 0x19,
0x79, 0x36, 0xd1, 0x9c, 0x21, 0x9f, 0x28, 0x29, 0x79, 0x84, 0x42, 0xa5, 0xd4, 0xf7, 0x2d, 0x27,
0xc3, 0xf4, 0xff, 0x16, 0x2e, 0xf9, 0xcf, 0x9c, 0x03, 0xf6, 0x5f, 0xd8, 0xfa, 0xe3, 0x3f, 0x5a,
0x21, 0xc3, 0x1c, 0xbc, 0x0e, 0xfd, 0x46, 0x7a, 0x53, 0xad, 0xb2, 0x96, 0xc1, 0x6b, 0x87, 0x81,
0x2d, 0xbb, 0x12, 0x1f, 0x92, 0xdb, 0x19, 0x83, 0x16, 0x7d, 0xe8, 0xa0, 0x5b, 0x2a, 0x03, 0xf7,
0x6c, 0xf1, 0x71, 0x56, 0xfe, 0x58, 0x29, 0xb9, 0xe4, 0x90, 0xa9, 0x14, 0xb8, 0xd7, 0xa1, 0x39,
0xa1, 0x53, 0x0e, 0x91, 0x16, 0x61, 0x7b, 0x4e, 0x6f, 0x5c, 0x6d, 0x9c, 0x48, 0x8d, 0xdb, 0xf0,
0xbc, 0x5b, 0x23, 0xac, 0x4a, 0xb3, 0xe2, 0xa7, 0xd7, 0xa1, 0x3f, 0xc8, 0xdd, 0x6a, 0xa7, 0x7e,
0x35, 0xcf, 0xe0, 0x1c, 0x9d, 0xad, 0x33, 0x7e, 0xaf, 0xce, 0xfb, 0xad, 0x50, 0x8b, 0x74, 0xfb,
0x51, 0x00, 0xb6, 0x7a, 0xdc, 0x90, 0xbb, 0x6a, 0xc1, 0x0b, 0xa6, 0x51, 0x94, 0x0d, 0x8e, 0x2e,
0x06, 0xa1, 0xd6, 0x5d, 0xb9, 0xa8, 0xaf, 0xe4, 0xb6, 0x58, 0x70, 0x83, 0x1f, 0x5e, 0x88, 0xc1,
0xff, 0xc2, 0xbf, 0x93, 0xee, 0x8c, 0x41, 0xc3, 0x1e, 0xb8, 0x43, 0x70, 0x82, 0xbe, 0x2e, 0x03,
0x9a, 0xdc, 0x9b, 0xc5, 0x36, 0x36, 0xc1, 0x3f, 0x22, 0x70, 0xe2, 0x35, 0x38, 0xef, 0x55, 0xeb,
0xec, 0x00, 0x08, 0xd2, 0x2b, 0x16, 0x5b, 0xbf, 0x82, 0x73, 0x66, 0x96, 0xec, 0x31, 0xeb, 0xff,
0x4c, 0x7a, 0x1f, 0xa4, 0x54, 0xd1, 0x5a, 0x24, 0x1c, 0x90, 0x25, 0x19, 0x7d, 0x70, 0x58, 0xad,
0x41, 0x39, 0x26, 0x67, 0x4b, 0x6a, 0xf4, 0x82, 0x3c, 0x29, 0xd1, 0xf3, 0x98, 0xbe, 0x74, 0x14,
0xcc, 0x63, 0x83, 0x7c, 0xb8, 0xa0, 0x30, 0xc4, 0xf1, 0xf8, 0xcb, 0xfb, 0xad, 0xc0, 0x5d, 0x1e,
0x16, 0x39, 0x08, 0x0e, 0x42, 0x4a, 0x71, 0x40, 0x1e, 0xed, 0x82, 0xaa, 0x76, 0x14, 0x0b, 0x40,
0x2d, 0xc2, 0x1c, 0x79, 0x1c, 0x18, 0x42, 0x50, 0x02, 0x83, 0xea, 0xbb, 0x99, 0x85, 0xe1, 0x4d,
0x79, 0x7e, 0xf7, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x05, 0x0f, 0x90, 0x65, 0x05, 0x00, 0x00,
0x2b, 0x54, 0xf4, 0x79, 0x22, 0xe4, 0x3e, 0x87, 0xea, 0xe4, 0x57, 0x4f, 0xfd, 0x6e, 0xac, 0x92,
0x44, 0xa5, 0xd5, 0x65, 0x9f, 0x8a, 0x14, 0xb9, 0x4e, 0x99, 0x5c, 0x27, 0xb0, 0x3d, 0xde, 0xdd,
0x03, 0xd7, 0x7b, 0x11, 0xf3, 0xe6, 0xea, 0xed, 0xef, 0xa7, 0xe4, 0xe6, 0x53, 0x59, 0x4f, 0x19,
0x79, 0x36, 0xd1, 0x9c, 0x21, 0x9f, 0x28, 0x29, 0x79, 0x8c, 0x42, 0xa5, 0xd4, 0xf7, 0x2d, 0x27,
0xc3, 0xf4, 0xff, 0x16, 0x2e, 0xf8, 0xcf, 0x9c, 0x03, 0xf6, 0x5f, 0xd8, 0xfa, 0xe3, 0x3f, 0x5a,
0x22, 0xc3, 0x1c, 0xbc, 0x0e, 0xfd, 0x46, 0x7a, 0xa1, 0x56, 0x59, 0xcb, 0xe0, 0xb5, 0xc3, 0xc0,
0x96, 0x5d, 0x89, 0x8f, 0xc8, 0xed, 0x94, 0x41, 0x8b, 0x3e, 0x74, 0xd0, 0x2d, 0x95, 0x81, 0x7b,
0xb6, 0xf8, 0x38, 0x2b, 0x7f, 0xac, 0x94, 0x5c, 0x70, 0xc8, 0x54, 0x0a, 0xdc, 0xeb, 0xd0, 0x9c,
0xd0, 0x90, 0x43, 0xac, 0x45, 0xd4, 0x9e, 0xd3, 0x1b, 0x57, 0x1b, 0x27, 0x52, 0xe3, 0x36, 0x3c,
0xef, 0xd6, 0x08, 0xab, 0xd2, 0xac, 0xf8, 0xe9, 0x75, 0xe8, 0x0f, 0x72, 0xb7, 0xdc, 0xa9, 0x5f,
0xcd, 0x33, 0x38, 0x47, 0x67, 0xeb, 0x8c, 0xdf, 0xab, 0xf3, 0x7e, 0x4b, 0xd4, 0x22, 0xdd, 0x7e,
0x14, 0x80, 0xad, 0x1e, 0xd7, 0xe4, 0xae, 0x5a, 0xf0, 0x9c, 0x69, 0x14, 0x65, 0x83, 0xa3, 0x8b,
0x41, 0xa8, 0x75, 0x57, 0x2e, 0xea, 0x2b, 0xb9, 0x2d, 0x16, 0xdc, 0xe0, 0x87, 0x17, 0x62, 0xf0,
0xbf, 0xf0, 0xef, 0xa4, 0x3b, 0x65, 0xd0, 0xb0, 0x07, 0xee, 0x10, 0x9c, 0xa0, 0xaf, 0xcb, 0x80,
0x26, 0xf7, 0x66, 0xb1, 0x8d, 0x4d, 0xf0, 0x8f, 0x08, 0x9c, 0x78, 0x0d, 0xce, 0x7b, 0xd5, 0x3a,
0x3b, 0x00, 0x82, 0xf4, 0x8a, 0xc5, 0xd6, 0xaf, 0xe0, 0x9c, 0x99, 0x25, 0x7b, 0xcc, 0xfa, 0x3f,
0x93, 0xde, 0x07, 0x29, 0x55, 0xbc, 0x12, 0x09, 0x07, 0x64, 0x49, 0x46, 0x1f, 0x1c, 0x56, 0x2b,
0x50, 0x8e, 0xc9, 0xd9, 0x92, 0x1a, 0x3d, 0x27, 0x4f, 0x4a, 0xf4, 0x2c, 0xa4, 0x2f, 0x1d, 0x05,
0xb3, 0xd0, 0x20, 0x1f, 0x2e, 0x28, 0x0c, 0x71, 0x3c, 0xfe, 0xf2, 0x7e, 0x2b, 0x70, 0x97, 0x47,
0x45, 0x0e, 0x82, 0x83, 0x90, 0x52, 0x1c, 0x90, 0xc7, 0xbb, 0xa0, 0xaa, 0x1d, 0x6d, 0x04, 0xa0,
0x16, 0x51, 0x8e, 0x7c, 0x13, 0x18, 0x42, 0x50, 0x02, 0x83, 0xea, 0xbb, 0x99, 0x45, 0xd1, 0x4d,
0x79, 0x7e, 0xf7, 0x27, 0x00, 0x00, 0xff, 0xff, 0xba, 0x9e, 0x0e, 0x5d, 0x65, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
......@@ -133,7 +133,7 @@ type MasterClient interface {
// @return StringListResponse
ShowPartitions(ctx context.Context, in *internalpb.ShowPartitionRequest, opts ...grpc.CallOption) (*servicepb.StringListResponse, error)
AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error)
AllocId(ctx context.Context, in *internalpb.IdRequest, opts ...grpc.CallOption) (*internalpb.IdResponse, error)
AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error)
}
type masterClient struct {
......@@ -243,9 +243,9 @@ func (c *masterClient) AllocTimestamp(ctx context.Context, in *internalpb.TsoReq
return out, nil
}
func (c *masterClient) AllocId(ctx context.Context, in *internalpb.IdRequest, opts ...grpc.CallOption) (*internalpb.IdResponse, error) {
out := new(internalpb.IdResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocId", in, out, opts...)
func (c *masterClient) AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error) {
out := new(internalpb.IDResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocID", in, out, opts...)
if err != nil {
return nil, err
}
......@@ -315,7 +315,7 @@ type MasterServer interface {
// @return StringListResponse
ShowPartitions(context.Context, *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error)
AllocTimestamp(context.Context, *internalpb.TsoRequest) (*internalpb.TsoResponse, error)
AllocId(context.Context, *internalpb.IdRequest) (*internalpb.IdResponse, error)
AllocID(context.Context, *internalpb.IDRequest) (*internalpb.IDResponse, error)
}
// UnimplementedMasterServer can be embedded to have forward compatible implementations.
......@@ -355,8 +355,8 @@ func (*UnimplementedMasterServer) ShowPartitions(ctx context.Context, req *inter
func (*UnimplementedMasterServer) AllocTimestamp(ctx context.Context, req *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocTimestamp not implemented")
}
func (*UnimplementedMasterServer) AllocId(ctx context.Context, req *internalpb.IdRequest) (*internalpb.IdResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocId not implemented")
func (*UnimplementedMasterServer) AllocID(ctx context.Context, req *internalpb.IDRequest) (*internalpb.IDResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocID not implemented")
}
func RegisterMasterServer(s *grpc.Server, srv MasterServer) {
......@@ -561,20 +561,20 @@ func _Master_AllocTimestamp_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _Master_AllocId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.IdRequest)
func _Master_AllocID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.IDRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).AllocId(ctx, in)
return srv.(MasterServer).AllocID(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.master.Master/AllocId",
FullMethod: "/milvus.proto.master.Master/AllocID",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).AllocId(ctx, req.(*internalpb.IdRequest))
return srv.(MasterServer).AllocID(ctx, req.(*internalpb.IDRequest))
}
return interceptor(ctx, in, info, handler)
}
......@@ -628,8 +628,8 @@ var _Master_serviceDesc = grpc.ServiceDesc{
Handler: _Master_AllocTimestamp_Handler,
},
{
MethodName: "AllocId",
Handler: _Master_AllocId_Handler,
MethodName: "AllocID",
Handler: _Master_AllocID_Handler,
},
},
Streams: []grpc.StreamDesc{},
......
......@@ -42,6 +42,6 @@ message FieldSchema {
message CollectionSchema {
string name = 1;
string description = 2;
bool auto_id = 3;
bool autoID = 3;
repeated FieldSchema fields = 4;
}
......@@ -153,7 +153,7 @@ func (m *FieldSchema) GetIndexParams() []*commonpb.KeyValuePair {
type CollectionSchema struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
AutoId bool `protobuf:"varint,3,opt,name=auto_id,json=autoId,proto3" json:"auto_id,omitempty"`
AutoID bool `protobuf:"varint,3,opt,name=autoID,proto3" json:"autoID,omitempty"`
Fields []*FieldSchema `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
......@@ -199,9 +199,9 @@ func (m *CollectionSchema) GetDescription() string {
return ""
}
func (m *CollectionSchema) GetAutoId() bool {
func (m *CollectionSchema) GetAutoID() bool {
if m != nil {
return m.AutoId
return m.AutoID
}
return false
}
......@@ -222,32 +222,31 @@ func init() {
func init() { proto.RegisterFile("schema.proto", fileDescriptor_1c5fb4d8cc22d66a) }
var fileDescriptor_1c5fb4d8cc22d66a = []byte{
// 419 bytes of a gzipped FileDescriptorProto
// 416 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0x5f, 0x8b, 0xd4, 0x30,
0x14, 0xc5, 0xed, 0xfc, 0x73, 0xe6, 0x76, 0x94, 0x18, 0x05, 0x07, 0x41, 0xa8, 0xfb, 0x34, 0x08,
0xb6, 0x38, 0x2b, 0xcb, 0xe2, 0x93, 0xdb, 0x9d, 0x59, 0x29, 0x0e, 0xed, 0x92, 0xad, 0x0b, 0xfa,
0x52, 0xd2, 0x26, 0x3a, 0x81, 0xf4, 0x0f, 0x6d, 0x2a, 0xce, 0x7e, 0x03, 0x1f, 0x7d, 0xf6, 0xcb,
0x4a, 0xd2, 0x0e, 0xac, 0xb0, 0x0f, 0xc2, 0xbe, 0xfd, 0xee, 0xbd, 0x39, 0x37, 0x39, 0x27, 0x30,
0x6f, 0xb2, 0x1d, 0xcf, 0xa9, 0x5b, 0xd5, 0xa5, 0x2a, 0xf1, 0xd3, 0x5c, 0xc8, 0x1f, 0x6d, 0xd3,
0x55, 0x6e, 0x37, 0x7a, 0x31, 0xcf, 0xca, 0x3c, 0x2f, 0x8b, 0xae, 0x79, 0xf4, 0x6b, 0x00, 0xf6,
0x85, 0xe0, 0x92, 0x5d, 0x99, 0x29, 0xc6, 0x30, 0x2a, 0x68, 0xce, 0x17, 0x96, 0x63, 0x2d, 0x67,
0xc4, 0x30, 0x76, 0xc0, 0x66, 0xbc, 0xc9, 0x6a, 0x51, 0x29, 0x51, 0x16, 0x8b, 0x81, 0x19, 0xdd,
0x6e, 0xe1, 0xf7, 0x30, 0x63, 0x54, 0xd1, 0x44, 0xed, 0x2b, 0xbe, 0x18, 0x3a, 0xd6, 0xf2, 0xf1,
0xea, 0xa5, 0x7b, 0xc7, 0xe5, 0xee, 0x9a, 0x2a, 0x1a, 0xef, 0x2b, 0x4e, 0xa6, 0xac, 0x27, 0xec,
0x83, 0xad, 0x65, 0x49, 0x45, 0x6b, 0x9a, 0x37, 0x8b, 0x91, 0x33, 0x5c, 0xda, 0xab, 0x57, 0xff,
0xaa, 0xfb, 0x27, 0x7f, 0xe2, 0xfb, 0x6b, 0x2a, 0x5b, 0x7e, 0x49, 0x45, 0x4d, 0x40, 0xab, 0x2e,
0x8d, 0x08, 0xaf, 0x61, 0x2e, 0x0a, 0xc6, 0x7f, 0x1e, 0x96, 0x8c, 0xff, 0x77, 0x89, 0x6d, 0x64,
0xdd, 0x96, 0xa3, 0x3f, 0x16, 0xa0, 0xf3, 0x52, 0x4a, 0x9e, 0x69, 0x53, 0xf7, 0x0a, 0xe4, 0x39,
0x3c, 0xa4, 0xad, 0x2a, 0x13, 0xc1, 0x4c, 0x1c, 0x53, 0x32, 0xd1, 0x65, 0xc0, 0xf0, 0x29, 0x4c,
0xbe, 0xe9, 0xb8, 0x0f, 0x46, 0x9d, 0x3b, 0x63, 0xba, 0xf5, 0x23, 0xa4, 0x3f, 0xff, 0xfa, 0xb7,
0x05, 0xd3, 0x43, 0x7c, 0x78, 0x0a, 0xa3, 0x30, 0x0a, 0x37, 0xe8, 0x81, 0x26, 0x3f, 0x8a, 0xb6,
0xc8, 0xd2, 0x14, 0x84, 0xf1, 0x29, 0x1a, 0xe0, 0x19, 0x8c, 0x83, 0x30, 0x7e, 0x7b, 0x82, 0x86,
0x3d, 0x1e, 0xaf, 0xd0, 0xa8, 0xc7, 0x93, 0x77, 0x68, 0xac, 0xf1, 0x62, 0x1b, 0x9d, 0xc5, 0x08,
0x30, 0xc0, 0x64, 0x1d, 0x7d, 0xf6, 0xb7, 0x1b, 0x64, 0x6b, 0xbe, 0x8a, 0x49, 0x10, 0x7e, 0x44,
0xcf, 0xf0, 0x13, 0x78, 0x74, 0xbd, 0x39, 0x8f, 0x23, 0x92, 0xf8, 0x41, 0x78, 0x46, 0xbe, 0x20,
0x86, 0x11, 0xcc, 0xfb, 0x56, 0x27, 0xe6, 0xbe, 0xff, 0xf5, 0xc3, 0x77, 0xa1, 0x76, 0x6d, 0xaa,
0xc3, 0xf5, 0x6e, 0x84, 0x94, 0xe2, 0x46, 0xf1, 0x6c, 0xe7, 0x75, 0xa6, 0xde, 0x30, 0xd1, 0xa8,
0x5a, 0xa4, 0xad, 0xe2, 0xcc, 0x13, 0x85, 0xe2, 0x75, 0x41, 0xa5, 0x67, 0x9c, 0x7a, 0x9d, 0xd3,
0x2a, 0x4d, 0x27, 0xa6, 0x3e, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x3c, 0x4e, 0x3c, 0xbb,
0x02, 0x00, 0x00,
0x14, 0xc5, 0xed, 0xfc, 0x63, 0xe6, 0x76, 0x94, 0x18, 0x45, 0x06, 0x41, 0xa8, 0xfb, 0x34, 0x08,
0xb6, 0x38, 0x2b, 0xcb, 0xe2, 0x93, 0xdb, 0xed, 0xac, 0x14, 0x87, 0x76, 0xc9, 0xd6, 0x05, 0x7d,
0x19, 0xd2, 0x26, 0x3a, 0x81, 0xf4, 0x0f, 0x6d, 0x2a, 0xce, 0x7e, 0x03, 0x1f, 0x7d, 0xf5, 0xd3,
0x4a, 0xd2, 0x0e, 0xac, 0x30, 0x0f, 0x82, 0x6f, 0xbf, 0x7b, 0x6f, 0xce, 0x4d, 0xce, 0x09, 0xcc,
0x9b, 0x6c, 0xc7, 0x73, 0xea, 0x56, 0x75, 0xa9, 0x4a, 0xfc, 0x24, 0x17, 0xf2, 0x7b, 0xdb, 0x74,
0x95, 0xdb, 0x8d, 0x9e, 0xcf, 0xb3, 0x32, 0xcf, 0xcb, 0xa2, 0x6b, 0x9e, 0xfc, 0x1c, 0x80, 0x7d,
0x25, 0xb8, 0x64, 0x37, 0x66, 0x8a, 0x31, 0x8c, 0x0a, 0x9a, 0xf3, 0x85, 0xe5, 0x58, 0xcb, 0x19,
0x31, 0x8c, 0x1d, 0xb0, 0x19, 0x6f, 0xb2, 0x5a, 0x54, 0x4a, 0x94, 0xc5, 0x62, 0x60, 0x46, 0xf7,
0x5b, 0xf8, 0x1d, 0xcc, 0x18, 0x55, 0x74, 0xab, 0xf6, 0x15, 0x5f, 0x0c, 0x1d, 0x6b, 0xf9, 0x68,
0xf5, 0xc2, 0x3d, 0x72, 0xb9, 0x1b, 0x50, 0x45, 0x93, 0x7d, 0xc5, 0xc9, 0x94, 0xf5, 0x84, 0x7d,
0xb0, 0xb5, 0x6c, 0x5b, 0xd1, 0x9a, 0xe6, 0xcd, 0x62, 0xe4, 0x0c, 0x97, 0xf6, 0xea, 0xe5, 0xdf,
0xea, 0xfe, 0xc9, 0x1f, 0xf9, 0xfe, 0x96, 0xca, 0x96, 0x5f, 0x53, 0x51, 0x13, 0xd0, 0xaa, 0x6b,
0x23, 0xc2, 0x01, 0xcc, 0x45, 0xc1, 0xf8, 0x8f, 0xc3, 0x92, 0xf1, 0xbf, 0x2e, 0xb1, 0x8d, 0xac,
0xdb, 0x72, 0xf2, 0xdb, 0x02, 0x74, 0x59, 0x4a, 0xc9, 0x33, 0x6d, 0xea, 0xbf, 0x02, 0x79, 0x06,
0x13, 0xda, 0xaa, 0x32, 0x0c, 0x4c, 0x1a, 0x53, 0xd2, 0x57, 0xf8, 0x1c, 0x26, 0x5f, 0x75, 0xda,
0x07, 0x9f, 0xce, 0xd1, 0x94, 0xee, 0x7d, 0x08, 0xe9, 0xcf, 0xbf, 0xfa, 0x65, 0xc1, 0xf4, 0x90,
0x1e, 0x9e, 0xc2, 0x28, 0x8a, 0xa3, 0x35, 0x7a, 0xa0, 0xc9, 0x8f, 0xe3, 0x0d, 0xb2, 0x34, 0x85,
0x51, 0x72, 0x8e, 0x06, 0x78, 0x06, 0xe3, 0x30, 0x4a, 0xde, 0x9c, 0xa1, 0x61, 0x8f, 0xa7, 0x2b,
0x34, 0xea, 0xf1, 0xec, 0x2d, 0x1a, 0x6b, 0xbc, 0xda, 0xc4, 0x17, 0x09, 0x02, 0x0c, 0x30, 0x09,
0xe2, 0x4f, 0xfe, 0x66, 0x8d, 0x6c, 0xcd, 0x37, 0x09, 0x09, 0xa3, 0x0f, 0xe8, 0x29, 0x7e, 0x0c,
0x0f, 0x6f, 0xd7, 0x97, 0x49, 0x4c, 0xb6, 0x7e, 0x18, 0x5d, 0x90, 0xcf, 0x88, 0x61, 0x04, 0xf3,
0xbe, 0xd5, 0x89, 0xb9, 0xef, 0x7f, 0x79, 0xff, 0x4d, 0xa8, 0x5d, 0x9b, 0xea, 0x6c, 0xbd, 0x3b,
0x21, 0xa5, 0xb8, 0x53, 0x3c, 0xdb, 0x79, 0x9d, 0xa9, 0xd7, 0x4c, 0x34, 0xaa, 0x16, 0x69, 0xab,
0x38, 0xf3, 0x44, 0xa1, 0x78, 0x5d, 0x50, 0xe9, 0x19, 0xa7, 0x5e, 0xe7, 0xb4, 0x4a, 0xd3, 0x89,
0xa9, 0x4f, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x74, 0xb3, 0xe8, 0x78, 0xba, 0x02, 0x00, 0x00,
}
......@@ -151,7 +151,7 @@ message Score {
* @brief Entities hit by query
*/
message Hits {
repeated int64 ids = 1;
repeated int64 IDs = 1;
repeated common.Blob row_data = 2;
repeated Score scores = 3;
}
......
......@@ -791,7 +791,7 @@ func (m *Score) GetValues() []float32 {
//*
// @brief Entities hit by query
type Hits struct {
Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
IDs []int64 `protobuf:"varint,1,rep,packed,name=IDs,proto3" json:"IDs,omitempty"`
RowData []*commonpb.Blob `protobuf:"bytes,2,rep,name=row_data,json=rowData,proto3" json:"row_data,omitempty"`
Scores []*Score `protobuf:"bytes,3,rep,name=scores,proto3" json:"scores,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
......@@ -824,9 +824,9 @@ func (m *Hits) XXX_DiscardUnknown() {
var xxx_messageInfo_Hits proto.InternalMessageInfo
func (m *Hits) GetIds() []int64 {
func (m *Hits) GetIDs() []int64 {
if m != nil {
return m.Ids
return m.IDs
}
return nil
}
......@@ -917,7 +917,7 @@ func init() {
func init() { proto.RegisterFile("service_msg.proto", fileDescriptor_b4b40b84dd2f74cb) }
var fileDescriptor_b4b40b84dd2f74cb = []byte{
// 762 bytes of a gzipped FileDescriptorProto
// 763 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x5d, 0x6f, 0xf3, 0x34,
0x14, 0x26, 0x4d, 0x5b, 0xba, 0xd3, 0xb4, 0x6f, 0x67, 0x0a, 0x0a, 0xdb, 0x4d, 0xc9, 0xab, 0x41,
0x05, 0xa2, 0x15, 0x1b, 0x12, 0xda, 0x05, 0x12, 0xed, 0x36, 0x60, 0x1f, 0xea, 0x86, 0x5b, 0x4d,
......@@ -955,15 +955,15 @@ var fileDescriptor_b4b40b84dd2f74cb = []byte{
0xad, 0xb8, 0x97, 0x74, 0xae, 0xff, 0xd6, 0x0d, 0x61, 0x02, 0xaf, 0x25, 0x39, 0x7f, 0x19, 0xd0,
0x5c, 0x3a, 0xe6, 0x8b, 0xfb, 0xf9, 0x06, 0x8a, 0xfa, 0x5b, 0x66, 0xdd, 0xbc, 0xfd, 0x8f, 0xff,
0xbe, 0x6e, 0xd0, 0x58, 0x27, 0xbc, 0x46, 0x27, 0x5f, 0x41, 0x69, 0xe8, 0x72, 0xb1, 0xcd, 0xf5,
0x36, 0x47, 0xa8, 0xb0, 0x1c, 0xa1, 0xdf, 0x0d, 0x28, 0xfe, 0xc8, 0x94, 0x76, 0x02, 0xe6, 0x65,
0x36, 0x65, 0xe2, 0x74, 0xb9, 0xe1, 0xd0, 0x85, 0x27, 0x3b, 0x74, 0x2a, 0x5a, 0xca, 0x61, 0xd1,
0xc2, 0xfe, 0x76, 0x05, 0x34, 0x4f, 0x9c, 0x5f, 0x75, 0x04, 0x54, 0xb5, 0xbf, 0x61, 0x2a, 0x93,
0x50, 0x3d, 0x4f, 0xf8, 0x0e, 0x14, 0x03, 0xa6, 0x64, 0x4e, 0x75, 0x6f, 0x3b, 0x6c, 0xda, 0x2a,
0xd6, 0xf7, 0x3e, 0xff, 0x0e, 0xde, 0x3c, 0x72, 0x79, 0x54, 0x81, 0xe2, 0xe0, 0x7a, 0x70, 0xd6,
0x78, 0x0f, 0xed, 0x42, 0xed, 0xf6, 0xec, 0x64, 0x74, 0x8d, 0xc7, 0xfd, 0xf3, 0x41, 0x0f, 0xdf,
0x35, 0x3c, 0xd4, 0x00, 0x2b, 0x0f, 0x7d, 0x7f, 0x75, 0xdd, 0x1b, 0x35, 0x68, 0xff, 0xe4, 0xe7,
0x9e, 0xcf, 0x54, 0x90, 0x4c, 0x52, 0x46, 0xdd, 0x07, 0x16, 0x86, 0xec, 0x41, 0x51, 0x37, 0xe8,
0x66, 0xd0, 0x5f, 0x7a, 0x4c, 0x2a, 0xc1, 0x26, 0x89, 0xa2, 0x5e, 0x97, 0x45, 0x8a, 0x8a, 0x88,
0x84, 0x5d, 0xcd, 0xa7, 0x9b, 0xf3, 0x89, 0x27, 0x93, 0xb2, 0x0e, 0x1c, 0xfd, 0x13, 0x00, 0x00,
0xff, 0xff, 0x23, 0x87, 0xde, 0xce, 0x37, 0x08, 0x00, 0x00,
0x36, 0x47, 0xa8, 0xb0, 0x1c, 0xa1, 0xdf, 0x0d, 0x28, 0xfe, 0xc8, 0x94, 0x76, 0x82, 0xf3, 0xd3,
0xcc, 0xa6, 0x4c, 0x9c, 0x2e, 0x37, 0x1c, 0xba, 0xf0, 0x64, 0x87, 0x4e, 0x45, 0x4b, 0x39, 0x2c,
0x5a, 0xd8, 0xdf, 0xae, 0x80, 0xe6, 0x89, 0xf3, 0xab, 0x8e, 0x80, 0xaa, 0xf6, 0x37, 0x4c, 0x65,
0x12, 0xaa, 0xe7, 0x09, 0xdf, 0x81, 0x62, 0xc0, 0x94, 0xcc, 0xa9, 0xee, 0x6d, 0x87, 0x4d, 0x5b,
0xc5, 0xfa, 0xde, 0xe7, 0xdf, 0xc1, 0x9b, 0x47, 0x2e, 0x8f, 0x2a, 0x50, 0x1c, 0x5c, 0x0f, 0xce,
0x1a, 0xef, 0xa1, 0x5d, 0xa8, 0xdd, 0x9e, 0x9d, 0x8c, 0xae, 0xf1, 0xb8, 0x7f, 0x3e, 0xe8, 0xe1,
0xbb, 0x86, 0x87, 0x1a, 0x60, 0xe5, 0xa1, 0xef, 0xaf, 0xae, 0x7b, 0xa3, 0x06, 0xed, 0x9f, 0xfc,
0xdc, 0xf3, 0x99, 0x0a, 0x92, 0x49, 0xca, 0xa8, 0xfb, 0xc0, 0xc2, 0x90, 0x3d, 0x28, 0xea, 0x06,
0xdd, 0x0c, 0xfa, 0x4b, 0x8f, 0x49, 0x25, 0xd8, 0x24, 0x51, 0xd4, 0xeb, 0xb2, 0x48, 0x51, 0x11,
0x91, 0xb0, 0xab, 0xf9, 0x74, 0x73, 0x3e, 0xf1, 0x64, 0x52, 0xd6, 0x81, 0xa3, 0x7f, 0x02, 0x00,
0x00, 0xff, 0xff, 0x12, 0x37, 0x33, 0x02, 0x37, 0x08, 0x00, 0x00,
}
......@@ -151,7 +151,7 @@ func (p *Proxy) queryResultLoop() {
}
tsMsg := msgPack.Msgs[0]
searchResultMsg, _ := (*tsMsg).(*msgstream.SearchResultMsg)
reqID := searchResultMsg.GetReqId()
reqID := searchResultMsg.GetReqID()
_, ok := queryResultBuf[reqID]
if !ok {
queryResultBuf[reqID] = make([]*internalpb.SearchResult, 0)
......
......@@ -13,7 +13,7 @@ import (
)
type task interface {
ID() UniqueID // return ReqId
ID() UniqueID // return ReqID
Type() internalpb.MsgType
BeginTs() Timestamp
EndTs() Timestamp
......@@ -50,7 +50,7 @@ func (it *InsertTask) EndTs() Timestamp {
}
func (it *InsertTask) ID() UniqueID {
return it.ReqId
return it.ReqID
}
func (it *InsertTask) Type() internalpb.MsgType {
......@@ -104,7 +104,7 @@ type CreateCollectionTask struct {
}
func (cct *CreateCollectionTask) ID() UniqueID {
return cct.ReqId
return cct.ReqID
}
func (cct *CreateCollectionTask) Type() internalpb.MsgType {
......@@ -172,7 +172,7 @@ type DropCollectionTask struct {
}
func (dct *DropCollectionTask) ID() UniqueID {
return dct.ReqId
return dct.ReqID
}
func (dct *DropCollectionTask) Type() internalpb.MsgType {
......@@ -241,7 +241,7 @@ type QueryTask struct {
}
func (qt *QueryTask) ID() UniqueID {
return qt.ReqId
return qt.ReqID
}
func (qt *QueryTask) Type() internalpb.MsgType {
......@@ -320,7 +320,7 @@ func (qt *QueryTask) Notify(err error) {
qt.resultChan <- &servicepb.QueryResult{}
return
}
k := len(searchResults[0].Hits[0].Ids) // k
k := len(searchResults[0].Hits[0].IDs) // k
queryResult := &servicepb.QueryResult{
Status: &commonpb.Status{
ErrorCode: 0,
......@@ -347,7 +347,7 @@ func (qt *QueryTask) Notify(err error) {
}
}
choiceOffset := locs[choice]
hits.Ids = append(hits.Ids, searchResults[choice].Hits[i].Ids[choiceOffset])
hits.IDs = append(hits.IDs, searchResults[choice].Hits[i].IDs[choiceOffset])
hits.RowData = append(hits.RowData, searchResults[choice].Hits[i].RowData[choiceOffset])
hits.Scores = append(hits.Scores, searchResults[choice].Hits[i].Scores[choiceOffset])
locs[choice]++
......@@ -369,7 +369,7 @@ type HasCollectionTask struct {
}
func (hct *HasCollectionTask) ID() UniqueID {
return hct.ReqId
return hct.ReqID
}
func (hct *HasCollectionTask) Type() internalpb.MsgType {
......@@ -440,7 +440,7 @@ type DescribeCollectionTask struct {
}
func (dct *DescribeCollectionTask) ID() UniqueID {
return dct.ReqId
return dct.ReqID
}
func (dct *DescribeCollectionTask) Type() internalpb.MsgType {
......@@ -510,7 +510,7 @@ type ShowCollectionsTask struct {
}
func (sct *ShowCollectionsTask) ID() UniqueID {
return sct.ReqId
return sct.ReqID
}
func (sct *ShowCollectionsTask) Type() internalpb.MsgType {
......
......@@ -130,19 +130,19 @@ type DqTaskQueue struct {
func (queue *DdTaskQueue) Enqueue(t task) error {
queue.lock.Lock()
defer queue.lock.Unlock()
// TODO: set Ts, ReqId, ProxyId
// TODO: set Ts, ReqID, ProxyID
queue.AddUnissuedTask(t)
return nil
}
func (queue *DmTaskQueue) Enqueue(t task) error {
// TODO: set Ts, ReqId, ProxyId
// TODO: set Ts, ReqID, ProxyID
queue.AddUnissuedTask(t)
return nil
}
func (queue *DqTaskQueue) Enqueue(t task) error {
// TODO: set Ts, ReqId, ProxyId
// TODO: set Ts, ReqID, ProxyID
queue.AddUnissuedTask(t)
return nil
}
......
......@@ -68,7 +68,7 @@ func (tt *timeTick) tick() error {
var timeTickMsg msgstream.TsMsg = &msgstream.TimeTickMsg{
TimeTickMsg: internalpb.TimeTickMsg{
MsgType: internalpb.MsgType_kTimeTick,
PeerId: tt.peerID,
PeerID: tt.peerID,
Timestamp: tt.currentTick,
},
}
......
......@@ -47,10 +47,10 @@ func TestColSegContainer_addCollection(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -60,7 +60,7 @@ func TestColSegContainer_addCollection(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
}
......@@ -99,10 +99,10 @@ func TestColSegContainer_removeCollection(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -112,7 +112,7 @@ func TestColSegContainer_removeCollection(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
err := node.container.removeCollection(collection)
......@@ -155,10 +155,10 @@ func TestColSegContainer_getCollectionByID(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -168,14 +168,14 @@ func TestColSegContainer_getCollectionByID(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
targetCollection, err := node.container.getCollectionByID(UniqueID(0))
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, "collection0")
assert.Equal(t, targetCollection.meta.Id, UniqueID(0))
assert.Equal(t, targetCollection.meta.ID, UniqueID(0))
}
func TestColSegContainer_getCollectionByName(t *testing.T) {
......@@ -213,10 +213,10 @@ func TestColSegContainer_getCollectionByName(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -226,14 +226,14 @@ func TestColSegContainer_getCollectionByName(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
targetCollection, err := node.container.getCollectionByName("collection0")
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, "collection0")
assert.Equal(t, targetCollection.meta.Id, UniqueID(0))
assert.Equal(t, targetCollection.meta.ID, UniqueID(0))
}
//----------------------------------------------------------------------------------------------------- partition
......@@ -272,10 +272,10 @@ func TestColSegContainer_addPartition(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -285,7 +285,7 @@ func TestColSegContainer_addPartition(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
for _, tag := range collectionMeta.PartitionTags {
......@@ -330,10 +330,10 @@ func TestColSegContainer_removePartition(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -343,7 +343,7 @@ func TestColSegContainer_removePartition(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
for _, tag := range collectionMeta.PartitionTags {
......@@ -390,10 +390,10 @@ func TestColSegContainer_getPartitionByTag(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -403,7 +403,7 @@ func TestColSegContainer_getPartitionByTag(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
for _, tag := range collectionMeta.PartitionTags {
......@@ -453,10 +453,10 @@ func TestColSegContainer_addSegment(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -466,7 +466,7 @@ func TestColSegContainer_addSegment(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
partition, err := node.container.addPartition(collection, collectionMeta.PartitionTags[0])
......@@ -515,10 +515,10 @@ func TestColSegContainer_removeSegment(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -528,7 +528,7 @@ func TestColSegContainer_removeSegment(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
partition, err := node.container.addPartition(collection, collectionMeta.PartitionTags[0])
......@@ -579,10 +579,10 @@ func TestColSegContainer_getSegmentByID(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -592,7 +592,7 @@ func TestColSegContainer_getSegmentByID(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
partition, err := node.container.addPartition(collection, collectionMeta.PartitionTags[0])
......@@ -644,10 +644,10 @@ func TestColSegContainer_hasSegment(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -657,7 +657,7 @@ func TestColSegContainer_hasSegment(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
partition, err := node.container.addPartition(collection, collectionMeta.PartitionTags[0])
......
......@@ -26,7 +26,7 @@ func (c *Collection) Name() string {
}
func (c *Collection) ID() UniqueID {
return (*c.meta).Id
return (*c.meta).ID
}
func (c *Collection) Partitions() *[]*Partition {
......
......@@ -46,10 +46,10 @@ func TestCollection_Partitions(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -59,7 +59,7 @@ func TestCollection_Partitions(t *testing.T) {
var collection = node.container.addCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, len(node.container.collections), 1)
for _, tag := range collectionMeta.PartitionTags {
......@@ -102,10 +102,10 @@ func TestCollection_newCollection(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -114,7 +114,7 @@ func TestCollection_newCollection(t *testing.T) {
collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
}
func TestCollection_deleteCollection(t *testing.T) {
......@@ -148,10 +148,10 @@ func TestCollection_deleteCollection(t *testing.T) {
}
collectionMeta := etcdpb.CollectionMeta{
Id: UniqueID(0),
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIds: []UniqueID{0},
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
......@@ -160,7 +160,7 @@ func TestCollection_deleteCollection(t *testing.T) {
collection := newCollection(&collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.Id, UniqueID(0))
assert.Equal(t, collection.meta.ID, UniqueID(0))
deleteCollection(collection)
}
......@@ -49,15 +49,15 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
// 1. hash insertMessages to insertData
for _, task := range iMsg.insertMessages {
if len(task.RowIds) != len(task.Timestamps) || len(task.RowIds) != len(task.RowData) {
if len(task.RowIDs) != len(task.Timestamps) || len(task.RowIDs) != len(task.RowData) {
// TODO: what if the messages are misaligned?
// Here, we ignore those messages and print error
log.Println("Error, misaligned messages detected")
continue
}
insertData.insertIDs[task.SegmentId] = append(insertData.insertIDs[task.SegmentId], task.RowIds...)
insertData.insertTimestamps[task.SegmentId] = append(insertData.insertTimestamps[task.SegmentId], task.Timestamps...)
insertData.insertRecords[task.SegmentId] = append(insertData.insertRecords[task.SegmentId], task.RowData...)
insertData.insertIDs[task.SegmentID] = append(insertData.insertIDs[task.SegmentID], task.RowIDs...)
insertData.insertTimestamps[task.SegmentID] = append(insertData.insertTimestamps[task.SegmentID], task.Timestamps...)
insertData.insertRecords[task.SegmentID] = append(insertData.insertRecords[task.SegmentID], task.RowData...)
}
// 2. do preInsert
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
......@@ -66,8 +66,8 @@ package reader
// Uid: int64(i),
// PartitionTag: "partition0",
// Timestamp: uint64(i + 1000),
// SegmentId: int64(i),
// ChannelId: 0,
// SegmentID: int64(i),
// ChannelID: 0,
// Op: msgPb.OpType_INSERT,
// ClientId: 0,
// ExtraParams: nil,
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册