Commit 2064b014 authored by bigsheeper, committed by yefu.chen

Add loadFieldData, use partition id instead of tag

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
Parent 76a7684d
......@@ -21,6 +21,8 @@ enum SegmentType {
Sealed = 2,
};
typedef enum SegmentType SegmentType;
enum ErrorCode {
Success = 0,
UnexpectedException = 1,
......@@ -31,6 +33,12 @@ typedef struct CStatus {
const char* error_msg;
} CStatus;
typedef struct CLoadFieldDataInfo {
int64_t field_id;
void* blob;
int64_t row_count;
} CLoadFieldDataInfo;
#ifdef __cplusplus
}
#endif
This diff is collapsed.
......@@ -21,8 +21,9 @@
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
////////////////////////////// common interfaces //////////////////////////////
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, int seg_type) {
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type) {
auto col = (milvus::segcore::Collection*)collection;
std::unique_ptr<milvus::segcore::SegmentInterface> segment;
......@@ -46,6 +47,7 @@ NewSegment(CCollection collection, uint64_t segment_id, int seg_type) {
void
DeleteSegment(CSegmentInterface segment) {
// TODO: use dynamic cast, and return c status
auto s = (milvus::segcore::SegmentGrowing*)segment;
std::cout << "delete segment " << std::endl;
......@@ -58,8 +60,86 @@ DeleteQueryResult(CQueryResult query_result) {
delete res;
}
//////////////////////////////////////////////////////////////////
CStatus
Search(CSegmentInterface c_segment,
CPlan c_plan,
CPlaceholderGroup* c_placeholder_groups,
uint64_t* timestamps,
int num_groups,
CQueryResult* result) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto plan = (milvus::query::Plan*)c_plan;
std::vector<const milvus::query::PlaceholderGroup*> placeholder_groups;
for (int i = 0; i < num_groups; ++i) {
placeholder_groups.push_back((const milvus::query::PlaceholderGroup*)c_placeholder_groups[i]);
}
auto query_result = std::make_unique<milvus::QueryResult>();
auto status = CStatus();
try {
*query_result = segment->Search(plan, placeholder_groups.data(), timestamps, num_groups);
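// for metrics other than IP, internal scores are kept negated (larger is better);
// flip the sign back here so callers receive natural distances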
if (plan->plan_node_->query_info_.metric_type_ != "IP") {
for (auto& dis : query_result->result_distances_) {
dis *= -1;
}
}
status.error_code = Success;
status.error_msg = "";
} catch (std::exception& e) {
status.error_code = UnexpectedException;
status.error_msg = strdup(e.what());
}
*result = query_result.release();
// result_ids and result_distances are allocated by the Go caller,
// so we don't need to malloc here.
// memcpy(result_ids, query_result.result_ids_.data(), query_result.get_row_count() * sizeof(long int));
// memcpy(result_distances, query_result.result_distances_.data(), query_result.get_row_count() * sizeof(float));
return status;
}
CStatus
FillTargetEntry(CSegmentInterface c_segment, CPlan c_plan, CQueryResult c_result) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto plan = (milvus::query::Plan*)c_plan;
auto result = (milvus::QueryResult*)c_result;
auto status = CStatus();
try {
segment->FillTargetEntry(plan, *result);
status.error_code = Success;
status.error_msg = "";
} catch (std::runtime_error& e) {
status.error_code = UnexpectedException;
status.error_msg = strdup(e.what());
}
return status;
}
int64_t
GetMemoryUsageInBytes(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto mem_size = segment->GetMemoryUsageInBytes();
return mem_size;
}
int64_t
GetRowCount(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto row_count = segment->get_row_count();
return row_count;
}
int64_t
GetDeletedCount(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto deleted_count = segment->get_deleted_count();
return deleted_count;
}
////////////////////////////// interfaces for growing segment //////////////////////////////
CStatus
Insert(CSegmentInterface c_segment,
int64_t reserved_offset,
......@@ -123,70 +203,33 @@ Delete(CSegmentInterface c_segment,
int64_t
PreDelete(CSegmentInterface c_segment, int64_t size) {
// TODO: use dynamic cast, and return c status
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
return segment->PreDelete(size);
}
////////////////////////////// interfaces for sealed segment //////////////////////////////
CStatus
LoadFieldData(CSegmentInterface c_segment, CLoadFieldDataInfo load_field_data_info) {
    auto segment = (milvus::segcore::SegmentSealed*)c_segment;
    try {
        auto load_info =
            LoadFieldDataInfo{load_field_data_info.field_id, load_field_data_info.blob, load_field_data_info.row_count};
        segment->LoadFieldData(load_info);
        auto status = CStatus();
        status.error_code = Success;
        status.error_msg = "";
        return status;
    } catch (std::exception& e) {
        auto status = CStatus();
        status.error_code = UnexpectedException;
        status.error_msg = strdup(e.what());
        return status;
    }
}
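For context, the query node consumes these C entry points from Go through cgo. Below is a minimal, hypothetical wrapper sketch for the new LoadFieldData call; the package name, include path, and wrapper signature are illustrative assumptions, not the repository's actual binding:

package segcore

/*
#include "segcore/segment_c.h"  // assumed include path
*/
import "C"

import (
	"errors"
	"unsafe"
)

// loadFieldData fills a CLoadFieldDataInfo for one field's raw blob and
// surfaces the returned CStatus as a Go error (hypothetical wrapper).
func loadFieldData(seg C.CSegmentInterface, fieldID int64, blob []byte, rowCount int64) error {
	info := C.CLoadFieldDataInfo{
		field_id:  C.int64_t(fieldID),
		blob:      unsafe.Pointer(&blob[0]),
		row_count: C.int64_t(rowCount),
	}
	if status := C.LoadFieldData(seg, info); status.error_code != 0 { // 0 == Success
		// note: error_msg comes from strdup on the C side; a real wrapper should free it
		return errors.New(C.GoString(status.error_msg))
	}
	return nil
}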
////////////////////////////// deprecated interfaces //////////////////////////////
CStatus
UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info) {
auto status = CStatus();
......@@ -203,7 +246,6 @@ UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info
return status;
}
}
//////////////////////////////////////////////////////////////////
int
Close(CSegmentInterface c_segment) {
......@@ -223,26 +265,3 @@ IsOpened(CSegmentInterface c_segment) {
auto status = segment->get_state();
return status == milvus::segcore::SegmentGrowing::SegmentState::Open;
}
......@@ -26,8 +26,9 @@ extern "C" {
typedef void* CSegmentInterface;
typedef void* CQueryResult;
////////////////////////////// common interfaces //////////////////////////////
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, int seg_type);
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);
void
DeleteSegment(CSegmentInterface segment);
......@@ -35,9 +36,27 @@ DeleteSegment(CSegmentInterface segment);
void
DeleteQueryResult(CQueryResult query_result);
//////////////////////////////////////////////////////////////////
CStatus
Search(CSegmentInterface c_segment,
CPlan plan,
CPlaceholderGroup* placeholder_groups,
uint64_t* timestamps,
int num_groups,
CQueryResult* result);
// interface for growing segment
CStatus
FillTargetEntry(CSegmentInterface c_segment, CPlan c_plan, CQueryResult result);
int64_t
GetMemoryUsageInBytes(CSegmentInterface c_segment);
int64_t
GetRowCount(CSegmentInterface c_segment);
int64_t
GetDeletedCount(CSegmentInterface c_segment);
////////////////////////////// interfaces for growing segment //////////////////////////////
CStatus
Insert(CSegmentInterface c_segment,
int64_t reserved_offset,
......@@ -64,23 +83,13 @@ Delete(CSegmentInterface c_segment,
int64_t
PreDelete(CSegmentInterface c_segment, int64_t size);
////////////////////////////// interfaces for sealed segment //////////////////////////////
CStatus
LoadFieldData(CSegmentInterface c_segment, CLoadFieldDataInfo load_field_data_info);
////////////////////////////// deprecated interfaces //////////////////////////////
CStatus
UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info);
//////////////////////////////////////////////////////////////////
// deprecated
int
......@@ -94,20 +103,6 @@ BuildIndex(CCollection c_collection, CSegmentInterface c_segment);
bool
IsOpened(CSegmentInterface c_segment);
#ifdef __cplusplus
}
#endif
......@@ -52,7 +52,7 @@ TEST(CApiTest, GetCollectionNameTest) {
TEST(CApiTest, SegmentTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
DeleteCollection(collection);
DeleteSegment(segment);
}
......@@ -60,7 +60,7 @@ TEST(CApiTest, SegmentTest) {
TEST(CApiTest, InsertTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
......@@ -95,7 +95,7 @@ TEST(CApiTest, InsertTest) {
TEST(CApiTest, DeleteTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
long delete_row_ids[] = {100000, 100001, 100002};
unsigned long delete_timestamps[] = {0, 0, 0};
......@@ -112,7 +112,7 @@ TEST(CApiTest, DeleteTest) {
TEST(CApiTest, SearchTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
......@@ -201,7 +201,7 @@ TEST(CApiTest, SearchTest) {
// TEST(CApiTest, BuildIndexTest) {
// auto schema_tmp_conf = "";
// auto collection = NewCollection(schema_tmp_conf);
// auto segment = NewSegment(collection, 0, 1);
// auto segment = NewSegment(collection, 0, Growing);
//
// std::vector<char> raw_data;
// std::vector<uint64_t> timestamps;
......@@ -285,7 +285,7 @@ TEST(CApiTest, SearchTest) {
TEST(CApiTest, IsOpenedTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto is_opened = IsOpened(segment);
assert(is_opened);
......@@ -297,7 +297,7 @@ TEST(CApiTest, IsOpenedTest) {
TEST(CApiTest, CloseTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto status = Close(segment);
assert(status == 0);
......@@ -309,7 +309,7 @@ TEST(CApiTest, CloseTest) {
TEST(CApiTest, GetMemoryUsageInBytesTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto old_memory_usage_size = GetMemoryUsageInBytes(segment);
std::cout << "old_memory_usage_size = " << old_memory_usage_size << std::endl;
......@@ -428,7 +428,7 @@ generate_index(
// TEST(CApiTest, TestSearchPreference) {
// auto schema_tmp_conf = "";
// auto collection = NewCollection(schema_tmp_conf);
// auto segment = NewSegment(collection, 0, 1);
// auto segment = NewSegment(collection, 0, Growing);
//
// auto beg = chrono::high_resolution_clock::now();
// auto next = beg;
......@@ -547,7 +547,7 @@ generate_index(
TEST(CApiTest, GetDeletedCountTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
long delete_row_ids[] = {100000, 100001, 100002};
unsigned long delete_timestamps[] = {0, 0, 0};
......@@ -568,7 +568,7 @@ TEST(CApiTest, GetDeletedCountTest) {
TEST(CApiTest, GetRowCountTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
int N = 10000;
auto [raw_data, timestamps, uids] = generate_data(N);
......@@ -592,7 +592,7 @@ TEST(CApiTest, GetRowCountTest) {
// "\u003e\ncreate_time: 1600416765\nsegment_ids: 6873737669791618215\npartition_tags: \"default\"\n";
//
// auto collection = NewCollection(schema_string.data());
// auto segment = NewSegment(collection, 0, 1);
// auto segment = NewSegment(collection, 0, Growing);
// DeleteCollection(collection);
// DeleteSegment(segment);
//}
......@@ -629,7 +629,7 @@ TEST(CApiTest, MergeInto) {
TEST(CApiTest, Reduce) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
......@@ -845,7 +845,7 @@ TEST(CApiTest, UpdateSegmentIndex_Without_Predicate) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
......@@ -970,7 +970,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Range) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
......@@ -1108,7 +1108,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Term) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
......@@ -1245,7 +1245,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Range) {
std::string schema_string = generate_collection_shema("JACCARD", "16", true);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
......@@ -1384,7 +1384,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Term) {
std::string schema_string = generate_collection_shema("JACCARD", "16", true);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0, 1);
auto segment = NewSegment(collection, 0, Growing);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
......
package masterservice
import (
"context"
"time"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"google.golang.org/grpc"
)
// grpc client
type GrpcClient struct {
grpcClient masterpb.MasterServiceClient
conn *grpc.ClientConn
//inner member
addr string
timeout time.Duration
retry int
}
func NewGrpcClient(addr string, timeout time.Duration) (*GrpcClient, error) {
return &GrpcClient{
grpcClient: nil,
conn: nil,
addr: addr,
timeout: timeout,
retry: 3,
}, nil
}
func (c *GrpcClient) Init(params *cms.InitParams) error {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
var err error
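// grpc.WithBlock makes each DialContext attempt synchronous, and all retries
// share the single ctx deadline created above, so Init is bounded by c.timeout.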
for i := 0; i < c.retry; i++ {
if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock()); err == nil {
break
}
}
if err != nil {
return err
}
c.grpcClient = masterpb.NewMasterServiceClient(c.conn)
return nil
}
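A short sketch of the intended client lifecycle, tying the pieces above together; the address, timeout, and channel name are illustrative values, not repository defaults:

func exampleClientLifecycle() error {
	cli, err := NewGrpcClient("127.0.0.1:53100", 3*time.Second)
	if err != nil {
		return err
	}
	if err = cli.Init(&cms.InitParams{ProxyTimeTickChannel: "proxyTimeTick"}); err != nil {
		return err
	}
	if err = cli.Start(); err != nil {
		return err
	}
	defer cli.Stop()
	// one read-only call as a smoke test
	_, err = cli.ShowCollections(&milvuspb.ShowCollectionRequest{DbName: "testDb"})
	return err
}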
func (c *GrpcClient) Start() error {
return nil
}
func (c *GrpcClient) Stop() error {
return c.conn.Close()
}
//TODO, grpc, get service state from server
func (c *GrpcClient) GetServiceStates() (*internalpb2.ServiceStates, error) {
return nil, nil
}
//DDL request
func (c *GrpcClient) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return c.grpcClient.CreateCollection(context.Background(), in)
}
func (c *GrpcClient) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
return c.grpcClient.DropCollection(context.Background(), in)
}
func (c *GrpcClient) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
return c.grpcClient.HasCollection(context.Background(), in)
}
func (c *GrpcClient) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
return c.grpcClient.DescribeCollection(context.Background(), in)
}
func (c *GrpcClient) GetCollectionStatistics(in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
return c.grpcClient.GetCollectionStatistics(context.Background(), in)
}
func (c *GrpcClient) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
return c.grpcClient.ShowCollections(context.Background(), in)
}
func (c *GrpcClient) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
return c.grpcClient.CreatePartition(context.Background(), in)
}
func (c *GrpcClient) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
return c.grpcClient.DropPartition(context.Background(), in)
}
func (c *GrpcClient) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
return c.grpcClient.HasPartition(context.Background(), in)
}
func (c *GrpcClient) GetPartitionStatistics(in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
return c.grpcClient.GetPartitionStatistics(context.Background(), in)
}
func (c *GrpcClient) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
return c.grpcClient.ShowPartitions(context.Background(), in)
}
//index builder service
func (c *GrpcClient) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return c.grpcClient.CreateIndex(context.Background(), in)
}
func (c *GrpcClient) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return c.grpcClient.DescribeIndex(context.Background(), in)
}
//global timestamp allocator
func (c *GrpcClient) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
return c.grpcClient.AllocTimestamp(context.Background(), in)
}
func (c *GrpcClient) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
return c.grpcClient.AllocID(context.Background(), in)
}
//receive time tick from proxy service and put it into this channel
func (c *GrpcClient) GetTimeTickChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetTimeTickChannel(context.Background(), empty)
}
//receive ddl requests from rpc and time ticks from proxy service, and put them into this channel
func (c *GrpcClient) GetDdChannel(in *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetDdChannel(context.Background(), in)
}
//just defines a channel; not currently used
func (c *GrpcClient) GetStatisticsChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetStatisticsChannel(context.Background(), empty)
}
func (c *GrpcClient) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
return c.grpcClient.DescribeSegment(context.Background(), in)
}
func (c *GrpcClient) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
return c.grpcClient.ShowSegments(context.Background(), in)
}
package masterservice
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
func TestGrpcService(t *testing.T) {
rand.Seed(time.Now().UnixNano())
randVal := rand.Int()
cms.Params.Address = "127.0.0.1"
cms.Params.Port = (randVal % 100) + 10000
cms.Params.NodeID = 0
cms.Params.PulsarAddress = "pulsar://127.0.0.1:6650"
cms.Params.EtcdAddress = "127.0.0.1:2379"
cms.Params.MetaRootPath = fmt.Sprintf("/%d/test/meta", randVal)
cms.Params.KvRootPath = fmt.Sprintf("/%d/test/kv", randVal)
cms.Params.ProxyTimeTickChannel = fmt.Sprintf("proxyTimeTick%d", randVal)
cms.Params.MsgChannelSubName = fmt.Sprintf("msgChannel%d", randVal)
cms.Params.TimeTickChannel = fmt.Sprintf("timeTick%d", randVal)
cms.Params.DdChannel = fmt.Sprintf("ddChannel%d", randVal)
cms.Params.StatisticsChannel = fmt.Sprintf("stateChannel%d", randVal)
cms.Params.MaxPartitionNum = 64
cms.Params.DefaultPartitionTag = "_default"
t.Logf("master service port = %d", cms.Params.Port)
svr, err := NewGrpcServer()
assert.Nil(t, err)
core := svr.core.(*cms.Core)
core.ProxyTimeTickChan = make(chan typeutil.Timestamp, 8)
timeTickArray := make([]typeutil.Timestamp, 0, 16)
core.SendTimeTick = func(ts typeutil.Timestamp) error {
t.Logf("send time tick %d", ts)
timeTickArray = append(timeTickArray, ts)
return nil
}
createCollectionArray := make([]*cms.CreateCollectionReqTask, 0, 16)
core.DdCreateCollectionReq = func(req *cms.CreateCollectionReqTask) error {
t.Logf("Create Colllection %s", req.Req.CollectionName)
createCollectionArray = append(createCollectionArray, req)
return nil
}
dropCollectionArray := make([]*cms.DropCollectionReqTask, 0, 16)
core.DdDropCollectionReq = func(req *cms.DropCollectionReqTask) error {
t.Logf("Drop Collection %s", req.Req.CollectionName)
dropCollectionArray = append(dropCollectionArray, req)
return nil
}
createPartitionArray := make([]*cms.CreatePartitionReqTask, 0, 16)
core.DdCreatePartitionReq = func(req *cms.CreatePartitionReqTask) error {
t.Logf("Create Partition %s", req.Req.PartitionName)
createPartitionArray = append(createPartitionArray, req)
return nil
}
dropPartitionArray := make([]*cms.DropPartitionReqTask, 0, 16)
core.DdDropPartitionReq = func(req *cms.DropPartitionReqTask) error {
t.Logf("Drop Partition %s", req.Req.PartitionName)
dropPartitionArray = append(dropPartitionArray, req)
return nil
}
core.GetSegmentMeta = func(id typeutil.UniqueID) (*etcdpb.SegmentMeta, error) {
return &etcdpb.SegmentMeta{
SegmentID: 20,
CollectionID: 10,
PartitionTag: "_default",
ChannelStart: 50,
ChannelEnd: 100,
OpenTime: 1000,
CloseTime: 2000,
NumRows: 16,
MemSize: 1024,
BinlogFilePaths: []*etcdpb.FieldBinlogFiles{
{
FieldID: 101,
BinlogFiles: []string{"/test/binlog/file"},
},
},
}, nil
}
err = svr.Init(&cms.InitParams{ProxyTimeTickChannel: fmt.Sprintf("proxyTimeTick%d", randVal)})
assert.Nil(t, err)
err = svr.Start()
assert.Nil(t, err)
cli, err := NewGrpcClient(fmt.Sprintf("127.0.0.1:%d", cms.Params.Port), 3*time.Second)
assert.Nil(t, err)
err = cli.Init(&cms.InitParams{ProxyTimeTickChannel: fmt.Sprintf("proxyTimeTick%d", randVal)})
assert.Nil(t, err)
err = cli.Start()
assert.Nil(t, err)
t.Run("create collection", func(t *testing.T) {
schema := schemapb.CollectionSchema{
Name: "testColl",
Description: "testColl",
AutoID: true,
Fields: []*schemapb.FieldSchema{
{
FieldID: 100,
Name: "vector",
IsPrimaryKey: false,
Description: "vector",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: nil,
IndexParams: nil,
},
},
}
sbf, err := proto.Marshal(&schema)
assert.Nil(t, err)
req := &milvuspb.CreateCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreateCollection,
MsgID: 100,
Timestamp: 100,
SourceID: 100,
},
DbName: "testDb",
CollectionName: "testColl",
Schema: sbf,
}
status, err := cli.CreateCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(createCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, createCollectionArray[0].Req.Base.MsgType, commonpb.MsgType_kCreateCollection)
assert.Equal(t, createCollectionArray[0].Req.CollectionName, "testColl")
})
t.Run("has collection", func(t *testing.T) {
req := &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 101,
Timestamp: 101,
SourceID: 101,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, true)
req = &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 102,
Timestamp: 102,
SourceID: 102,
},
DbName: "testDb",
CollectionName: "testColl2",
}
rsp, err = cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, false)
req = &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 102,
Timestamp: 102,
SourceID: 102,
},
DbName: "testDb",
CollectionName: "testColl2",
}
rsp, err = cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
})
t.Run("describe collection", func(t *testing.T) {
req := &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDescribeCollection,
MsgID: 103,
Timestamp: 103,
SourceID: 103,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.DescribeCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Schema.Name, "testColl")
})
t.Run("get collection statistics", func(t *testing.T) {
req := &milvuspb.CollectionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO,miss msg type
MsgID: 104,
Timestamp: 104,
SourceID: 104,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.GetCollectionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "0")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "0")
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
seg := &etcdpb.SegmentMeta{
SegmentID: 101,
CollectionID: collMeta.ID,
PartitionTag: cms.Params.DefaultPartitionTag,
}
err = core.MetaTable.AddSegment(seg)
assert.Nil(t, err)
req = &milvuspb.CollectionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO,miss msg type
MsgID: 105,
Timestamp: 105,
SourceID: 105,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err = cli.GetCollectionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "16")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "1024")
})
t.Run("show collection", func(t *testing.T) {
req := &milvuspb.ShowCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kShowCollections,
MsgID: 106,
Timestamp: 106,
SourceID: 106,
},
DbName: "testDb",
}
rsp, err := cli.ShowCollections(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.CollectionNames[0], "testColl")
assert.Equal(t, len(rsp.CollectionNames), 1)
})
t.Run("create partition", func(t *testing.T) {
req := &milvuspb.CreatePartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreatePartition,
MsgID: 107,
Timestamp: 107,
SourceID: 107,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
status, err := cli.CreatePartition(req)
assert.Nil(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
assert.Equal(t, len(collMeta.PartitionIDs), 2)
assert.Equal(t, collMeta.PartitionTags[1], "testPartition")
})
t.Run("has partition", func(t *testing.T) {
req := &milvuspb.HasPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasPartition,
MsgID: 108,
Timestamp: 108,
SourceID: 108,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
rsp, err := cli.HasPartition(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, true)
})
t.Run("get partition statistics", func(t *testing.T) {
req := &milvuspb.PartitionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO, msg type
MsgID: 109,
Timestamp: 109,
SourceID: 109,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: cms.Params.DefaultPartitionTag,
}
rsp, err := cli.GetPartitionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "16")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "1024")
})
t.Run("show partition", func(t *testing.T) {
req := &milvuspb.ShowPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kShowPartitions,
MsgID: 110,
Timestamp: 110,
SourceID: 110,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.ShowPartitions(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.PartitionNames), 2)
})
t.Run("drop partition", func(t *testing.T) {
req := &milvuspb.DropPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropPartition,
MsgID: 199,
Timestamp: 199,
SourceID: 199,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
status, err := cli.DropPartition(req)
assert.Nil(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
assert.Equal(t, len(collMeta.PartitionIDs), 1)
assert.Equal(t, collMeta.PartitionTags[0], cms.Params.DefaultPartitionTag)
})
t.Run("drop collection", func(t *testing.T) {
req := &milvuspb.DropCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropCollection,
MsgID: 200,
Timestamp: 200,
SourceID: 200,
},
DbName: "testDb",
CollectionName: "testColl",
}
status, err := cli.DropCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(dropCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, dropCollectionArray[0].Req.Base.MsgType, commonpb.MsgType_kDropCollection)
assert.Equal(t, dropCollectionArray[0].Req.CollectionName, "testColl")
req = &milvuspb.DropCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropCollection,
MsgID: 200,
Timestamp: 200,
SourceID: 200,
},
DbName: "testDb",
CollectionName: "testColl",
}
status, err = cli.DropCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(dropCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
})
err = cli.Stop()
assert.Nil(t, err)
err = svr.Stop()
assert.Nil(t, err)
}
package masterservice
import (
"context"
"fmt"
"net"
"sync"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"google.golang.org/grpc"
)
// grpc wrapper
type GrpcServer struct {
core cms.Interface
grpcServer *grpc.Server
grpcError error
grpcErrMux sync.Mutex
ctx context.Context
cancel context.CancelFunc
}
func NewGrpcServer() (*GrpcServer, error) {
s := &GrpcServer{}
var err error
s.ctx, s.cancel = context.WithCancel(context.Background())
if s.core, err = cms.NewCore(s.ctx); err != nil {
return nil, err
}
s.grpcServer = grpc.NewServer()
s.grpcError = nil
masterpb.RegisterMasterServiceServer(s.grpcServer, s)
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", cms.Params.Port))
if err != nil {
return nil, err
}
go func() {
if err := s.grpcServer.Serve(lis); err != nil {
s.grpcErrMux.Lock()
defer s.grpcErrMux.Unlock()
s.grpcError = err
}
}()
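// Best-effort check only: Serve runs in its own goroutine, so an error that
// occurs after this read will surface only on later inspections of grpcError.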
s.grpcErrMux.Lock()
err = s.grpcError
s.grpcErrMux.Unlock()
if err != nil {
return nil, err
}
return s, nil
}
func (s *GrpcServer) Init(params *cms.InitParams) error {
return s.core.Init(params)
}
func (s *GrpcServer) Start() error {
return s.core.Start()
}
func (s *GrpcServer) Stop() error {
err := s.core.Stop()
s.cancel()
s.grpcServer.GracefulStop()
return err
}
func (s *GrpcServer) GetServiceStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ServiceStates, error) {
return nil, nil
}
//DDL request
func (s *GrpcServer) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return s.core.CreateCollection(in)
}
func (s *GrpcServer) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
return s.core.DropCollection(in)
}
func (s *GrpcServer) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
return s.core.HasCollection(in)
}
func (s *GrpcServer) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
return s.core.DescribeCollection(in)
}
func (s *GrpcServer) GetCollectionStatistics(ctx context.Context, in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
return s.core.GetCollectionStatistics(in)
}
func (s *GrpcServer) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
return s.core.ShowCollections(in)
}
func (s *GrpcServer) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
return s.core.CreatePartition(in)
}
func (s *GrpcServer) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
return s.core.DropPartition(in)
}
func (s *GrpcServer) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
return s.core.HasPartition(in)
}
func (s *GrpcServer) GetPartitionStatistics(ctx context.Context, in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
return s.core.GetPartitionStatistics(in)
}
func (s *GrpcServer) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
return s.core.ShowPartitions(in)
}
//index builder service
func (s *GrpcServer) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return s.core.CreateIndex(in)
}
func (s *GrpcServer) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return s.core.DescribeIndex(in)
}
//global timestamp allocator
func (s *GrpcServer) AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
return s.core.AllocTimestamp(in)
}
func (s *GrpcServer) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
return s.core.AllocID(in)
}
//receive time tick from proxy service and put it into this channel
func (s *GrpcServer) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetTimeTickChannel(empty)
}
//receive ddl requests from rpc and time ticks from proxy service, and put them into this channel
func (s *GrpcServer) GetDdChannel(ctx context.Context, in *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetDdChannel(in)
}
//just defines a channel; not currently used
func (s *GrpcServer) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetStatisticsChannel(empty)
}
func (s *GrpcServer) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
return s.core.DescribeSegment(in)
}
func (s *GrpcServer) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
return s.core.ShowSegments(in)
}
//TODO, move to query node
func (s *GrpcServer) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
panic("implement me")
}
//TODO, move to data service
func (s *GrpcServer) AssignSegmentID(ctx context.Context, request *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
panic("implement me")
}
......@@ -495,3 +495,7 @@ func (s *Master) DescribeSegment(ctx context.Context, request *milvuspb.Describe
func (s *Master) ShowSegments(ctx context.Context, request *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
panic("implement me")
}
func (s *Master) GetDdChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
panic("implement me")
}
package masterservice
// system field id:
// 0: unique row id
// 1: timestamp
// 100: first user field id
// 101: second user field id
// 102: ...
const (
StartOfUserFieldID = 100
RowIDField = 0
TimeStampField = 1
RowIDFieldName = "RowID"
TimeStampFieldName = "Timestamp"
)
package masterservice
import (
"log"
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
// Allocator is a Timestamp Oracle allocator.
type Allocator interface {
// Initialize is used to initialize a TSO allocator.
// It will synchronize TSO with etcd and initialize the
// memory for later allocation work.
Initialize() error
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
UpdateTSO() error
// SetTSO sets the physical part with the given tso. It's mainly used for BR restore
// and cannot forcibly set the TSO smaller than now.
SetTSO(tso uint64) error
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
GenerateTSO(count uint32) (uint64, error)
// Reset is used to reset the TSO allocator.
Reset()
}
// GlobalTSOAllocator is the global single point TSO allocator.
type GlobalTSOAllocator struct {
tso *timestampOracle
}
// NewGlobalTSOAllocator creates a new global TSO allocator.
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
var saveInterval = 3 * time.Second
return &GlobalTSOAllocator{
tso: &timestampOracle{
kvBase: kvBase,
saveInterval: saveInterval,
maxResetTSGap: func() time.Duration { return 3 * time.Second },
key: key,
},
}
}
// Initialize will initialize the created global TSO allocator.
func (gta *GlobalTSOAllocator) Initialize() error {
return gta.tso.InitTimestamp()
}
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
func (gta *GlobalTSOAllocator) UpdateTSO() error {
return gta.tso.UpdateTimestamp()
}
// SetTSO sets the physical part with given tso.
func (gta *GlobalTSOAllocator) SetTSO(tso uint64) error {
return gta.tso.ResetUserTimestamp(tso)
}
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
var physical, logical int64
if count == 0 {
return 0, errors.New("tso count should be positive")
}
maxRetryCount := 10
for i := 0; i < maxRetryCount; i++ {
current := (*atomicObject)(atomic.LoadPointer(&gta.tso.TSO))
if current == nil || current.physical.Equal(typeutil.ZeroTime) {
// If it's leader, maybe SyncTimestamp hasn't completed yet
log.Println("sync hasn't completed yet, wait for a while")
time.Sleep(200 * time.Millisecond)
continue
}
physical = current.physical.UnixNano() / int64(time.Millisecond)
logical = atomic.AddInt64(&current.logical, int64(count))
if logical >= maxLogical {
log.Println("logical part outside of max logical interval, please check ntp time",
zap.Int("retry-count", i))
time.Sleep(UpdateTimestampStep)
continue
}
return tsoutil.ComposeTS(physical, logical), nil
}
return 0, errors.New("can not get timestamp")
}
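For intuition, a TSO packs the millisecond physical clock into the high bits of a uint64 and the in-memory logical counter into the low bits. A hedged sketch of that composition, assuming the conventional 18-bit logical suffix implied by maxLogical:

const logicalBits = 18 // assumed: maxLogical == 1 << logicalBits

// composeTS mirrors what tsoutil.ComposeTS is expected to do under that assumption.
func composeTS(physicalMs, logical int64) uint64 {
	return uint64(physicalMs<<logicalBits | logical)
}

// parseTS splits a timestamp back into its physical and logical parts.
func parseTS(ts uint64) (physicalMs, logical int64) {
	return int64(ts >> logicalBits), int64(ts & ((1 << logicalBits) - 1))
}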
func (gta *GlobalTSOAllocator) Alloc(count uint32) (typeutil.Timestamp, error) {
//return gta.tso.SyncTimestamp()
start, err := gta.GenerateTSO(count)
if err != nil {
return typeutil.ZeroTimestamp, err
}
//ret := make([]typeutil.Timestamp, count)
//for i:=uint32(0); i < count; i++{
// ret[i] = start + uint64(i)
//}
return start, err
}
func (gta *GlobalTSOAllocator) AllocOne() (typeutil.Timestamp, error) {
return gta.GenerateTSO(1)
}
// Reset is used to reset the TSO allocator.
func (gta *GlobalTSOAllocator) Reset() {
gta.tso.ResetTimestamp()
}
package masterservice
import (
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
// GlobalIDAllocator is the global single point ID allocator.
type GlobalIDAllocator struct {
allocator Allocator
}
func NewGlobalIDAllocator(key string, base kv.TxnBase) *GlobalIDAllocator {
return &GlobalIDAllocator{
allocator: NewGlobalTSOAllocator(key, base),
}
}
// Initialize will initialize the created global TSO allocator.
func (gia *GlobalIDAllocator) Initialize() error {
return gia.allocator.Initialize()
}
// Alloc reserves a contiguous block of count unique IDs.
// Make sure you have initialized the allocator before calling.
func (gia *GlobalIDAllocator) Alloc(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
timestamp, err := gia.allocator.GenerateTSO(count)
if err != nil {
return 0, 0, err
}
idStart := typeutil.UniqueID(timestamp)
idEnd := idStart + int64(count)
return idStart, idEnd, nil
}
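Callers should treat the returned pair as a half-open range [idStart, idEnd). A small usage sketch; the consumer callback is hypothetical:

// assignRowIDs reserves count IDs from the allocator and hands each one to fn.
func assignRowIDs(gia *GlobalIDAllocator, count uint32, fn func(typeutil.UniqueID)) error {
	idStart, idEnd, err := gia.Alloc(count)
	if err != nil {
		return err
	}
	for id := idStart; id < idEnd; id++ {
		fn(id) // e.g. stamping row IDs on inserted records (hypothetical)
	}
	return nil
}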
func (gia *GlobalIDAllocator) AllocOne() (typeutil.UniqueID, error) {
timestamp, err := gia.allocator.GenerateTSO(1)
if err != nil {
return 0, err
}
idStart := typeutil.UniqueID(timestamp)
return idStart, nil
}
func (gia *GlobalIDAllocator) UpdateID() error {
return gia.allocator.UpdateTSO()
}
This diff is collapsed.
This diff is collapsed.
package masterservice
import (
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)
var Params ParamTable
type ParamTable struct {
paramtable.BaseTable
Address string
Port int
NodeID uint64
PulsarAddress string
EtcdAddress string
MetaRootPath string
KvRootPath string
ProxyTimeTickChannel string
MsgChannelSubName string
TimeTickChannel string
DdChannel string
StatisticsChannel string
MaxPartitionNum int64
DefaultPartitionTag string
}
This diff is collapsed.
This diff is collapsed.
......@@ -18,6 +18,32 @@ message ProxyMeta {
repeated string result_channelIDs = 3;
}
message PartitionInfo {
string partition_name = 1;
int64 partitionID = 2;
repeated int64 segmentIDs = 3;
}
message CollectionInfo {
int64 ID = 1;
schema.CollectionSchema schema = 2;
uint64 create_time = 3;
repeated int64 partitionIDs = 4;
}
message IndexInfo {
string index_name = 1;
int64 indexID = 2;
repeated common.KeyValuePair index_params = 3;
}
message SegmentIndexInfo {
int64 segmentID = 1;
int64 fieldID = 2;
int64 indexID = 3;
int64 buildID = 4;
}
message CollectionMeta {
int64 ID=1;
schema.CollectionSchema schema=2;
......
......@@ -125,6 +125,8 @@ service MasterService {
rpc GetTimeTickChannel(common.Empty) returns (milvus.StringResponse) {}
rpc GetDdChannel(common.Empty) returns (milvus.StringResponse) {}
rpc GetStatisticsChannel(common.Empty) returns (milvus.StringResponse) {}
}
\ No newline at end of file
......@@ -119,7 +119,7 @@ message LoadSegmentRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
repeated int64 partitionIDs = 4;
int64 partitionID = 4;
repeated int64 segmentIDs = 5;
repeated int64 fieldIDs = 6;
}
......
......@@ -226,7 +226,7 @@ func TestCollectionReplica_addSegment(t *testing.T) {
const segmentNum = 3
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment(UniqueID(i), tag, collectionID)
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -246,7 +246,7 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment(UniqueID(i), tag, collectionID)
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -268,7 +268,7 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment(UniqueID(i), tag, collectionID)
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......@@ -288,7 +288,7 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
tag := "default"
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment(UniqueID(i), tag, collectionID)
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
assert.NoError(t, err)
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
......
......@@ -86,7 +86,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
log.Println(err)
continue
}
err = iNode.replica.addSegment(task.SegmentID, task.PartitionName, collection.ID())
err = iNode.replica.addSegment2(task.SegmentID, task.PartitionName, collection.ID(), segTypeGrowing)
if err != nil {
log.Println(err)
continue
......
......@@ -160,7 +160,7 @@ func (mService *metaService) processSegmentCreate(id string, value string) {
// TODO: what if seg == nil? We need to notify master and return rpc request failed
if seg != nil {
err := mService.replica.addSegment(seg.SegmentID, seg.PartitionTag, seg.CollectionID)
err := mService.replica.addSegment2(seg.SegmentID, seg.PartitionTag, seg.CollectionID, segTypeGrowing)
if err != nil {
log.Println(err)
return
......
......@@ -14,9 +14,14 @@ import "C"
type Partition struct {
partitionTag string
id UniqueID
segments []*Segment
}
func (p *Partition) ID() UniqueID {
return p.id
}
func (p *Partition) Tag() string {
return (*p).partitionTag
}
......
......@@ -20,7 +20,7 @@ func TestPartition_Segments(t *testing.T) {
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.replica.addSegment(UniqueID(i), targetPartition.partitionTag, collection.ID())
err := node.replica.addSegment2(UniqueID(i), targetPartition.partitionTag, collection.ID(), segTypeGrowing)
assert.NoError(t, err)
}
......
This diff is collapsed.
......@@ -110,7 +110,7 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collecti
err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionTags[0])
assert.NoError(t, err)
err = node.replica.addSegment(segmentID, collectionMeta.PartitionTags[0], collectionID)
err = node.replica.addSegment2(segmentID, collectionMeta.PartitionTags[0], collectionID, segTypeGrowing)
assert.NoError(t, err)
}
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.