Commit 4542ed38 authored by bigsheeper, committed by yefu.chen

Add unittest about timestamp, add client id

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
Parent 960b79b6
@@ -28,7 +28,7 @@ timesync:
 storage:
   driver: TIKV
   address: localhost
-  port: 2379
+  port: 0
   accesskey: ab
   secretkey: dd
@@ -41,7 +41,7 @@ pulsar:
 reader:
   clientid: 1
   stopflag: -1
-  readerqueuesize: 10240
+  readerqueuesize: 1024
   searchchansize: 10000
   key2segchansize: 10000
   inserttopicstart: 0
......
@@ -615,9 +615,9 @@ SegmentNaive::GetMemoryUsageInBytes() {
             total_bytes += vec_ptr->IndexSize();
         }
     }
-    int64_t ins_n = (record_.reserved + DefaultElementPerChunk - 1) & ~(DefaultElementPerChunk - 1);
+    int64_t ins_n = (record_.reserved + DefaultElementPerChunk - 1) & (DefaultElementPerChunk - 1);
     total_bytes += ins_n * (schema_->get_total_sizeof() + 16 + 1);
-    int64_t del_n = (deleted_record_.reserved + DefaultElementPerChunk - 1) & ~(DefaultElementPerChunk - 1);
+    int64_t del_n = (deleted_record_.reserved + DefaultElementPerChunk - 1) & (DefaultElementPerChunk - 1);
     total_bytes += del_n * (16 * 2);
     return total_bytes;
 }
......
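Note the dropped `~` in both changed lines: for a power-of-two chunk size N, `(x + N - 1) & ~(N - 1)` rounds x up to the next multiple of N, while `(x + N - 1) & (N - 1)` keeps only the low bits, i.e. a remainder in [0, N). The two expressions are not equivalent, so the new estimate no longer grows with `reserved`. A minimal Go sketch of the difference, with a stand-in power-of-two constant (the real DefaultElementPerChunk lives in the C++ core):

```go
package main

import "fmt"

// defaultElementPerChunk is a stand-in power-of-two constant.
const defaultElementPerChunk = 32

// roundUp is the old expression: &^ (N-1) clears the low bits after
// adding N-1, rounding reserved up to the next multiple of N.
func roundUp(reserved int64) int64 {
	return (reserved + defaultElementPerChunk - 1) &^ (defaultElementPerChunk - 1)
}

// lowBits is the new expression: & (N-1) keeps only the low bits,
// i.e. (reserved + N - 1) mod N.
func lowBits(reserved int64) int64 {
	return (reserved + defaultElementPerChunk - 1) & (defaultElementPerChunk - 1)
}

func main() {
	for _, reserved := range []int64{1, 31, 32, 33, 100} {
		fmt.Println(reserved, roundUp(reserved), lowBits(reserved))
	}
	// e.g. reserved=33: roundUp=64, lowBits=0
}
```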
@@ -289,9 +289,6 @@ TEST(CApiTest, GetMemoryUsageInBytesTest) {
     auto partition = NewPartition(collection, partition_name);
     auto segment = NewSegment(partition, 0);
 
-    auto old_memory_usage_size = GetMemoryUsageInBytes(segment);
-    std::cout << "old_memory_usage_size = " << old_memory_usage_size << std::endl;
-
     std::vector<char> raw_data;
     std::vector<uint64_t> timestamps;
     std::vector<int64_t> uids;
@@ -320,8 +317,6 @@ TEST(CApiTest, GetMemoryUsageInBytesTest) {
     auto memory_usage_size = GetMemoryUsageInBytes(segment);
-    std::cout << "new_memory_usage_size = " << memory_usage_size << std::endl;
-
     assert(memory_usage_size == 1898459);
 
     DeleteCollection(collection);
......
@@ -178,6 +178,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
         auto channel_id = makeHash(&uid, sizeof(uint64_t)) % topic_num;
         try {
             mut_msg.set_segment_id(segment_id(request.collection_name(), channel_id, timestamp));
+            printf("%ld \n", mut_msg.segment_id());
             mut_msg.mutable_rows_data()->CopyFrom(request.rows_data(i));
             mut_msg.mutable_extra_params()->CopyFrom(request.extra_params());
......
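The surrounding code routes each row by hashing its uid and taking the hash modulo the topic count, so a given entity always lands on the same insert channel. A small Go sketch of that routing idea; FNV-1a is only a stand-in here, since the diff does not show what makeHash actually computes:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// pickChannel sketches the routing in SendMutMessage: hash the entity
// uid and map it onto one of topicNum insert channels. FNV-1a is a
// stand-in; the real makeHash implementation may differ.
func pickChannel(uid uint64, topicNum uint32) uint32 {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uid)
	h := fnv.New32a()
	h.Write(buf[:])
	return h.Sum32() % topicNum
}

func main() {
	for _, uid := range []uint64{1, 2, 42} {
		fmt.Println(uid, "->", pickChannel(uid, 16))
	}
}
```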
@@ -104,7 +104,6 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
 	}
 	printSegmentStruct(segment)
 
-	// TODO: fix this after channel range config finished
 	//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
 	//	return
 	//}
@@ -118,6 +117,7 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
 		newSegment := partition.NewSegment(newSegmentID)
 		newSegment.SegmentStatus = SegmentOpened
 		newSegment.SegmentCloseTime = segment.CloseTimeStamp
+		partition.OpenedSegments = append(partition.OpenedSegments, newSegment)
 		node.SegmentsMap[newSegmentID] = newSegment
 	}
 }
@@ -147,7 +147,6 @@ func (node *QueryNode) processSegmentModify(id string, value string) {
 	}
 	printSegmentStruct(segment)
 
-	// TODO: fix this after channel range config finished
 	//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
 	//	return
 	//}
......
@@ -16,7 +16,8 @@ import "C"
 type Partition struct {
 	PartitionPtr   C.CPartition
 	PartitionName  string
-	Segments       []*Segment
+	OpenedSegments []*Segment
+	ClosedSegments []*Segment
 }
 
 func (p *Partition) NewSegment(segmentId int64) *Segment {
@@ -27,7 +28,7 @@ func (p *Partition) NewSegment(segmentId int64) *Segment {
 	segmentPtr := C.NewSegment(p.PartitionPtr, C.ulong(segmentId))
 
 	var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentId: segmentId}
-	p.Segments = append(p.Segments, newSegment)
+	p.OpenedSegments = append(p.OpenedSegments, newSegment)
 	return newSegment
 }
......
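Splitting the single Segments slice into OpenedSegments and ClosedSegments lets the management loop scan only open segments. A self-contained sketch of that bookkeeping, with trimmed-down, cgo-free stand-ins for Segment and Partition; unlike the hunks above, it also removes moved segments from OpenedSegments rather than leaving them in place:

```go
package main

import "fmt"

// Trimmed-down stand-ins for the real cgo-backed types, just to show
// the OpenedSegments/ClosedSegments bookkeeping.
type Segment struct {
	SegmentId        int64
	SegmentCloseTime uint64
}

type Partition struct {
	OpenedSegments []*Segment
	ClosedSegments []*Segment
}

// NewSegment registers a fresh segment as opened.
func (p *Partition) NewSegment(id int64, closeTime uint64) *Segment {
	s := &Segment{SegmentId: id, SegmentCloseTime: closeTime}
	p.OpenedSegments = append(p.OpenedSegments, s)
	return s
}

// CloseExpired moves segments whose close time has passed into
// ClosedSegments and keeps the rest open.
func (p *Partition) CloseExpired(timeNow uint64) {
	var stillOpen []*Segment
	for _, s := range p.OpenedSegments {
		if timeNow >= s.SegmentCloseTime {
			p.ClosedSegments = append(p.ClosedSegments, s)
		} else {
			stillOpen = append(stillOpen, s)
		}
	}
	p.OpenedSegments = stillOpen
}

func main() {
	p := &Partition{}
	p.NewSegment(1, 100)
	p.NewSegment(2, 200)
	p.CloseExpired(150)
	fmt.Println(len(p.OpenedSegments), len(p.ClosedSegments)) // 1 1
}
```

One design note: NewSegment already appends to OpenedSegments, so the extra append added in processSegmentCreate above appears to register the same segment twice.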
@@ -248,11 +248,17 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
 	const CountMsgNum = 1000 * 1000
 
 	if Debug {
+		var start = time.Now()
 		var printFlag = true
-		var startTime = true
-		var start time.Time
 
 		for {
+			// Test insert time
+			if printFlag && node.msgCounter.InsertCounter >= CountMsgNum {
+				printFlag = false
+				timeSince := time.Since(start)
+				fmt.Println("============> Do", node.msgCounter.InsertCounter, "Insert in", timeSince, "<============")
+			}
+
 			var msgLen = node.PrepareBatchMsg()
 			var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
 			assert.NotEqual(nil, 0, timeRange.timestampMin)
@@ -262,12 +268,6 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
 				continue
 			}
 
-			if startTime {
-				fmt.Println("============> Start Test <============")
-				startTime = false
-				start = time.Now()
-			}
-
 			node.QueryNodeDataInit()
 			node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
 			//fmt.Println("MessagesPreprocess Done")
@@ -277,13 +277,6 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
 			node.DoInsertAndDelete()
 			//fmt.Println("DoInsertAndDelete Done")
 			node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
-
-			// Test insert time
-			if printFlag && node.msgCounter.InsertCounter >= CountMsgNum {
-				printFlag = false
-				timeSince := time.Since(start)
-				fmt.Println("============> Do", node.msgCounter.InsertCounter, "Insert in", timeSince, "<============")
-			}
 		}
 	}
@@ -589,7 +582,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
 			fmt.Println(err.Error())
 			return msgPb.Status{ErrorCode: 1}
 		}
-		fmt.Println(res.ResultIds)
 		for i := 0; i < len(res.ResultIds); i++ {
 			resultsTmp = append(resultsTmp, SearchResultTmp{ResultId: res.ResultIds[i], ResultDistance: res.ResultDistances[i]})
 		}
......
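The Debug branch now starts the clock as soon as the loop is entered and prints a one-shot report once the insert counter crosses CountMsgNum; previously the clock started on the first non-empty batch and the report sat at the bottom of the loop. A minimal sketch of the same one-shot measurement, with a fake processBatch standing in for PrepareBatchMsg/DoInsertAndDelete:

```go
package main

import (
	"fmt"
	"time"
)

// One-shot throughput report, as in the Debug branch above: start the
// clock on entry, print once the counter crosses the threshold.
func main() {
	const countMsgNum = 1000 * 1000
	var insertCounter int64

	start := time.Now()
	printFlag := true

	for {
		if printFlag && insertCounter >= countMsgNum {
			printFlag = false
			fmt.Println("============> Do", insertCounter, "Insert in", time.Since(start), "<============")
			break // the real loop keeps serving; the sketch stops here
		}
		insertCounter += processBatch()
	}
}

// processBatch pretends to insert a batch of rows.
func processBatch() int64 { return 10000 }
```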
@@ -3,7 +3,6 @@ package reader
 import (
 	"context"
 	"github.com/czs007/suvlim/reader/message_client"
-	"log"
 	"sync"
 )
@@ -16,17 +15,11 @@ func StartQueryNode(pulsarURL string) {
 	ctx := context.Background()
 
 	// Segments Services
-	go qn.SegmentManagementService()
+	//go qn.SegmentManagementService()
 	go qn.SegmentStatisticService()
 
 	wg := sync.WaitGroup{}
 
-	err := qn.InitFromMeta()
-
-	if err != nil {
-		log.Printf("Init query node from meta failed")
-		return
-	}
+	qn.InitFromMeta()
 
 	wg.Add(3)
 	go qn.RunMetaService(ctx, &wg)
 	go qn.RunInsertDelete(&wg)
......
@@ -73,8 +73,6 @@ func (s *Segment) CloseSegment(collection* Collection) error {
 		int
 		Close(CSegmentBase c_segment);
 	*/
-	fmt.Println("Closing segment :", s.SegmentId)
-
 	var status = C.Close(s.SegmentPtr)
 	s.SegmentStatus = SegmentClosed
@@ -84,13 +82,11 @@ func (s *Segment) CloseSegment(collection* Collection) error {
 	// Build index after closing segment
 	s.SegmentStatus = SegmentIndexing
-	fmt.Println("Building index...")
 	s.buildIndex(collection)
 
 	// TODO: remove redundant segment indexed status
 	// Change segment status to indexed
 	s.SegmentStatus = SegmentIndexed
-	fmt.Println("Segment closed and indexed")
 
 	return nil
 }
......
@@ -13,19 +13,20 @@ func (node *QueryNode) SegmentsManagement() {
 	//node.queryNodeTimeSync.UpdateTSOTimeSync()
 	//var timeNow = node.queryNodeTimeSync.TSOTimeSync
-	timeNow := node.messageClient.GetTimeNow() >> 18
+	timeNow := node.messageClient.GetTimeNow()
 
 	for _, collection := range node.Collections {
 		for _, partition := range collection.Partitions {
-			for _, segment := range partition.Segments {
-				if segment.SegmentStatus != SegmentOpened {
-					log.Println("Segment have been closed")
-					continue
-				}
-
-				fmt.Println("timeNow = ", timeNow, "SegmentCloseTime = ", segment.SegmentCloseTime)
-				if timeNow >= segment.SegmentCloseTime {
-					go segment.CloseSegment(collection)
+			for _, oldSegment := range partition.OpenedSegments {
+				// TODO: check segment status
+				if timeNow >= oldSegment.SegmentCloseTime {
+					// close old segment and move it into partition.ClosedSegments
+					if oldSegment.SegmentStatus != SegmentOpened {
+						log.Println("Never reach here, Opened segment cannot be closed")
+						continue
+					}
+					go oldSegment.CloseSegment(collection)
+					partition.ClosedSegments = append(partition.ClosedSegments, oldSegment)
 				}
 			}
 		}
@@ -33,7 +34,7 @@ func (node *QueryNode) SegmentsManagement() {
 	}
 }
 
 func (node *QueryNode) SegmentManagementService() {
-	sleepMillisecondTime := 3000
+	sleepMillisecondTime := 1000
 	fmt.Println("do segments management in ", strconv.Itoa(sleepMillisecondTime), "ms")
 	for {
 		time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
@@ -80,8 +81,6 @@ func (node *QueryNode) SegmentStatistic(sleepMillisecondTime int) {
 		statisticData = append(statisticData, stat)
 	}
 
-	fmt.Println("Publish segment statistic")
-	fmt.Println(statisticData)
 	var status = node.PublicStatistic(&statisticData)
 	if status.ErrorCode != msgPb.ErrorCode_SUCCESS {
 		log.Printf("Publish segments statistic failed")
@@ -89,7 +88,7 @@ func (node *QueryNode) SegmentStatistic(sleepMillisecondTime int) {
 	}
 }
 
 func (node *QueryNode) SegmentStatisticService() {
-	sleepMillisecondTime := 3000
+	sleepMillisecondTime := 1000
 	fmt.Println("do segments statistic in ", strconv.Itoa(sleepMillisecondTime), "ms")
 	for {
 		time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
......
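Dropping the `>> 18` means timeNow is now compared against SegmentCloseTime as a raw hybrid timestamp rather than as its physical part, which is only sound if both sides use the same encoding. For reference, a hedged sketch of the usual TSO-style packing with 18 logical bits in the low part (the project's actual helpers are not shown in this diff and may differ):

```go
package main

import (
	"fmt"
	"time"
)

// Hybrid-timestamp helpers, assuming the TSO convention of 18 logical
// bits in the low part, which is what the dropped ">> 18" suggests the
// old code was unpacking. A sketch, not the project's API.
const logicalBits = 18

// composeTS packs physical milliseconds and a logical counter.
func composeTS(ms int64, logical int64) uint64 {
	return uint64(ms<<logicalBits | logical)
}

// physicalMs recovers the physical part, as the old ">> 18" did.
func physicalMs(ts uint64) int64 {
	return int64(ts >> logicalBits)
}

func main() {
	now := time.Now().UnixMilli()
	ts := composeTS(now, 5)
	fmt.Println(physicalMs(ts) == now) // true
}
```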
@@ -21,7 +21,7 @@
 #include "interface/ConnectionImpl.h"
 #include "utils/TimeRecorder.h"
 
-const int N = 200000;
+const int N = 100;
 const int DIM = 16;
 const int LOOP = 10;
......
@@ -3,7 +3,6 @@ package tikv_driver
 import (
 	"context"
 	"errors"
-	"github.com/czs007/suvlim/conf"
 	. "github.com/czs007/suvlim/storage/internal/tikv/codec"
 	. "github.com/czs007/suvlim/storage/pkg/types"
 	"github.com/tikv/client-go/config"
@@ -87,8 +86,7 @@ type TikvStore struct {
 }
 
 func NewTikvStore(ctx context.Context) (*TikvStore, error) {
-	var pdAddress0 = conf.Config.Storage.Address + ":" + strconv.FormatInt(int64(conf.Config.Storage.Port), 10)
-	pdAddrs := []string{pdAddress0}
+	pdAddrs := []string{"127.0.0.1:2379"}
 	conf := config.Default()
 	client, err := rawkv.NewClient(ctx, pdAddrs, conf)
 	if err != nil {
......
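The change above replaces the config-derived PD endpoint with a hard-coded `127.0.0.1:2379`. If the endpoint should stay configurable, a fallback shape like the following keeps the default while still honoring a configured address; `storageConf` is a hypothetical stand-in for the `conf.Config.Storage` struct this commit stops importing:

```go
package main

import (
	"fmt"
	"strconv"
)

// storageConf is a hypothetical stand-in for conf.Config.Storage.
type storageConf struct {
	Address string
	Port    int
}

// pdAddrs prefers the configured endpoint and falls back to the
// default PD address the commit hard-codes.
func pdAddrs(c storageConf) []string {
	if c.Address == "" {
		return []string{"127.0.0.1:2379"}
	}
	return []string{c.Address + ":" + strconv.Itoa(c.Port)}
}

func main() {
	fmt.Println(pdAddrs(storageConf{}))                                 // [127.0.0.1:2379]
	fmt.Println(pdAddrs(storageConf{Address: "localhost", Port: 2379})) // [localhost:2379]
}
```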
@@ -32,48 +32,35 @@ func main() {
 		log.Fatal(err)
 	}
 
-	msgCounter := write_node.MsgCounter{
-		InsertCounter: 0,
-		DeleteCounter: 0,
-	}
-
 	wn := write_node.WriteNode{
 		KvStore:       &kv,
 		MessageClient: &mc,
 		TimeSync:      100,
-		MsgCounter:    &msgCounter,
 	}
 
 	const Debug = true
 	const CountMsgNum = 1000 * 1000
 
 	if Debug {
+		var start = time.Now()
 		var printFlag = true
-		var startTime = true
-		var start time.Time
 
 		for {
+			// Test insert time
+			if printFlag && wn.MsgCounter.InsertCounter >= CountMsgNum {
+				printFlag = false
+				timeSince := time.Since(start)
+				fmt.Println("============> Do", wn.MsgCounter.InsertCounter, "Insert in", timeSince, "<============")
+			}
+
 			if ctx.Err() != nil {
 				break
 			}
 			msgLength := wn.MessageClient.PrepareBatchMsg()
 			if msgLength > 0 {
-				if startTime {
-					fmt.Println("============> Start Test <============")
-					startTime = false
-					start = time.Now()
-				}
 				wn.DoWriteNode(ctx, &wg)
 				fmt.Println("write node do a batch message, storage len: ", msgLength)
 			}
-
-			// Test insert time
-			if printFlag && wn.MsgCounter.InsertCounter >= CountMsgNum {
-				printFlag = false
-				timeSince := time.Since(start)
-				fmt.Println("============> Do", wn.MsgCounter.InsertCounter, "Insert in", timeSince, "<============")
-			}
 		}
 	}
......