Commit e89a1ed2 authored by H Heisenberg

Merge branch 'branch-0.4.0' into add_unitest


Former-commit-id: 15082114afeadde31e3c10a80f78ae3b57753cc0
......@@ -114,6 +114,7 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-531 - Disable next version code
- MS-533 - Update resource_test to cover dump function
- MS-523 - Config file validation
- MS-539 - Remove old task code
## New Feature
- MS-343 - Implement ResourceMgr
......
......@@ -35,7 +35,6 @@ cache_config:
gpu_cache_free_percent: 0.85 # old data will be erased from the cache when it is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
gpu_ids: # gpu id
- 0
- 1
engine_config:
use_blas_threshold: 20
......
......@@ -14,11 +14,23 @@ namespace knowhere {
KnowhereException::KnowhereException(const std::string &msg):msg(msg) {}
KnowhereException::KnowhereException(const std::string &m, const char *funcName, const char *file, int line) {
#ifdef DEBUG
int size = snprintf(nullptr, 0, "Error in %s at %s:%d: %s",
funcName, file, line, m.c_str());
msg.resize(size + 1);
snprintf(&msg[0], msg.size(), "Error in %s at %s:%d: %s",
funcName, file, line, m.c_str());
#else
std::string file_path(file);
auto const pos = file_path.find_last_of('/');
// keep a named std::string; substr(...).c_str() alone would leave filename dangling
std::string filename = file_path.substr(pos + 1);
int size = snprintf(nullptr, 0, "Error in %s at %s:%d: %s",
funcName, filename.c_str(), line, m.c_str());
msg.resize(size + 1);
snprintf(&msg[0], msg.size(), "Error in %s at %s:%d: %s",
funcName, filename.c_str(), line, m.c_str());
#endif
}
const char *KnowhereException::what() const noexcept {
......
......@@ -53,7 +53,7 @@ class MySQLMetaImpl : public Meta {
Status UpdateTableIndex(const std::string &table_id, const TableIndex& index) override;
Status UpdateTableFlag(const std::string &table_id, int64_t flag);
Status UpdateTableFlag(const std::string &table_id, int64_t flag) override;
Status DescribeTableIndex(const std::string &table_id, TableIndex& index) override;
......
......@@ -89,7 +89,6 @@ TaskScheduler::TaskDispatchWorker() {
return true;
}
#ifdef NEW_SCHEDULER
// TODO: Put task into Disk-TaskTable
auto task = TaskConvert(task_ptr);
auto disk_list = ResMgrInst::GetInstance()->GetDiskResources();
......@@ -98,16 +97,7 @@ TaskScheduler::TaskDispatchWorker() {
disk->task_table().Put(task);
}
}
#else
//execute task
ScheduleTaskPtr next_task = task_ptr->Execute();
if(next_task != nullptr) {
task_queue_.Put(next_task);
}
#endif
}
return true;
}
bool
......@@ -126,8 +116,6 @@ TaskScheduler::TaskWorker() {
task_queue_.Put(next_task);
}
}
return true;
}
}
......
......@@ -17,11 +17,6 @@ DeleteTask::DeleteTask(const DeleteContextPtr& context)
}
std::shared_ptr<IScheduleTask> DeleteTask::Execute() {
if(context_ != nullptr && context_->meta() != nullptr) {
context_->meta()->DeleteTableFiles(context_->table_id());
}
return nullptr;
}
......
......@@ -15,82 +15,13 @@ namespace zilliz {
namespace milvus {
namespace engine {
namespace {
void CollectFileMetrics(int file_type, size_t file_size) {
switch(file_type) {
case meta::TableFileSchema::RAW:
case meta::TableFileSchema::TO_INDEX: {
server::Metrics::GetInstance().RawFileSizeHistogramObserve(file_size);
server::Metrics::GetInstance().RawFileSizeTotalIncrement(file_size);
server::Metrics::GetInstance().RawFileSizeGaugeSet(file_size);
break;
}
default: {
server::Metrics::GetInstance().IndexFileSizeHistogramObserve(file_size);
server::Metrics::GetInstance().IndexFileSizeTotalIncrement(file_size);
server::Metrics::GetInstance().IndexFileSizeGaugeSet(file_size);
break;
}
}
}
}
IndexLoadTask::IndexLoadTask()
: IScheduleTask(ScheduleTaskType::kIndexLoad) {
}
std::shared_ptr<IScheduleTask> IndexLoadTask::Execute() {
server::TimeRecorder rc("");
//step 1: load index
ExecutionEnginePtr index_ptr = EngineFactory::Build(file_->dimension_,
file_->location_,
(EngineType)file_->engine_type_,
(MetricType)file_->metric_type_,
file_->nlist_);
try {
auto stat = index_ptr->Load();
if(!stat.ok()) {
//typical error: file not available
ENGINE_LOG_ERROR << "Failed to load index file: file not available";
for(auto& context : search_contexts_) {
context->IndexSearchDone(file_->id_);//mark as done to avoid deadlock, even on failure
}
return nullptr;
}
} catch (std::exception& ex) {
//typical error: out of disk space or permission denied
std::string msg = "Failed to load index file: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
for(auto& context : search_contexts_) {
context->IndexSearchDone(file_->id_);//mark as done to avoid deadlock, even on failure
}
return nullptr;
}
size_t file_size = index_ptr->PhysicalSize();
std::string info = "Load file id:" + std::to_string(file_->id_) + " file type:" + std::to_string(file_->file_type_)
+ " size:" + std::to_string(file_size) + " bytes from location: " + file_->location_ + " totally cost";
double span = rc.ElapseFromBegin(info);
for(auto& context : search_contexts_) {
context->AccumLoadCost(span);
}
CollectFileMetrics(file_->file_type_, file_size);
//step 2: return search task for later execution
SearchTaskPtr task_ptr = std::make_shared<SearchTask>();
task_ptr->index_id_ = file_->id_;
task_ptr->file_type_ = file_->file_type_;
task_ptr->index_engine_ = index_ptr;
task_ptr->search_contexts_.swap(search_contexts_);
return std::static_pointer_cast<IScheduleTask>(task_ptr);
return nullptr;
}
}
......
......@@ -14,259 +14,12 @@ namespace zilliz {
namespace milvus {
namespace engine {
namespace {
static constexpr size_t PARALLEL_REDUCE_THRESHOLD = 1000000;
static constexpr size_t PARALLEL_REDUCE_BATCH = 1000;
bool NeedParallelReduce(uint64_t nq, uint64_t topk) {
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode& db_config = config.GetConfig(server::CONFIG_DB);
bool need_parallel = db_config.GetBoolValue(server::CONFIG_DB_PARALLEL_REDUCE, false);
if(!need_parallel) {
return false;
}
return nq*topk >= PARALLEL_REDUCE_THRESHOLD;
}
void ParallelReduce(std::function<void(size_t, size_t)>& reduce_function, size_t max_index) {
size_t reduce_batch = PARALLEL_REDUCE_BATCH;
auto thread_count = std::thread::hardware_concurrency() - 1; //leave one core free for other work
if(thread_count > 0) {
reduce_batch = max_index/thread_count + 1;
}
ENGINE_LOG_DEBUG << "use " << thread_count <<
" thread parallelly do reduce, each thread process " << reduce_batch << " vectors";
std::vector<std::shared_ptr<std::thread> > thread_array;
size_t from_index = 0;
while(from_index < max_index) {
size_t to_index = from_index + reduce_batch;
if(to_index > max_index) {
to_index = max_index;
}
auto reduce_thread = std::make_shared<std::thread>(reduce_function, from_index, to_index);
thread_array.push_back(reduce_thread);
from_index = to_index;
}
for(auto& thread_ptr : thread_array) {
thread_ptr->join();
}
}
}
SearchTask::SearchTask()
: IScheduleTask(ScheduleTaskType::kSearch) {
}
std::shared_ptr<IScheduleTask> SearchTask::Execute() {
if(index_engine_ == nullptr) {
return nullptr;
}
ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_<< " with "
<< search_contexts_.size() << " tasks";
server::TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));
server::CollectSearchTaskMetrics metrics(file_type_);
bool metric_l2 = (index_engine_->IndexMetricType() == MetricType::L2);
std::vector<long> output_ids;
std::vector<float> output_distence;
for(auto& context : search_contexts_) {
//step 1: allocate memory
auto inner_k = context->topk();
auto nprobe = context->nprobe();
output_ids.resize(inner_k*context->nq());
output_distence.resize(inner_k*context->nq());
try {
//step 2: search
index_engine_->Search(context->nq(), context->vectors(), inner_k, nprobe, output_distence.data(),
output_ids.data());
double span = rc.RecordSection("do search for context:" + context->Identity());
context->AccumSearchCost(span);
//step 3: cluster result
SearchContext::ResultSet result_set;
auto spec_k = index_engine_->Count() < context->topk() ? index_engine_->Count() : context->topk();
SearchTask::ClusterResult(output_ids, output_distence, context->nq(), spec_k, result_set);
span = rc.RecordSection("cluster result for context:" + context->Identity());
context->AccumReduceCost(span);
//step 4: pick up topk result
SearchTask::TopkResult(result_set, inner_k, metric_l2, context->GetResult());
span = rc.RecordSection("reduce topk for context:" + context->Identity());
context->AccumReduceCost(span);
} catch (std::exception& ex) {
ENGINE_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
context->IndexSearchDone(index_id_);//mark as done to avoid deadlock, even if search failed
continue;
}
//step 5: notify to send result to client
context->IndexSearchDone(index_id_);
}
rc.ElapseFromBegin("totally cost");
return nullptr;
}
Status SearchTask::ClusterResult(const std::vector<long> &output_ids,
const std::vector<float> &output_distence,
uint64_t nq,
uint64_t topk,
SearchContext::ResultSet &result_set) {
if(output_ids.size() < nq*topk || output_distence.size() < nq*topk) {
std::string msg = "Invalid id array size: " + std::to_string(output_ids.size()) +
" distance array size: " + std::to_string(output_distence.size());
ENGINE_LOG_ERROR << msg;
return Status(DB_ERROR, msg);
}
result_set.clear();
result_set.resize(nq);
std::function<void(size_t, size_t)> reduce_worker = [&](size_t from_index, size_t to_index) {
for (auto i = from_index; i < to_index; i++) {
SearchContext::Id2DistanceMap id_distance;
id_distance.reserve(topk);
for (auto k = 0; k < topk; k++) {
uint64_t index = i * topk + k;
if(output_ids[index] < 0) {
continue;
}
id_distance.push_back(std::make_pair(output_ids[index], output_distence[index]));
}
result_set[i] = id_distance;
}
};
if(NeedParallelReduce(nq, topk)) {
ParallelReduce(reduce_worker, nq);
} else {
reduce_worker(0, nq);
}
return Status::OK();
}
Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,
SearchContext::Id2DistanceMap &distance_target,
uint64_t topk,
bool ascending) {
//Note: distance_src and distance_target are already sorted by distance in ascending order
if(distance_src.empty()) {
ENGINE_LOG_WARNING << "Empty distance source array";
return Status::OK();
}
if(distance_target.empty()) {
distance_target.swap(distance_src);
return Status::OK();
}
size_t src_count = distance_src.size();
size_t target_count = distance_target.size();
SearchContext::Id2DistanceMap distance_merged;
distance_merged.reserve(topk);
size_t src_index = 0, target_index = 0;
while(true) {
//all distance_src items are merged; if distance_merged.size() is still less than topk,
//move items from distance_target to distance_merged until distance_merged.size() equals topk
if(src_index >= src_count) {
for(size_t i = target_index; i < target_count && distance_merged.size() < topk; ++i) {
distance_merged.push_back(distance_target[i]);
}
break;
}
//all distance_target items are merged; if distance_merged.size() is still less than topk,
//move items from distance_src to distance_merged until distance_merged.size() equals topk
if(target_index >= target_count) {
for(size_t i = src_index; i < src_count && distance_merged.size() < topk; ++i) {
distance_merged.push_back(distance_src[i]);
}
break;
}
//compare distances:
// if ascending = true, move the smallest distance into distance_merged one by one
// else, move the largest distance into distance_merged one by one
auto& src_pair = distance_src[src_index];
auto& target_pair = distance_target[target_index];
if(ascending){
if(src_pair.second > target_pair.second) {
distance_merged.push_back(target_pair);
target_index++;
} else {
distance_merged.push_back(src_pair);
src_index++;
}
} else {
if(src_pair.second < target_pair.second) {
distance_merged.push_back(target_pair);
target_index++;
} else {
distance_merged.push_back(src_pair);
src_index++;
}
}
//distance_merged.size() already equals topk
if(distance_merged.size() >= topk) {
break;
}
}
distance_target.swap(distance_merged);
return Status::OK();
}
Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,
uint64_t topk,
bool ascending,
SearchContext::ResultSet &result_target) {
if (result_target.empty()) {
result_target.swap(result_src);
return Status::OK();
}
if (result_src.size() != result_target.size()) {
std::string msg = "Invalid result set size";
ENGINE_LOG_ERROR << msg;
return Status(DB_ERROR, msg);
}
std::function<void(size_t, size_t)> ReduceWorker = [&](size_t from_index, size_t to_index) {
for (size_t i = from_index; i < to_index; i++) {
SearchContext::Id2DistanceMap &score_src = result_src[i];
SearchContext::Id2DistanceMap &score_target = result_target[i];
SearchTask::MergeResult(score_src, score_target, topk, ascending);
}
};
if(NeedParallelReduce(result_src.size(), topk)) {
ParallelReduce(ReduceWorker, result_src.size());
} else {
ReduceWorker(0, result_src.size());
}
return Status::OK();
return nullptr;
}
}
......
......@@ -19,22 +19,6 @@ public:
virtual std::shared_ptr<IScheduleTask> Execute() override;
static Status ClusterResult(const std::vector<long> &output_ids,
const std::vector<float> &output_distence,
uint64_t nq,
uint64_t topk,
SearchContext::ResultSet &result_set);
static Status MergeResult(SearchContext::Id2DistanceMap &distance_src,
SearchContext::Id2DistanceMap &distance_target,
uint64_t topk,
bool ascending);
static Status TopkResult(SearchContext::ResultSet &result_src,
uint64_t topk,
bool ascending,
SearchContext::ResultSet &result_target);
public:
size_t index_id_ = 0;
int file_type_ = 0; //for metrics
......
......@@ -55,9 +55,6 @@ void BuildVectors(int64_t n, std::vector<float> &vectors) {
}
TEST_F(MemManagerTest, VECTOR_SOURCE_TEST) {
std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
ASSERT_TRUE(status.ok());
......@@ -96,16 +93,10 @@ TEST_F(MemManagerTest, VECTOR_SOURCE_TEST) {
vector_ids = source.GetVectorIds();
ASSERT_EQ(vector_ids.size(), 100);
status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}
TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {
std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
auto options = engine::OptionsFactory::Build();
auto options = GetOptions();
engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
......@@ -143,15 +134,10 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {
ASSERT_EQ(vector_ids.size(), n_max - n_100);
ASSERT_TRUE(mem_table_file.IsFull());
status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}
TEST_F(MemManagerTest, MEM_TABLE_TEST) {
std::shared_ptr<engine::meta::SqliteMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
auto options = engine::OptionsFactory::Build();
auto options = GetOptions();
engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
......@@ -211,9 +197,6 @@ TEST_F(MemManagerTest, MEM_TABLE_TEST) {
status = mem_table.Serialize();
ASSERT_TRUE(status.ok());
status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}
TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
......
......@@ -324,6 +324,12 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
status = impl_->DeleteTableFiles(table_id);
ASSERT_TRUE(status.ok());
status = impl_->CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::NEW;
status = impl_->UpdateTableFile(table_file);
status = impl_->CleanUp();
ASSERT_TRUE(status.ok());
status = impl_->DeleteTable(table_id);
ASSERT_TRUE(status.ok());
......
......@@ -128,6 +128,14 @@ TEST(DBMiscTest, UTILS_TEST) {
ASSERT_TRUE(boost::filesystem::exists(path));
}
options.slave_paths.push_back("/");
status = engine::utils::CreateTablePath(options, TABLE_NAME);
ASSERT_FALSE(status.ok());
options.path = "/";
status = engine::utils::CreateTablePath(options, TABLE_NAME);
ASSERT_FALSE(status.ok());
engine::meta::TableFileSchema file;
file.id_ = 50;
file.table_id_ = TABLE_NAME;
......@@ -142,6 +150,4 @@ TEST(DBMiscTest, UTILS_TEST) {
status = engine::utils::DeleteTableFilePath(options, file);
ASSERT_TRUE(status.ok());
}
\ No newline at end of file
......@@ -351,7 +351,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
status = impl_->DeleteTable(table_id);
ASSERT_TRUE(status.ok());
status = impl_->CleanUpFilesWithTTL(1UL);
status = impl_->CleanUpFilesWithTTL(0UL);
ASSERT_TRUE(status.ok());
}
......
......@@ -10,6 +10,8 @@
#include <gtest/gtest.h>
#include <cmath>
#include <vector>
#include <src/scheduler/task/SearchTask.h>
using namespace zilliz::milvus;
......@@ -114,23 +116,23 @@ TEST(DBSearchTest, TOPK_TEST) {
std::vector<long> target_ids;
std::vector<float> target_distence;
engine::SearchContext::ResultSet src_result;
auto status = engine::SearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result);
auto status = engine::XSearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result);
ASSERT_FALSE(status.ok());
ASSERT_TRUE(src_result.empty());
BuildResult(NQ, TOP_K, ascending, target_ids, target_distence);
status = engine::SearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result);
status = engine::XSearchTask::ClusterResult(target_ids, target_distence, NQ, TOP_K, src_result);
ASSERT_TRUE(status.ok());
ASSERT_EQ(src_result.size(), NQ);
engine::SearchContext::ResultSet target_result;
status = engine::SearchTask::TopkResult(target_result, TOP_K, ascending, target_result);
status = engine::XSearchTask::TopkResult(target_result, TOP_K, ascending, target_result);
ASSERT_TRUE(status.ok());
status = engine::SearchTask::TopkResult(target_result, TOP_K, ascending, src_result);
status = engine::XSearchTask::TopkResult(target_result, TOP_K, ascending, src_result);
ASSERT_FALSE(status.ok());
status = engine::SearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
status = engine::XSearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
ASSERT_TRUE(status.ok());
ASSERT_TRUE(src_result.empty());
ASSERT_EQ(target_result.size(), NQ);
......@@ -140,10 +142,10 @@ TEST(DBSearchTest, TOPK_TEST) {
uint64_t wrong_topk = TOP_K - 10;
BuildResult(NQ, wrong_topk, ascending, src_ids, src_distence);
status = engine::SearchTask::ClusterResult(src_ids, src_distence, NQ, wrong_topk, src_result);
status = engine::XSearchTask::ClusterResult(src_ids, src_distence, NQ, wrong_topk, src_result);
ASSERT_TRUE(status.ok());
status = engine::SearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
status = engine::XSearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
ASSERT_TRUE(status.ok());
for(uint64_t i = 0; i < NQ; i++) {
ASSERT_EQ(target_result[i].size(), TOP_K);
......@@ -152,7 +154,7 @@ TEST(DBSearchTest, TOPK_TEST) {
wrong_topk = TOP_K + 10;
BuildResult(NQ, wrong_topk, ascending, src_ids, src_distence);
status = engine::SearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
status = engine::XSearchTask::TopkResult(src_result, TOP_K, ascending, target_result);
ASSERT_TRUE(status.ok());
for(uint64_t i = 0; i < NQ; i++) {
ASSERT_EQ(target_result[i].size(), TOP_K);
......@@ -170,15 +172,15 @@ TEST(DBSearchTest, MERGE_TEST) {
uint64_t src_count = 5, target_count = 8;
BuildResult(1, src_count, ascending, src_ids, src_distence);
BuildResult(1, target_count, ascending, target_ids, target_distence);
auto status = engine::SearchTask::ClusterResult(src_ids, src_distence, 1, src_count, src_result);
auto status = engine::XSearchTask::ClusterResult(src_ids, src_distence, 1, src_count, src_result);
ASSERT_TRUE(status.ok());
status = engine::SearchTask::ClusterResult(target_ids, target_distence, 1, target_count, target_result);
status = engine::XSearchTask::ClusterResult(target_ids, target_distence, 1, target_count, target_result);
ASSERT_TRUE(status.ok());
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 10, ascending);
status = engine::XSearchTask::MergeResult(src, target, 10, ascending);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), 10);
CheckResult(src_result[0], target_result[0], target, ascending);
......@@ -187,7 +189,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target;
status = engine::SearchTask::MergeResult(src, target, 10, ascending);
status = engine::XSearchTask::MergeResult(src, target, 10, ascending);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count);
ASSERT_TRUE(src.empty());
......@@ -197,7 +199,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 30, ascending);
status = engine::XSearchTask::MergeResult(src, target, 30, ascending);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count + target_count);
CheckResult(src_result[0], target_result[0], target, ascending);
......@@ -206,7 +208,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap target = src_result[0];
engine::SearchContext::Id2DistanceMap src = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 30, ascending);
status = engine::XSearchTask::MergeResult(src, target, 30, ascending);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count + target_count);
CheckResult(src_result[0], target_result[0], target, ascending);
......@@ -229,7 +231,7 @@ TEST(DBSearchTest, PARALLEL_CLUSTER_TEST) {
BuildResult(nq, topk, ascending, target_ids, target_distence);
rc.RecordSection("build id/dietance map");
auto status = engine::SearchTask::ClusterResult(target_ids, target_distence, nq, topk, src_result);
auto status = engine::XSearchTask::ClusterResult(target_ids, target_distence, nq, topk, src_result);
ASSERT_TRUE(status.ok());
ASSERT_EQ(src_result.size(), nq);
......@@ -269,14 +271,14 @@ TEST(DBSearchTest, PARALLEL_TOPK_TEST) {
server::TimeRecorder rc("DoCluster");
BuildResult(nq, topk, ascending, target_ids, target_distence);
auto status = engine::SearchTask::ClusterResult(target_ids, target_distence, nq, topk, src_result);
auto status = engine::XSearchTask::ClusterResult(target_ids, target_distence, nq, topk, src_result);
rc.RecordSection("cluster result");
BuildResult(nq, insufficient_topk, ascending, insufficient_ids, insufficient_distence);
status = engine::SearchTask::ClusterResult(target_ids, target_distence, nq, insufficient_topk, insufficient_result);
status = engine::XSearchTask::ClusterResult(target_ids, target_distence, nq, insufficient_topk, insufficient_result);
rc.RecordSection("cluster result");
engine::SearchTask::TopkResult(insufficient_result, topk, ascending, src_result);
engine::XSearchTask::TopkResult(insufficient_result, topk, ascending, src_result);
ASSERT_TRUE(status.ok());
rc.RecordSection("topk");
......
......@@ -87,7 +87,7 @@ class MySqlMetaTest : public BaseTest {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MemManagerTest : public BaseTest {
class MemManagerTest : public MetaTest {
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
......
......@@ -34,7 +34,6 @@ cache_config:
gpu_cache_free_percent: 0.85 # old data will be erased from the cache when it is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
gpu_ids: # gpu id
- 0
- 1
engine_config:
use_blas_threshold: 20
......
......@@ -101,7 +101,8 @@ TEST(ConfigTest, SERVER_CONFIG_TEST) {
err = server::ServerConfig::GetInstance().ValidateConfig();
ASSERT_EQ(err, SERVER_SUCCESS);
server::ConfigNode node1 = config.GetConfig("server_config");
const server::ServerConfig& config_const = config;
server::ConfigNode node1 = config_const.GetConfig("server_config");
server::ConfigNode& node2 = config.GetConfig("cache_config");
node1.Combine(node2);
......
......@@ -161,7 +161,17 @@ TEST(UtilTest, LOG_TEST) {
ASSERT_EQ(fname, "log_config.conf");
}
TEST(UtilTest, VALIDATE_TABLENAME_TEST) {
TEST(UtilTest, TIMERECORDER_TEST) {
for(int64_t log_level = 0; log_level <= 6; log_level++) {
if(log_level == 5) {
continue; //skip fatal
}
server::TimeRecorder rc("time", log_level);
rc.RecordSection("end");
}
}
TEST(ValidationUtilTest, VALIDATE_TABLENAME_TEST) {
std::string table_name = "Normal123_";
ErrorCode res = server::ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_SUCCESS);
......@@ -195,7 +205,7 @@ TEST(UtilTest, VALIDATE_TABLENAME_TEST) {
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
}
TEST(UtilTest, VALIDATE_DIMENSIONTEST) {
TEST(ValidationUtilTest, VALIDATE_DIMENSION_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateTableDimension(-1), SERVER_INVALID_VECTOR_DIMENSION);
ASSERT_EQ(server::ValidationUtil::ValidateTableDimension(0), SERVER_INVALID_VECTOR_DIMENSION);
ASSERT_EQ(server::ValidationUtil::ValidateTableDimension(16385), SERVER_INVALID_VECTOR_DIMENSION);
......@@ -203,7 +213,7 @@ TEST(UtilTest, VALIDATE_DIMENSIONTEST) {
ASSERT_EQ(server::ValidationUtil::ValidateTableDimension(1), SERVER_SUCCESS);
}
TEST(UtilTest, VALIDATE_INDEX_TEST) {
TEST(ValidationUtilTest, VALIDATE_INDEX_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateTableIndexType((int)engine::EngineType::INVALID), SERVER_INVALID_INDEX_TYPE);
for(int i = 1; i <= (int)engine::EngineType::MAX_VALUE; i++) {
ASSERT_EQ(server::ValidationUtil::ValidateTableIndexType(i), SERVER_SUCCESS);
......@@ -221,14 +231,14 @@ TEST(UtilTest, VALIDATE_INDEX_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateTableIndexMetricType(2), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, ValidateTopkTest) {
TEST(ValidationUtilTest, VALIDATE_TOPK_TEST) {
engine::meta::TableSchema schema;
ASSERT_EQ(server::ValidationUtil::ValidateSearchTopk(10, schema), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateSearchTopk(65536, schema), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateSearchTopk(0, schema), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, ValidateNprobeTest) {
TEST(ValidationUtilTest, VALIDATE_NPROBE_TEST) {
engine::meta::TableSchema schema;
schema.nlist_ = 100;
ASSERT_EQ(server::ValidationUtil::ValidateSearchNprobe(10, schema), SERVER_SUCCESS);
......@@ -236,7 +246,7 @@ TEST(ValidationUtilTest, ValidateNprobeTest) {
ASSERT_NE(server::ValidationUtil::ValidateSearchNprobe(101, schema), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, ValidateGpuTest) {
TEST(ValidationUtilTest, VALIDATE_GPU_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateGpuIndex(0), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateGpuIndex(100), SERVER_SUCCESS);
......@@ -245,14 +255,35 @@ TEST(ValidationUtilTest, ValidateGpuTest) {
ASSERT_NE(server::ValidationUtil::GetGpuMemory(100, memory), SERVER_SUCCESS);
}
TEST(UtilTest, TIMERECORDER_TEST) {
for(int64_t log_level = 0; log_level <= 6; log_level++) {
if(log_level == 5) {
continue; //skip fatal
}
server::TimeRecorder rc("time", log_level);
rc.RecordSection("end");
}
TEST(ValidationUtilTest, VALIDATE_IPADDRESS_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateIpAddress("127.0.0.1"), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateIpAddress("not ip"), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, VALIDATE_NUMBER_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateStringIsNumber("1234"), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateStringIsNumber("not number"), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, VALIDATE_BOOL_TEST) {
std::string str = "true";
ASSERT_EQ(server::ValidationUtil::ValidateStringIsBool(str), SERVER_SUCCESS);
str = "not bool";
ASSERT_NE(server::ValidationUtil::ValidateStringIsBool(str), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, VALIDATE_DOUBLE_TEST) {
double ret = 0.0;
ASSERT_EQ(server::ValidationUtil::ValidateStringIsDouble("2.5", ret), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateStringIsDouble("not double", ret), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, VALIDATE_DBURI_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateDbURI("sqlite://:@:/"), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateDbURI("xxx://:@:/"), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateDbURI("not uri"), SERVER_SUCCESS);
ASSERT_EQ(server::ValidationUtil::ValidateDbURI("mysql://root:123456@127.0.0.1:3303/milvus"), SERVER_SUCCESS);
ASSERT_NE(server::ValidationUtil::ValidateDbURI("mysql://root:123456@127.0.0.1:port/milvus"), SERVER_SUCCESS);
}
TEST(UtilTest, ROLLOUTHANDLER_TEST){
......