Commit 1125bcec authored by Yukikaze-CZR

Merge remote-tracking branch 'upstream/0.6.0' into 0.6.0

......@@ -20,6 +20,7 @@ Please mark all change in change log and use the ticket from JIRA.
- \#440 - Query API in customization still uses old version
- \#440 - Server cannot startup with gpu_resource_config.enable=false in GPU version
- \#458 - Index data is not compatible between 0.5 and 0.6
- \#486 - gpu no usage during index building
## Feature
- \#12 - Pure CPU version for Milvus
......@@ -28,6 +29,7 @@ Please mark all change in change log and use the ticket from JIRA.
- \#226 - Experimental shards middleware for Milvus
- \#227 - Support new index types SPTAG-KDT and SPTAG-BKT
- \#346 - Support build index with multiple gpu
- \#488 - Add log in scheduler/optimizer
## Improvement
- \#255 - Add ivfsq8 test report detailed version
......
......@@ -114,7 +114,8 @@ pipeline {
stage("Deploy to Development") {
environment {
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
FROMAT_SEMVER = "${env.SEMVER}".replaceAll(".", "-")
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
}
agent {
......@@ -248,7 +249,8 @@ pipeline {
stage("Deploy to Development") {
environment {
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
FROMAT_SEMVER = "${env.SEMVER}".replaceAll(".", "-")
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
}
agent {
......
......@@ -25,6 +25,7 @@
namespace milvus {
namespace cache {
#ifdef MILVUS_GPU_VERSION
std::mutex GpuCacheMgr::mutex_;
std::unordered_map<uint64_t, GpuCacheMgrPtr> GpuCacheMgr::instance_;
......@@ -76,6 +77,7 @@ GpuCacheMgr::GetIndex(const std::string& key) {
DataObjPtr obj = GetItem(key);
return obj;
}
#endif
} // namespace cache
} // namespace milvus
......@@ -25,6 +25,7 @@
namespace milvus {
namespace cache {
#ifdef MILVUS_GPU_VERSION
class GpuCacheMgr;
using GpuCacheMgrPtr = std::shared_ptr<GpuCacheMgr>;
......@@ -42,6 +43,7 @@ class GpuCacheMgr : public CacheMgr<DataObjPtr> {
static std::mutex mutex_;
static std::unordered_map<uint64_t, GpuCacheMgrPtr> instance_;
};
#endif
} // namespace cache
} // namespace milvus
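
The two GpuCacheMgr hunks above move the class and its static members (`mutex_`, the per-device `instance_` map) inside the MILVUS_GPU_VERSION guard. For orientation, here is a minimal, hypothetical sketch of the per-device singleton pattern those static members imply: one cache manager per GPU id, created lazily behind a mutex. It is not the upstream implementation; the real class additionally derives from CacheMgr<DataObjPtr>.

```cpp
#include <cstdint>
#include <memory>
#include <mutex>
#include <unordered_map>

class GpuCacheMgr;
using GpuCacheMgrPtr = std::shared_ptr<GpuCacheMgr>;

class GpuCacheMgr {
 public:
    static GpuCacheMgrPtr
    GetInstance(uint64_t gpu_id) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = instance_.find(gpu_id);
        if (it == instance_.end()) {
            // Lazily create one cache manager per GPU device id.
            it = instance_.emplace(gpu_id, std::make_shared<GpuCacheMgr>()).first;
        }
        return it->second;
    }

 private:
    static std::mutex mutex_;
    static std::unordered_map<uint64_t, GpuCacheMgrPtr> instance_;
};

std::mutex GpuCacheMgr::mutex_;
std::unordered_map<uint64_t, GpuCacheMgrPtr> GpuCacheMgr::instance_;

int main() {
    // Two lookups for the same device id return the same manager instance.
    return GpuCacheMgr::GetInstance(0) == GpuCacheMgr::GetInstance(0) ? 0 : 1;
}
```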
......@@ -151,6 +151,7 @@ ExecutionEngineImpl::HybridLoad() const {
return;
}
#ifdef MILVUS_GPU_VERSION
const std::string key = location_ + ".quantizer";
server::Config& config = server::Config::GetInstance();
......@@ -205,6 +206,7 @@ ExecutionEngineImpl::HybridLoad() const {
auto cache_quantizer = std::make_shared<CachedQuantizer>(quantizer);
cache::GpuCacheMgr::GetInstance(best_device_id)->InsertItem(key, cache_quantizer);
}
#endif
}
void
......@@ -342,6 +344,7 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
}
#endif
#ifdef MILVUS_GPU_VERSION
auto index = std::static_pointer_cast<VecIndex>(cache::GpuCacheMgr::GetInstance(device_id)->GetIndex(location_));
bool already_in_cache = (index != nullptr);
if (already_in_cache) {
......@@ -364,16 +367,19 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
if (!already_in_cache) {
GpuCache(device_id);
}
#endif
return Status::OK();
}
Status
ExecutionEngineImpl::CopyToIndexFileToGpu(uint64_t device_id) {
#ifdef MILVUS_GPU_VERSION
gpu_num_ = device_id;
auto to_index_data = std::make_shared<ToIndexData>(PhysicalSize());
cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(to_index_data);
milvus::cache::GpuCacheMgr::GetInstance(device_id)->InsertItem(location_, obj);
#endif
return Status::OK();
}
......@@ -584,15 +590,17 @@ ExecutionEngineImpl::Cache() {
Status
ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
#ifdef MILVUS_GPU_VERSION
cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(index_);
milvus::cache::GpuCacheMgr::GetInstance(gpu_id)->InsertItem(location_, obj);
#endif
return Status::OK();
}
// TODO(linxj): remove.
Status
ExecutionEngineImpl::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
std::vector<int64_t> gpu_ids;
Status s = config.GetGpuResourceConfigBuildIndexResources(gpu_ids);
......@@ -604,6 +612,9 @@ ExecutionEngineImpl::Init() {
std::string msg = "Invalid gpu_num";
return Status(SERVER_INVALID_ARGUMENT, msg);
#else
return Status::OK();
#endif
}
} // namespace engine
......
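
Throughout ExecutionEngineImpl the change follows one pattern: GPU-only work (GpuCacheMgr insertion, build-resource lookup) is compiled only for the GPU edition, while the CPU edition falls through and still returns Status::OK(). Below is a minimal sketch of that guard pattern; the Status struct and the function name are stand-ins, not the real milvus types.

```cpp
#include <cstdint>
#include <iostream>

// Stand-in for milvus::Status, just enough for the sketch.
struct Status {
    static Status
    OK() {
        return Status{};
    }
};

Status
GpuCacheSketch(uint64_t gpu_id) {
#ifdef MILVUS_GPU_VERSION
    // GPU edition: the index would be inserted into the per-device cache here.
    std::cout << "caching index on gpu" << gpu_id << std::endl;
#else
    (void)gpu_id;  // CPU edition: nothing to cache, but the call still succeeds.
#endif
    return Status::OK();
}

int main() {
    GpuCacheSketch(0);
    return 0;
}
```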
......@@ -59,9 +59,9 @@ print_banner() {
#endif
<< " library." << std::endl;
#ifdef MILVUS_CPU_VERSION
std::cout << "You are using Milvus CPU version" << std::endl;
std::cout << "You are using Milvus CPU edition" << std::endl;
#else
std::cout << "You are using Milvus GPU version" << std::endl;
std::cout << "You are using Milvus GPU edition" << std::endl;
#endif
std::cout << std::endl;
}
......
......@@ -106,6 +106,25 @@ class OptimizerInst {
server::Config& config = server::Config::GetInstance();
config.GetGpuResourceConfigEnable(enable_gpu);
if (enable_gpu) {
std::vector<int64_t> build_gpus;
std::vector<int64_t> search_gpus;
int64_t gpu_search_threshold;
config.GetGpuResourceConfigBuildIndexResources(build_gpus);
config.GetGpuResourceConfigSearchResources(search_gpus);
config.GetEngineConfigGpuSearchThreshold(gpu_search_threshold);
std::string build_msg = "Build index gpu:";
for (auto build_id : build_gpus) {
build_msg.append(" gpu" + std::to_string(build_id));
}
SERVER_LOG_DEBUG << build_msg;
std::string search_msg = "Search gpu:";
for (auto search_id : search_gpus) {
search_msg.append(" gpu" + std::to_string(search_id));
}
search_msg.append(". gpu_search_threshold:" + std::to_string(gpu_search_threshold));
SERVER_LOG_DEBUG << search_msg;
pass_list.push_back(std::make_shared<BuildIndexPass>());
pass_list.push_back(std::make_shared<FaissFlatPass>());
pass_list.push_back(std::make_shared<FaissIVFFlatPass>());
......
......@@ -25,12 +25,13 @@ namespace scheduler {
void
BuildIndexPass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
std::vector<int64_t> build_resources;
Status s = config.GetGpuResourceConfigBuildIndexResources(build_resources);
Status s = config.GetGpuResourceConfigBuildIndexResources(build_gpu_ids_);
if (!s.ok()) {
throw;
}
#endif
}
bool
......@@ -38,13 +39,16 @@ BuildIndexPass::Run(const TaskPtr& task) {
if (task->Type() != TaskType::BuildIndexTask)
return false;
if (build_gpu_ids_.empty())
if (build_gpu_ids_.empty()) {
SERVER_LOG_WARNING << "BuildIndexPass cannot get build index gpu!";
return false;
}
ResourcePtr res_ptr;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, build_gpu_ids_[specified_gpu_id_]);
auto label = std::make_shared<SpecResLabel>(std::weak_ptr<Resource>(res_ptr));
task->label() = label;
SERVER_LOG_DEBUG << "Specify gpu" << specified_gpu_id_ << " to build index!";
specified_gpu_id_ = (specified_gpu_id_ + 1) % build_gpu_ids_.size();
return true;
......
......@@ -45,7 +45,7 @@ class BuildIndexPass : public Pass {
private:
uint64_t specified_gpu_id_ = 0;
std::vector<int32_t> build_gpu_ids_;
std::vector<int64_t> build_gpu_ids_;
};
using BuildIndexPassPtr = std::shared_ptr<BuildIndexPass>;
......
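
BuildIndexPass now reads the build GPU ids directly into `build_gpu_ids_` (widened from int32_t to int64_t) and assigns tasks round-robin via `specified_gpu_id_`. The following standalone sketch, with hypothetical names, shows that rotation in isolation, including the empty-list case the new warning log covers:

```cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

class RoundRobinGpuPicker {
 public:
    explicit RoundRobinGpuPicker(std::vector<int64_t> gpu_ids) : gpu_ids_(std::move(gpu_ids)) {
    }

    // Returns -1 when no build GPU is configured, mirroring the early return in Run().
    int64_t
    Next() {
        if (gpu_ids_.empty()) {
            return -1;
        }
        int64_t id = gpu_ids_[cursor_];
        cursor_ = (cursor_ + 1) % gpu_ids_.size();  // rotate to the next configured device
        return id;
    }

 private:
    uint64_t cursor_ = 0;
    std::vector<int64_t> gpu_ids_;
};

int main() {
    RoundRobinGpuPicker picker({0, 1});
    for (int i = 0; i < 4; ++i) {
        std::cout << "build task " << i << " -> gpu" << picker.Next() << std::endl;  // gpu0, gpu1, gpu0, gpu1
    }
    return 0;
}
```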
......@@ -29,6 +29,7 @@ namespace scheduler {
void
FaissFlatPass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
......@@ -38,6 +39,7 @@ FaissFlatPass::Init() {
if (!s.ok()) {
throw;
}
#endif
}
bool
......@@ -54,9 +56,11 @@ FaissFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << "FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
}
......
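
FaissFlatPass and the FaissIVFFlatPass, FaissIVFSQ8Pass, and FaissIVFSQ8HPass hunks below all share the routing that the new debug logs describe: jobs with nq below gpu_search_threshold stay on the CPU, larger jobs rotate across the configured search GPUs. A condensed, hypothetical sketch of that decision (it returns a resource name string rather than the scheduler's actual ResourcePtr) is:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Picks a resource for a search job: below the threshold everything goes to
// the CPU, otherwise GPUs are chosen in round-robin order.
std::string
PickSearchResource(int64_t nq, int64_t threshold, const std::vector<int64_t>& gpus, uint64_t& count) {
    if (gpus.empty() || nq < threshold) {
        return "cpu";
    }
    auto best_device_id = count % gpus.size();
    count++;
    return "gpu" + std::to_string(best_device_id);
}

int main() {
    std::vector<int64_t> gpus = {0, 1};
    uint64_t count = 0;
    int64_t threshold = 100;
    std::cout << PickSearchResource(50, threshold, gpus, count) << std::endl;   // cpu
    std::cout << PickSearchResource(500, threshold, gpus, count) << std::endl;  // gpu0
    std::cout << PickSearchResource(500, threshold, gpus, count) << std::endl;  // gpu1
    return 0;
}
```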
......@@ -29,6 +29,7 @@ namespace scheduler {
void
FaissIVFFlatPass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
......@@ -38,6 +39,7 @@ FaissIVFFlatPass::Init() {
if (!s.ok()) {
throw;
}
#endif
}
bool
......@@ -54,9 +56,12 @@ FaissIVFFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << "FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
}
......
......@@ -29,12 +29,14 @@ namespace scheduler {
void
FaissIVFSQ8HPass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
threshold_ = std::numeric_limits<int64_t>::max();
}
s = config.GetGpuResourceConfigSearchResources(gpus);
#endif
}
bool
......@@ -51,9 +53,12 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
}
......
......@@ -29,6 +29,7 @@ namespace scheduler {
void
FaissIVFSQ8Pass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
......@@ -38,6 +39,7 @@ FaissIVFSQ8Pass::Init() {
if (!s.ok()) {
throw;
}
#endif
}
bool
......@@ -54,9 +56,12 @@ FaissIVFSQ8Pass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
}
......
......@@ -33,6 +33,7 @@ FallbackPass::Run(const TaskPtr& task) {
return false;
}
// NEVER be empty
SERVER_LOG_DEBUG << "FallbackPass!";
auto cpu = ResMgrInst::GetInstance()->GetCpuResources()[0];
auto label = std::make_shared<SpecResLabel>(cpu);
task->label() = label;
......
......@@ -85,7 +85,7 @@ XBuildIndexTask::Load(milvus::scheduler::LoadType type, uint8_t device_id) {
size_t file_size = to_index_engine_->PhysicalSize();
std::string info = "Load file id:" + std::to_string(file_->id_) +
std::string info = "Load file id:" + std::to_string(file_->id_) + " " + type_str +
" file type:" + std::to_string(file_->file_type_) + " size:" + std::to_string(file_size) +
" bytes from location: " + file_->location_ + " totally cost";
double span = rc.ElapseFromBegin(info);
......
......@@ -182,6 +182,7 @@ Config::ValidateConfig() {
return s;
}
#ifdef MILVUS_GPU_VERSION
int64_t engine_gpu_search_threshold;
s = GetEngineConfigGpuSearchThreshold(engine_gpu_search_threshold);
if (!s.ok()) {
......@@ -195,7 +196,6 @@ Config::ValidateConfig() {
return s;
}
#ifdef MILVUS_GPU_VERSION
if (gpu_resource_enable) {
int64_t resource_cache_capacity;
s = GetGpuResourceConfigCacheCapacity(resource_cache_capacity);
......@@ -325,13 +325,13 @@ Config::ResetDefaultConfig() {
return s;
}
#ifdef MILVUS_GPU_VERSION
/* gpu resource config */
s = SetEngineConfigGpuSearchThreshold(CONFIG_ENGINE_GPU_SEARCH_THRESHOLD_DEFAULT);
if (!s.ok()) {
return s;
}
/* gpu resource config */
#ifdef MILVUS_GPU_VERSION
s = SetGpuResourceConfigEnable(CONFIG_GPU_RESOURCE_ENABLE_DEFAULT);
if (!s.ok()) {
return s;
......@@ -632,6 +632,7 @@ Config::CheckEngineConfigOmpThreadNum(const std::string& value) {
return Status::OK();
}
#ifdef MILVUS_GPU_VERSION
Status
Config::CheckEngineConfigGpuSearchThreshold(const std::string& value) {
if (!ValidationUtil::ValidateStringIsNumber(value).ok()) {
......@@ -761,6 +762,7 @@ Config::CheckGpuResourceConfigBuildIndexResources(const std::vector<std::string>
return Status::OK();
}
#endif
////////////////////////////////////////////////////////////////////////////////
ConfigNode&
......@@ -981,6 +983,7 @@ Config::GetEngineConfigOmpThreadNum(int64_t& value) {
return Status::OK();
}
#ifdef MILVUS_GPU_VERSION
Status
Config::GetEngineConfigGpuSearchThreshold(int64_t& value) {
std::string str =
......@@ -1097,6 +1100,7 @@ Config::GetGpuResourceConfigBuildIndexResources(std::vector<int64_t>& value) {
}
return Status::OK();
}
#endif
///////////////////////////////////////////////////////////////////////////////
/* server config */
......@@ -1284,6 +1288,8 @@ Config::SetEngineConfigOmpThreadNum(const std::string& value) {
return Status::OK();
}
#ifdef MILVUS_GPU_VERSION
/* gpu resource config */
Status
Config::SetEngineConfigGpuSearchThreshold(const std::string& value) {
Status s = CheckEngineConfigGpuSearchThreshold(value);
......@@ -1294,7 +1300,6 @@ Config::SetEngineConfigGpuSearchThreshold(const std::string& value) {
return Status::OK();
}
/* gpu resource config */
Status
Config::SetGpuResourceConfigEnable(const std::string& value) {
Status s = CheckGpuResourceConfigEnable(value);
......@@ -1348,6 +1353,7 @@ Config::SetGpuResourceConfigBuildIndexResources(const std::string& value) {
SetConfigValueInMem(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_BUILD_INDEX_RESOURCES, value);
return Status::OK();
} // namespace server
#endif
} // namespace server
} // namespace milvus
......@@ -170,6 +170,8 @@ class Config {
CheckEngineConfigUseBlasThreshold(const std::string& value);
Status
CheckEngineConfigOmpThreadNum(const std::string& value);
#ifdef MILVUS_GPU_VERSION
Status
CheckEngineConfigGpuSearchThreshold(const std::string& value);
......@@ -184,6 +186,7 @@ class Config {
CheckGpuResourceConfigSearchResources(const std::vector<std::string>& value);
Status
CheckGpuResourceConfigBuildIndexResources(const std::vector<std::string>& value);
#endif
std::string
GetConfigStr(const std::string& parent_key, const std::string& child_key, const std::string& default_value = "");
......@@ -239,6 +242,8 @@ class Config {
GetEngineConfigUseBlasThreshold(int64_t& value);
Status
GetEngineConfigOmpThreadNum(int64_t& value);
#ifdef MILVUS_GPU_VERSION
Status
GetEngineConfigGpuSearchThreshold(int64_t& value);
......@@ -253,6 +258,7 @@ class Config {
GetGpuResourceConfigSearchResources(std::vector<int64_t>& value);
Status
GetGpuResourceConfigBuildIndexResources(std::vector<int64_t>& value);
#endif
public:
/* server config */
......@@ -300,6 +306,8 @@ class Config {
SetEngineConfigUseBlasThreshold(const std::string& value);
Status
SetEngineConfigOmpThreadNum(const std::string& value);
#ifdef MILVUS_GPU_VERSION
Status
SetEngineConfigGpuSearchThreshold(const std::string& value);
......@@ -314,6 +322,7 @@ class Config {
SetGpuResourceConfigSearchResources(const std::string& value);
Status
SetGpuResourceConfigBuildIndexResources(const std::string& value);
#endif
private:
std::unordered_map<std::string, std::unordered_map<std::string, std::string>> config_map_;
......
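
Because CheckEngineConfigGpuSearchThreshold and the GpuResourceConfig accessors now exist only under MILVUS_GPU_VERSION, their call sites must be guarded the same way. A toy sketch of that compile-time contract, using a hypothetical ToyConfig rather than the real server::Config, is:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-in for server::Config; only illustrates the compile-time surface.
struct ToyConfig {
#ifdef MILVUS_GPU_VERSION
    // Exists only in the GPU edition, mirroring GetGpuResourceConfigSearchResources().
    std::vector<int64_t>
    SearchGpus() const {
        return {0, 1};
    }
#endif
};

int main() {
    ToyConfig config;
#ifdef MILVUS_GPU_VERSION
    for (auto id : config.SearchGpus()) {
        std::cout << "search gpu" << id << std::endl;
    }
#else
    (void)config;
    std::cout << "CPU edition: GPU resource config is not compiled in" << std::endl;
#endif
    return 0;
}
```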
......@@ -183,7 +183,11 @@ Server::Start() {
// print version information
SERVER_LOG_INFO << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
#ifdef MILVUS_CPU_VERSION
SERVER_LOG_INFO << "CPU edition";
#else
SERVER_LOG_INFO << "GPU edition";
#endif
server::Metrics::GetInstance().Init();
server::SystemInfo::GetInstance().Init();
......
......@@ -30,9 +30,13 @@ class StringHelpFunctions {
StringHelpFunctions() = default;
public:
// trim blanks from begin and end
// " a b c " => "a b c"
static void
TrimStringBlank(std::string& string);
// trim quotes from begin and end
// "'abc'" => "abc"
static void
TrimStringQuote(std::string& string, const std::string& qoute);
......@@ -46,6 +50,8 @@ class StringHelpFunctions {
static void
SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector<std::string>& result);
// merge strings with delimeter
// "a", "b", "c" => "a,b,c"
static void
MergeStringWithDelimeter(const std::vector<std::string>& strs, const std::string& delimeter, std::string& result);
......
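
The new comments document these helpers by example, e.g. "a", "b", "c" with delimiter "," merges to "a,b,c". A hypothetical sketch of that merge behaviour (the real implementation lives in StringHelpFunctions.cpp and keeps the misspelled "delimeter" parameter name) consistent with the unit-test expectations later in this commit:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Sketch of the documented merge behaviour; the real helper is declared above.
static void
MergeStringWithDelimeterSketch(const std::vector<std::string>& strs, const std::string& delimeter,
                               std::string& result) {
    result.clear();  // an empty input therefore yields an empty result
    for (size_t i = 0; i < strs.size(); ++i) {
        if (i != 0) {
            result += delimeter;  // delimiter goes between elements only
        }
        result += strs[i];
    }
}

int main() {
    std::string merged;
    MergeStringWithDelimeterSketch({"a", "b", "c"}, ",", merged);
    std::cout << merged << std::endl;  // prints: a,b,c
    return 0;
}
```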
......@@ -218,10 +218,9 @@ ValidationUtil::ValidateGpuIndex(int32_t gpu_index) {
return Status::OK();
}
#ifdef MILVUS_GPU_VERSION
Status
ValidationUtil::GetGpuMemory(int32_t gpu_index, size_t& memory) {
#ifdef MILVUS_GPU_VERSION
cudaDeviceProp deviceProp;
auto cuda_err = cudaGetDeviceProperties(&deviceProp, gpu_index);
if (cuda_err) {
......@@ -232,10 +231,9 @@ ValidationUtil::GetGpuMemory(int32_t gpu_index, size_t& memory) {
}
memory = deviceProp.totalGlobalMem;
#endif
return Status::OK();
}
#endif
Status
ValidationUtil::ValidateIpAddress(const std::string& ip_address) {
......
......@@ -64,8 +64,10 @@ class ValidationUtil {
static Status
ValidateGpuIndex(int32_t gpu_index);
#ifdef MILVUS_GPU_VERSION
static Status
GetGpuMemory(int32_t gpu_index, size_t& memory);
#endif
static Status
ValidateIpAddress(const std::string& ip_address);
......
......@@ -132,8 +132,8 @@ BaseTest::SetUp() {
void
BaseTest::TearDown() {
milvus::cache::CpuCacheMgr::GetInstance()->ClearCache();
milvus::cache::GpuCacheMgr::GetInstance(0)->ClearCache();
#ifdef MILVUS_GPU_VERSION
milvus::cache::GpuCacheMgr::GetInstance(0)->ClearCache();
knowhere::FaissGpuResourceMgr::GetInstance().Free();
#endif
}
......
......@@ -98,24 +98,25 @@ class SchedulerTest : public testing::Test {
protected:
void
SetUp() override {
res_mgr_ = std::make_shared<ResourceMgr>();
ResourcePtr disk = ResourceFactory::Create("disk", "DISK", 0, true, false);
ResourcePtr cpu = ResourceFactory::Create("cpu", "CPU", 0, true, false);
disk_resource_ = res_mgr_->Add(std::move(disk));
cpu_resource_ = res_mgr_->Add(std::move(cpu));
#ifdef MILVUS_GPU_VERSION
constexpr int64_t cache_cap = 1024 * 1024 * 1024;
cache::GpuCacheMgr::GetInstance(0)->SetCapacity(cache_cap);
cache::GpuCacheMgr::GetInstance(1)->SetCapacity(cache_cap);
ResourcePtr disk = ResourceFactory::Create("disk", "DISK", 0, true, false);
ResourcePtr cpu = ResourceFactory::Create("cpu", "CPU", 0, true, false);
ResourcePtr gpu_0 = ResourceFactory::Create("gpu0", "GPU", 0);
ResourcePtr gpu_1 = ResourceFactory::Create("gpu1", "GPU", 1);
res_mgr_ = std::make_shared<ResourceMgr>();
disk_resource_ = res_mgr_->Add(std::move(disk));
cpu_resource_ = res_mgr_->Add(std::move(cpu));
gpu_resource_0_ = res_mgr_->Add(std::move(gpu_0));
gpu_resource_1_ = res_mgr_->Add(std::move(gpu_1));
auto PCIE = Connection("IO", 11000.0);
res_mgr_->Connect("cpu", "gpu0", PCIE);
res_mgr_->Connect("cpu", "gpu1", PCIE);
#endif
scheduler_ = std::make_shared<Scheduler>(res_mgr_);
......@@ -138,17 +139,6 @@ class SchedulerTest : public testing::Test {
std::shared_ptr<Scheduler> scheduler_;
};
void
insert_dummy_index_into_gpu_cache(uint64_t device_id) {
MockVecIndex* mock_index = new MockVecIndex();
mock_index->ntotal_ = 1000;
engine::VecIndexPtr index(mock_index);
cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(index);
cache::GpuCacheMgr::GetInstance(device_id)->InsertItem("location", obj);
}
class SchedulerTest2 : public testing::Test {
protected:
void
......@@ -157,16 +147,13 @@ class SchedulerTest2 : public testing::Test {
ResourcePtr cpu0 = ResourceFactory::Create("cpu0", "CPU", 0, true, false);
ResourcePtr cpu1 = ResourceFactory::Create("cpu1", "CPU", 1, true, false);
ResourcePtr cpu2 = ResourceFactory::Create("cpu2", "CPU", 2, true, false);
ResourcePtr gpu0 = ResourceFactory::Create("gpu0", "GPU", 0, true, true);
ResourcePtr gpu1 = ResourceFactory::Create("gpu1", "GPU", 1, true, true);
res_mgr_ = std::make_shared<ResourceMgr>();
disk_ = res_mgr_->Add(std::move(disk));
cpu_0_ = res_mgr_->Add(std::move(cpu0));
cpu_1_ = res_mgr_->Add(std::move(cpu1));
cpu_2_ = res_mgr_->Add(std::move(cpu2));
gpu_0_ = res_mgr_->Add(std::move(gpu0));
gpu_1_ = res_mgr_->Add(std::move(gpu1));
auto IO = Connection("IO", 5.0);
auto PCIE1 = Connection("PCIE", 11.0);
auto PCIE2 = Connection("PCIE", 20.0);
......@@ -174,8 +161,15 @@ class SchedulerTest2 : public testing::Test {
res_mgr_->Connect("cpu0", "cpu1", IO);
res_mgr_->Connect("cpu1", "cpu2", IO);
res_mgr_->Connect("cpu0", "cpu2", IO);
#ifdef MILVUS_GPU_VERSION
ResourcePtr gpu0 = ResourceFactory::Create("gpu0", "GPU", 0, true, true);
ResourcePtr gpu1 = ResourceFactory::Create("gpu1", "GPU", 1, true, true);
gpu_0_ = res_mgr_->Add(std::move(gpu0));
gpu_1_ = res_mgr_->Add(std::move(gpu1));
res_mgr_->Connect("cpu1", "gpu0", PCIE1);
res_mgr_->Connect("cpu2", "gpu1", PCIE2);
#endif
scheduler_ = std::make_shared<Scheduler>(res_mgr_);
......
......@@ -175,6 +175,7 @@ TEST(CacheTest, CPU_CACHE_TEST) {
cpu_mgr->PrintInfo();
}
#ifdef MILVUS_GPU_VERSION
TEST(CacheTest, GPU_CACHE_TEST) {
auto gpu_mgr = milvus::cache::GpuCacheMgr::GetInstance(0);
......@@ -202,6 +203,7 @@ TEST(CacheTest, GPU_CACHE_TEST) {
gpu_mgr->ClearCache();
ASSERT_EQ(gpu_mgr->ItemCount(), 0);
}
#endif
TEST(CacheTest, INVALID_TEST) {
{
......
......@@ -25,6 +25,8 @@
#include "utils/StringHelpFunctions.h"
#include "utils/ValidationUtil.h"
#include <limits>
namespace {
static constexpr uint64_t KB = 1024;
......@@ -63,9 +65,21 @@ TEST_F(ConfigTest, CONFIG_TEST) {
int64_t port = server_config.GetInt64Value("port");
ASSERT_NE(port, 0);
server_config.SetValue("test", "2.5");
double test = server_config.GetDoubleValue("test");
ASSERT_EQ(test, 2.5);
server_config.SetValue("float_test", "2.5");
double dbl = server_config.GetDoubleValue("float_test");
ASSERT_LE(abs(dbl - 2.5), std::numeric_limits<double>::epsilon());
float flt = server_config.GetFloatValue("float_test");
ASSERT_LE(abs(flt - 2.5), std::numeric_limits<float>::epsilon());
server_config.SetValue("bool_test", "true");
bool blt = server_config.GetBoolValue("bool_test");
ASSERT_TRUE(blt);
server_config.SetValue("int_test", "34");
int32_t it32 = server_config.GetInt32Value("int_test");
ASSERT_EQ(it32, 34);
int64_t it64 = server_config.GetInt64Value("int_test");
ASSERT_EQ(it64, 34);
milvus::server::ConfigNode fake;
server_config.AddChild("fake", fake);
......@@ -236,6 +250,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int64_val == engine_omp_thread_num);
#ifdef MILVUS_GPU_VERSION
int64_t engine_gpu_search_threshold = 800;
s = config.SetEngineConfigGpuSearchThreshold(std::to_string(engine_gpu_search_threshold));
ASSERT_TRUE(s.ok());
......@@ -251,7 +266,6 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
ASSERT_TRUE(s.ok());
ASSERT_TRUE(bool_val == resource_enable_gpu);
#ifdef MILVUS_GPU_VERSION
int64_t gpu_cache_capacity = 1;
s = config.SetGpuResourceConfigCacheCapacity(std::to_string(gpu_cache_capacity));
ASSERT_TRUE(s.ok());
......@@ -389,6 +403,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) {
s = config.SetEngineConfigOmpThreadNum("10000");
ASSERT_FALSE(s.ok());
#ifdef MILVUS_GPU_VERSION
s = config.SetEngineConfigGpuSearchThreshold("-1");
ASSERT_FALSE(s.ok());
......@@ -396,7 +411,6 @@ TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) {
s = config.SetGpuResourceConfigEnable("ok");
ASSERT_FALSE(s.ok());
#ifdef MILVUS_GPU_VERSION
s = config.SetGpuResourceConfigCacheCapacity("a");
ASSERT_FALSE(s.ok());
s = config.SetGpuResourceConfigCacheCapacity("128");
......
......@@ -313,6 +313,9 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
std::vector<std::vector<float>> record_array;
BuildVectors(0, VECTOR_COUNT, record_array);
::milvus::grpc::VectorIds vector_ids;
for (int64_t i = 0; i < VECTOR_COUNT; i++) {
vector_ids.add_vector_id_array(i);
}
// Insert vectors
// test invalid table name
handler->Insert(&context, &request, &vector_ids);
......
......@@ -120,7 +120,13 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) {
milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result);
ASSERT_EQ(result.size(), 3UL);
std::string merge_str;
milvus::server::StringHelpFunctions::MergeStringWithDelimeter(result, ",", merge_str);
ASSERT_EQ(merge_str, "a,b,c");
result.clear();
milvus::server::StringHelpFunctions::MergeStringWithDelimeter(result, ",", merge_str);
ASSERT_TRUE(merge_str.empty());
auto status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result);
ASSERT_TRUE(status.ok());
ASSERT_EQ(result.size(), 3UL);
......@@ -211,6 +217,11 @@ TEST(UtilTest, STATUS_TEST) {
str = status.ToString();
ASSERT_FALSE(str.empty());
status = milvus::Status(milvus::DB_INVALID_PATH, "mistake");
ASSERT_EQ(status.code(), milvus::DB_INVALID_PATH);
str = status.ToString();
ASSERT_FALSE(str.empty());
status = milvus::Status(milvus::DB_META_TRANSACTION_FAILED, "mistake");
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
str = status.ToString();
......@@ -261,6 +272,10 @@ TEST(ValidationUtilTest, VALIDATE_TABLENAME_TEST) {
table_name = std::string(10000, 'a');
status = milvus::server::ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
table_name = "";
status = milvus::server::ValidationUtil::ValidatePartitionName(table_name);
ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
}
TEST(ValidationUtilTest, VALIDATE_DIMENSION_TEST) {
......