Commit d30e432e authored by starlord

merge 0.3.1


Former-commit-id: a538685c673319581c756289a4c003fa5833d829
Parent 223cf1f5
timeout(time: 10, unit: 'MINUTES') {
timeout(time: 25, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
......
......@@ -61,6 +61,8 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-257 - Update bzip2 download url
- MS-288 - Update compile scripts
- MS-330 - Stability test failed caused by server core dumped
- MS-347 - Build index hangs again
- MS-382 - fix MySQLMetaImpl::CleanUpFilesWithTTL unknown column bug
## Improvement
- MS-156 - Add unittest for merge result functions
......@@ -89,6 +91,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-324 - Show error when there is not enough gpu memory to build index
- MS-328 - Check metric type on server start
- MS-332 - Set grpc and thrift server run concurrently
- MS-352 - Add hybrid index
## New Feature
- MS-180 - Add new mem manager
......@@ -148,8 +151,8 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-130 - Add prometheus_test
- MS-144 - Add nprobe config
- MS-147 - Enable IVF
- MS-130 - Add prometheus_test
## Task
- MS-74 - Change README.md in cpp
- MS-88 - Add support for arm architecture
......
......@@ -86,7 +86,7 @@ if [[ ! -d cmake_build ]]; then
fi
cd cmake_build
git
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
if [[ ${MAKE_CLEAN} == "ON" ]]; then
......
......@@ -157,7 +157,6 @@ if (UNIX)
endif (APPLE)
endif (UNIX)
# ----------------------------------------------------------------------
# thirdparty directory
set(THIRDPARTY_DIR "${MILVUS_SOURCE_DIR}/thirdparty")
......@@ -167,7 +166,7 @@ set(THIRDPARTY_DIR "${MILVUS_SOURCE_DIR}/thirdparty")
if(NOT DEFINED USE_JFROG_CACHE)
set(USE_JFROG_CACHE "OFF")
endif()
if(USE_JFROG_CACHE STREQUAL "ON")
if(USE_JFROG_CACHE STREQUAL "ON")
set(JFROG_ARTFACTORY_CACHE_URL "http://192.168.1.201:80/artifactory/generic-local/milvus/thirdparty/cache/${CMAKE_OS_NAME}/${MILVUS_BUILD_ARCH}/${BUILD_TYPE}")
set(JFROG_USER_NAME "test")
set(JFROG_PASSWORD "Fantast1c")
......@@ -308,9 +307,11 @@ set(EASYLOGGINGPP_MD5 "b78cd319db4be9b639927657b8aa7732")
if(DEFINED ENV{MILVUS_FAISS_URL})
set(FAISS_SOURCE_URL "$ENV{MILVUS_FAISS_URL}")
else()
set(FAISS_SOURCE_URL "https://github.com/facebookresearch/faiss/archive/${FAISS_VERSION}.tar.gz")
set(FAISS_SOURCE_URL "http://192.168.1.105:6060/jinhai/faiss/-/archive/${FAISS_VERSION}/faiss-${FAISS_VERSION}.tar.gz")
# set(FAISS_SOURCE_URL "https://github.com/facebookresearch/faiss/archive/${FAISS_VERSION}.tar.gz")
endif()
set(FAISS_MD5 "0bc12737b23def156f6a1eb782050135")
set(FAISS_MD5 "a589663865a8558205533c8ac414278c")
if(DEFINED ENV{MILVUS_KNOWHERE_URL})
set(KNOWHERE_SOURCE_URL "$ENV{MILVUS_KNOWHERE_URL}")
......@@ -462,6 +463,7 @@ else()
endif()
set(GRPC_MD5 "7ec59ad54c85a12dcbbfede09bf413a9")
# ----------------------------------------------------------------------
# ARROW
......@@ -686,7 +688,7 @@ macro(build_bzip2)
set(BZIP2_STATIC_LIB
"${BZIP2_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}bz2${CMAKE_STATIC_LIBRARY_SUFFIX}")
if(USE_JFROG_CACHE STREQUAL "ON")
if(USE_JFROG_CACHE STREQUAL "ON")
set(BZIP2_CACHE_PACKAGE_NAME "bzip2_${BZIP2_MD5}.tar.gz")
set(BZIP2_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${BZIP2_CACHE_PACKAGE_NAME}")
set(BZIP2_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${BZIP2_CACHE_PACKAGE_NAME}")
......@@ -1184,7 +1186,7 @@ macro(build_faiss)
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
INTERFACE_LINK_LIBRARIES "openblas;lapack" )
endif()
add_dependencies(faiss faiss_ep)
if(${BUILD_FAISS_WITH_MKL} STREQUAL "OFF")
......@@ -1321,7 +1323,7 @@ if (MILVUS_BUILD_TESTS)
if(NOT GTEST_VENDORED)
endif()
get_target_property(GTEST_INCLUDE_DIR gtest INTERFACE_INCLUDE_DIRECTORIES)
link_directories(SYSTEM "${GTEST_PREFIX}/lib")
include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
......@@ -1828,7 +1830,7 @@ endmacro()
if(MILVUS_WITH_SNAPPY)
resolve_dependency(Snappy)
get_target_property(SNAPPY_INCLUDE_DIRS snappy INTERFACE_INCLUDE_DIRECTORIES)
link_directories(SYSTEM ${SNAPPY_PREFIX}/lib/)
include_directories(SYSTEM ${SNAPPY_INCLUDE_DIRS})
......@@ -2131,7 +2133,7 @@ endmacro()
if(MILVUS_WITH_YAMLCPP)
resolve_dependency(yaml-cpp)
get_target_property(YAMLCPP_INCLUDE_DIR yaml-cpp INTERFACE_INCLUDE_DIRECTORIES)
link_directories(SYSTEM ${YAMLCPP_PREFIX}/lib/)
include_directories(SYSTEM ${YAMLCPP_INCLUDE_DIR})
......@@ -2203,7 +2205,7 @@ endmacro()
if(MILVUS_WITH_ZLIB)
resolve_dependency(ZLIB)
get_target_property(ZLIB_INCLUDE_DIR zlib INTERFACE_INCLUDE_DIRECTORIES)
include_directories(SYSTEM ${ZLIB_INCLUDE_DIR})
endif()
......@@ -2301,7 +2303,7 @@ endmacro()
if(MILVUS_WITH_ZSTD)
resolve_dependency(ZSTD)
get_target_property(ZSTD_INCLUDE_DIR zstd INTERFACE_INCLUDE_DIRECTORIES)
link_directories(SYSTEM ${ZSTD_PREFIX}/lib)
include_directories(SYSTEM ${ZSTD_INCLUDE_DIR})
......@@ -2406,7 +2408,7 @@ endmacro()
if(MILVUS_WITH_AWS)
resolve_dependency(AWS)
link_directories(SYSTEM ${AWS_PREFIX}/lib)
get_target_property(AWS_CPP_SDK_S3_INCLUDE_DIR aws-cpp-sdk-s3 INTERFACE_INCLUDE_DIRECTORIES)
......
......@@ -45,3 +45,5 @@ engine_config:
use_blas_threshold: 20
metric_type: L2 # compare vectors by Euclidean distance (L2) or inner product (IP), optional: L2 or IP
omp_thread_num: 0 # how many compute threads are used by the engine; 0 means use all CPU cores
use_hybrid_index: false # use GPU/CPU hybrid index
hybrid_index_gpu: 0 # hybrid index gpu device id
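For context, these two new engine_config keys are read through ServerConfig/ConfigNode elsewhere in this commit (see Index.cpp and IndexBuilder.cpp below). A minimal sketch of how a caller might query them, assuming the accessors used later in this diff; the helper name is hypothetical:
#include "server/ServerConfig.h"
// Hypothetical helper; accessor names mirror the ones used in this commit.
static void ReadHybridIndexSettings(bool &use_hybrid_index, int32_t &hybrid_gpu_id) {
    using namespace zilliz::milvus;
    server::ServerConfig &config = server::ServerConfig::GetInstance();
    server::ConfigNode engine_config = config.GetConfig(server::CONFIG_ENGINE);
    // Defaults match the values documented in server_config.yaml above.
    use_hybrid_index = engine_config.GetBoolValue(server::CONFIG_USE_HYBRID_INDEX, false);
    hybrid_gpu_id = engine_config.GetInt32Value(server::CONFIG_HYBRID_INDEX_GPU, 0);
}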
......@@ -73,19 +73,19 @@ YamlConfigMgr::SetChildConfig(const YAML::Node& node,
return false;
}
bool
YamlConfigMgr::SetSequence(const YAML::Node &node,
const std::string &child_name,
ConfigNode &config) {
if(node[child_name].IsDefined ()) {
size_t cnt = node[child_name].size();
for(size_t i = 0; i < cnt; i++){
config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
}
return true;
}
return false;
}
//bool
//YamlConfigMgr::SetSequence(const YAML::Node &node,
// const std::string &child_name,
// ConfigNode &config) {
// if(node[child_name].IsDefined ()) {
// size_t cnt = node[child_name].size();
// for(size_t i = 0; i < cnt; i++){
// config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
// }
// return true;
// }
// return false;
//}
void
YamlConfigMgr::LoadConfigNode(const YAML::Node& node, ConfigNode& config) {
......@@ -98,8 +98,8 @@ YamlConfigMgr::LoadConfigNode(const YAML::Node& node, ConfigNode& config) {
SetConfigValue(node, key, config);
} else if(node[key].IsMap()){
SetChildConfig(node, key, config);
} else if(node[key].IsSequence()){
SetSequence(node, key, config);
// } else if(node[key].IsSequence()){
// SetSequence(node, key, config);
}
}
}
......
......@@ -33,10 +33,10 @@ class YamlConfigMgr : public IConfigMgr {
const std::string &name,
ConfigNode &config);
bool
SetSequence(const YAML::Node &node,
const std::string &child_name,
ConfigNode &config);
// bool
// SetSequence(const YAML::Node &node,
// const std::string &child_name,
// ConfigNode &config);
void LoadConfigNode(const YAML::Node& node, ConfigNode& config);
......
......@@ -60,6 +60,7 @@ void CollectQueryMetrics(double total_time, size_t nq) {
server::Metrics::GetInstance().QueryVectorResponsePerSecondGaugeSet(double (nq) / total_time);
}
#if 0
void CollectFileMetrics(int file_type, size_t file_size, double total_time) {
switch(file_type) {
case meta::TableFileSchema::RAW:
......@@ -79,6 +80,7 @@ void CollectFileMetrics(int file_type, size_t file_size, double total_time) {
}
}
}
#endif
}
......@@ -205,7 +207,7 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq, uint6
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
ENGINE_LOG_DEBUG << "Query by vectors";
ENGINE_LOG_DEBUG << "Query by vectors " << table_id;
//get all table files from table
meta::DatePartionedTableFilesSchema files;
......@@ -568,7 +570,7 @@ Status DBImpl::BuildIndex(const std::string& table_id) {
int times = 1;
while (has) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
ENGINE_LOG_DEBUG << "Non index files detected in " << table_id << "! Will build index " << times;
meta_ptr_->UpdateTableFilesToIndex(table_id);
/* StartBuildIndexTask(true); */
std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10*1000, times*100)));
......
......@@ -90,11 +90,11 @@ std::shared_ptr<meta::Meta> DBMetaImplFactory::Build(const DBMetaOptions& metaOp
}
}
std::shared_ptr<DB> DBFactory::Build() {
auto options = OptionsFactory::Build();
auto db = DBFactory::Build(options);
return std::shared_ptr<DB>(db);
}
//std::shared_ptr<DB> DBFactory::Build() {
// auto options = OptionsFactory::Build();
// auto db = DBFactory::Build(options);
// return std::shared_ptr<DB>(db);
//}
DB* DBFactory::Build(const Options& options) {
return new DBImpl(options);
......
......@@ -33,7 +33,7 @@ struct DBMetaImplFactory {
};
struct DBFactory {
static std::shared_ptr<DB> Build();
//static std::shared_ptr<DB> Build();
static DB *Build(const Options &);
};
......
......@@ -85,16 +85,20 @@ Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id
return Status::OK();
}
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id) {
std::string db_path = options.path;
std::string table_path = db_path + TABLES_FOLDER + table_id;
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
for(auto& path : options.slave_paths) {
table_path = path + TABLES_FOLDER + table_id;
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force) {
std::vector<std::string> paths = options.slave_paths;
paths.push_back(options.path);
for(auto& path : paths) {
std::string table_path = path + TABLES_FOLDER + table_id;
if(force) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
} else if(boost::filesystem::exists(table_path) &&
boost::filesystem::is_empty(table_path)) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
}
}
return Status::OK();
......
......@@ -19,7 +19,7 @@ namespace utils {
long GetMicroSecTimeStamp();
Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id);
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id);
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force = true);
Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
......
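For clarity, the new force parameter (default true) changes the deletion semantics shown in Utils.cpp above: a forced call removes the table folder unconditionally, while a non-forced call removes it only when it exists and is empty. A hypothetical usage sketch:
// Illustrative only; options_ and table_id come from the surrounding meta code.
utils::DeleteTablePath(options_, table_id);        // force defaults to true: always remove the folder
utils::DeleteTablePath(options_, table_id, false); // force = false: remove only an existing, empty folder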
......@@ -30,13 +30,13 @@ namespace meta {
}
}
int MySQLConnectionPool::getConnectionsInUse() {
return conns_in_use_;
}
void MySQLConnectionPool::set_max_idle_time(int max_idle) {
max_idle_time_ = max_idle;
}
// int MySQLConnectionPool::getConnectionsInUse() {
// return conns_in_use_;
// }
//
// void MySQLConnectionPool::set_max_idle_time(int max_idle) {
// max_idle_time_ = max_idle;
// }
std::string MySQLConnectionPool::getDB() {
return db_;
......
......@@ -44,9 +44,9 @@ public:
// Other half of in-use conn count limit
void release(const mysqlpp::Connection *pc) override;
int getConnectionsInUse();
void set_max_idle_time(int max_idle);
// int getConnectionsInUse();
//
// void set_max_idle_time(int max_idle);
std::string getDB();
......
......@@ -1652,15 +1652,14 @@ Status MySQLMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
}
Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
auto now = utils::GetMicroSecTimeStamp();
std::set<std::string> table_ids;
//remove to_delete files
try {
MetricCollector metric;
{
ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab);
if (connectionPtr == nullptr) {
......@@ -1700,6 +1699,8 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_;
idsToDelete.emplace_back(std::to_string(table_file.id_));
table_ids.insert(table_file.table_id_);
}
if (!idsToDelete.empty()) {
......@@ -1734,12 +1735,11 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
return Status::DBTransactionError("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", er.what());
}
//remove to_delete tables
try {
MetricCollector metric;
{
ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab);
if (connectionPtr == nullptr) {
......@@ -1765,7 +1765,7 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
std::string table_id;
resRow["table_id"].to_string(table_id);
utils::DeleteTablePath(options_, table_id);
utils::DeleteTablePath(options_, table_id, false);//only delete empty folder
idsToDeleteSS << "id = " << std::to_string(id) << " OR ";
}
......@@ -1794,6 +1794,41 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
return Status::DBTransactionError("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", er.what());
}
try {
MetricCollector metric;
{
ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab);
if (connectionPtr == nullptr) {
return Status::Error("Failed to connect to database server");
}
for(auto& table_id : table_ids) {
Query cleanUpFilesWithTTLQuery = connectionPtr->query();
cleanUpFilesWithTTLQuery << "SELECT file_id " <<
"FROM TableFiles " <<
"WHERE table_id = " << quote << table_id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str();
StoreQueryResult res = cleanUpFilesWithTTLQuery.store();
if (res.empty()) {
utils::DeleteTablePath(options_, table_id);
}
}
}
} catch (const BadQuery &er) {
// Handle any query errors
ENGINE_LOG_ERROR << "QUERY ERROR WHEN CLEANING UP FILES WITH TTL" << ": " << er.what();
return Status::DBTransactionError("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", er.what());
} catch (const Exception &er) {
// Catch-all for any other MySQL++ exceptions
ENGINE_LOG_ERROR << "GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL" << ": " << er.what();
return Status::DBTransactionError("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", er.what());
}
return Status::OK();
}
......
......@@ -311,6 +311,7 @@ Status SqliteMetaImpl::HasNonIndexFiles(const std::string& table_id, bool& has)
has = true;
int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0, to_index_count = 0;
std::vector<std::string> file_ids;
for (auto &file : selected) {
switch (std::get<1>(file)) {
case (int) TableFileSchema::RAW:
......@@ -1069,6 +1070,9 @@ Status SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
auto now = utils::GetMicroSecTimeStamp();
std::set<std::string> table_ids;
//remove to_delete files
try {
MetricCollector metric;
......@@ -1098,6 +1102,7 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
ENGINE_LOG_DEBUG << "Removing file id:" << table_file.file_id_ << " location:" << table_file.location_;
ConnectorPtr->remove<TableFileSchema>(table_file.id_);
table_ids.insert(table_file.table_id_);
}
return true;
});
......@@ -1111,6 +1116,7 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
return HandleException("Encounter exception when clean table files", e);
}
//remove to_delete tables
try {
MetricCollector metric;
......@@ -1123,7 +1129,7 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto &table : tables) {
utils::DeleteTablePath(options_, std::get<1>(table));
utils::DeleteTablePath(options_, std::get<1>(table), false);//only delete empty folder
ConnectorPtr->remove<TableSchema>(std::get<0>(table));
}
......@@ -1139,6 +1145,23 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
return HandleException("Encounter exception when clean table files", e);
}
//remove deleted table folder
//don't remove the table folder until all of its files have been deleted
try {
MetricCollector metric;
for(auto& table_id : table_ids) {
auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_),
where(c(&TableFileSchema::table_id_) == table_id));
if(selected.size() == 0) {
utils::DeleteTablePath(options_, table_id);
}
}
} catch (std::exception &e) {
return HandleException("Encounter exception when delete table folder", e);
}
return Status::OK();
}
......
......@@ -48,12 +48,14 @@ static const char* CONFIG_METRIC_COLLECTOR = "collector";
static const char* CONFIG_PROMETHEUS = "prometheus_config";
static const char* CONFIG_METRIC_PROMETHEUS_PORT = "port";
static const char* CONFIG_ENGINE = "engine_config";
static const char* CONFIG_NPROBE = "nprobe";
static const char* CONFIG_NLIST = "nlist";
static const char* CONFIG_DCBT = "use_blas_threshold";
static const char* CONFIG_METRICTYPE = "metric_type";
static const char* CONFIG_OMP_THREAD_NUM = "omp_thread_num";
static const std::string CONFIG_ENGINE = "engine_config";
static const std::string CONFIG_NPROBE = "nprobe";
static const std::string CONFIG_NLIST = "nlist";
static const std::string CONFIG_DCBT = "use_blas_threshold";
static const std::string CONFIG_METRICTYPE = "metric_type";
static const std::string CONFIG_OMP_THREAD_NUM = "omp_thread_num";
static const std::string CONFIG_USE_HYBRID_INDEX = "use_hybrid_index";
static const std::string CONFIG_HYBRID_INDEX_GPU = "hybrid_index_gpu";
class ServerConfig {
public:
......
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include "FaissGpuResources.h"
#include "map"
namespace zilliz {
namespace milvus {
namespace engine {
FaissGpuResources::Ptr& FaissGpuResources::GetGpuResources(int device_id) {
static std::map<int, FaissGpuResources::Ptr> gpu_resources_map;
auto search = gpu_resources_map.find(device_id);
if (search != gpu_resources_map.end()) {
return gpu_resources_map[device_id];
} else {
gpu_resources_map[device_id] = std::make_shared<faiss::gpu::StandardGpuResources>();
return gpu_resources_map[device_id];
}
}
void FaissGpuResources::SelectGpu() {
using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode server_config = config.GetConfig(CONFIG_SERVER);
gpu_num_ = server_config.GetInt32Value(server::CONFIG_GPU_INDEX, 0);
}
int32_t FaissGpuResources::GetGpu() {
return gpu_num_;
}
}
}
}
\ No newline at end of file
/*******************************************************************************
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include "faiss/gpu/GpuResources.h"
#include "faiss/gpu/StandardGpuResources.h"
#include "server/ServerConfig.h"
namespace zilliz {
namespace milvus {
namespace engine {
class FaissGpuResources {
public:
using Ptr = std::shared_ptr<faiss::gpu::GpuResources>;
static FaissGpuResources::Ptr& GetGpuResources(int device_id);
void SelectGpu();
int32_t GetGpu();
FaissGpuResources() : gpu_num_(0) { SelectGpu(); }
private:
int32_t gpu_num_;
};
}
}
}
\ No newline at end of file
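FaissGpuResources caches one StandardGpuResources instance per GPU device in a static map, so repeated lookups for the same device id return the same shared pointer. A minimal usage sketch, assuming the interface declared above:
using zilliz::milvus::engine::FaissGpuResources;
// Both calls resolve to the same cached faiss::gpu::StandardGpuResources instance.
FaissGpuResources::Ptr &res_a = FaissGpuResources::GetGpuResources(0);
FaissGpuResources::Ptr &res_b = FaissGpuResources::GetGpuResources(0);
// res_a.get() == res_b.get()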
......@@ -7,16 +7,22 @@
#if 0
// TODO: maybe support static search
#ifdef GPU_VERSION
#include "faiss/gpu/GpuAutoTune.h"
#include "faiss/gpu/StandardGpuResources.h"
#include "faiss/gpu/utils/DeviceUtils.h"
#endif
#include "Index.h"
#include "faiss/index_io.h"
#include "faiss/IndexIVF.h"
#include "faiss/IVFlib.h"
#include "faiss/IndexScalarQuantizer.h"
#include "server/ServerConfig.h"
#include "src/wrapper/FaissGpuResources.h"
namespace zilliz {
namespace milvus {
......@@ -74,8 +80,27 @@ void write_index(const Index_ptr &index, const std::string &file_name) {
Index_ptr read_index(const std::string &file_name) {
std::shared_ptr<faiss::Index> raw_index = nullptr;
raw_index.reset(faiss::read_index(file_name.c_str()));
return std::make_shared<Index>(raw_index);
faiss::Index *cpu_index = faiss::read_index(file_name.c_str());
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode engine_config = config.GetConfig(server::CONFIG_ENGINE);
bool use_hybrid_index_ = engine_config.GetBoolValue(server::CONFIG_USE_HYBRID_INDEX, false);
if (dynamic_cast<faiss::IndexIVFScalarQuantizer *>(cpu_index) != nullptr && use_hybrid_index_) {
int device_id = engine_config.GetInt32Value(server::CONFIG_HYBRID_INDEX_GPU, 0);
auto gpu_resources = engine::FaissGpuResources::GetGpuResources(device_id);
faiss::gpu::GpuClonerOptions clone_option;
clone_option.storeInCpu = true;
faiss::Index *gpu_index = faiss::gpu::index_cpu_to_gpu(gpu_resources.get(), device_id, cpu_index, &clone_option);
delete cpu_index;
raw_index.reset(gpu_index);
return std::make_shared<Index>(raw_index);
} else {
raw_index.reset(cpu_index);
return std::make_shared<Index>(raw_index);
}
}
}
......
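The reworked read_index promotes an index to GPU only when it is a faiss::IndexIVFScalarQuantizer and use_hybrid_index is enabled; otherwise it falls back to the plain CPU path. A hedged sketch of enabling that path at runtime, mirroring the unit test at the end of this commit (the file path is illustrative):
// Illustrative sketch, using the same namespaces as wrapper_test.cpp.
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode &engine_config = config.GetConfig(server::CONFIG_ENGINE);
engine_config.SetValue(server::CONFIG_USE_HYBRID_INDEX, "true");
engine_config.SetValue(server::CONFIG_HYBRID_INDEX_GPU, "0");
engine::Index_ptr index = engine::read_index("/tmp/index_test.t");  // hybrid path used only for IVF*SQ* indexes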
......@@ -83,7 +83,6 @@ void write_index(const Index_ptr &index, const std::string &file_name);
extern Index_ptr read_index(const std::string &file_name);
#endif
}
}
}
......@@ -17,41 +17,17 @@
#include <faiss/IndexFlat.h>
#include <easylogging++.h>
#include "faiss/IndexScalarQuantizer.h"
#include "server/ServerConfig.h"
#include "IndexBuilder.h"
#include "FaissGpuResources.h"
namespace zilliz {
namespace milvus {
namespace engine {
class GpuResources {
public:
static GpuResources &GetInstance() {
static GpuResources instance;
return instance;
}
void SelectGpu() {
using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode server_config = config.GetConfig(CONFIG_SERVER);
gpu_num = server_config.GetInt32Value(server::CONFIG_GPU_INDEX, 0);
}
int32_t GetGpu() {
return gpu_num;
}
private:
GpuResources() : gpu_num(0) { SelectGpu(); }
private:
int32_t gpu_num;
};
using std::vector;
static std::mutex gpu_resource;
......@@ -59,6 +35,12 @@ static std::mutex cpu_resource;
IndexBuilder::IndexBuilder(const Operand_ptr &opd) {
opd_ = opd;
using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
use_hybrid_index_ = engine_config.GetBoolValue(CONFIG_USE_HYBRID_INDEX, false);
hybrid_index_device_id_ = engine_config.GetInt32Value(server::CONFIG_HYBRID_INDEX_GPU, 0);
}
// Default: build use gpu
......@@ -76,14 +58,48 @@ Index_ptr IndexBuilder::build_all(const long &nb,
faiss::Index *ori_index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type);
std::lock_guard<std::mutex> lk(gpu_resource);
#ifdef UNITTEST_ONLY
faiss::gpu::StandardGpuResources res;
auto device_index = faiss::gpu::index_cpu_to_gpu(&res, GpuResources::GetInstance().GetGpu(), ori_index);
int device_id = 0;
faiss::gpu::GpuClonerOptions clone_option;
clone_option.storeInCpu = use_hybrid_index_;
auto device_index = faiss::gpu::index_cpu_to_gpu(&res, device_id, ori_index, &clone_option);
#else
engine::FaissGpuResources res;
int device_id = res.GetGpu();
auto gpu_resources = engine::FaissGpuResources::GetGpuResources(device_id);
faiss::gpu::GpuClonerOptions clone_option;
clone_option.storeInCpu = use_hybrid_index_;
auto device_index = faiss::gpu::index_cpu_to_gpu(gpu_resources.get(), device_id, ori_index, &clone_option);
#endif
if (!device_index->is_trained) {
nt == 0 || xt == nullptr ? device_index->train(nb, xb)
: device_index->train(nt, xt);
}
device_index->add_with_ids(nb, xb, ids); // TODO: support with add_with_IDMAP
if (dynamic_cast<faiss::IndexIVFScalarQuantizer*>(ori_index) != nullptr
&& use_hybrid_index_) {
std::shared_ptr<faiss::Index> device_hybrid_index = nullptr;
if (hybrid_index_device_id_ != device_id) {
auto host_hybrid_index = faiss::gpu::index_gpu_to_cpu(device_index);
auto hybrid_gpu_resources = engine::FaissGpuResources::GetGpuResources(hybrid_index_device_id_);
auto another_device_index = faiss::gpu::index_cpu_to_gpu(hybrid_gpu_resources.get(),
hybrid_index_device_id_,
host_hybrid_index,
&clone_option);
device_hybrid_index.reset(another_device_index);
delete device_index;
delete host_hybrid_index;
} else {
device_hybrid_index.reset(device_index);
}
delete ori_index;
return std::make_shared<Index>(device_hybrid_index);
}
host_index.reset(faiss::gpu::index_gpu_to_cpu(device_index));
delete device_index;
......
......@@ -45,6 +45,8 @@ class IndexBuilder {
protected:
Operand_ptr opd_ = nullptr;
bool use_hybrid_index_;
int hybrid_index_device_id_;
};
class BgCpuBuilder : public IndexBuilder {
......
......@@ -2,7 +2,7 @@ ARROW_VERSION=zilliz
BOOST_VERSION=1.70.0
BZIP2_VERSION=1.0.6
EASYLOGGINGPP_VERSION=v9.96.7
FAISS_VERSION=v1.5.3
FAISS_VERSION=branch-0.1.0
MKL_VERSION=2019.4.243
GTEST_VERSION=1.8.1
JSONCONS_VERSION=0.126.0
......
......@@ -93,6 +93,7 @@ TEST_F(DBTest, CONFIG_TEST) {
TEST_F(DBTest, DB_TEST) {
db_->Open(GetOptions(), &db_);
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
......@@ -161,6 +162,11 @@ TEST_F(DBTest, DB_TEST) {
}
search.join();
uint64_t count;
stat = db_->GetTableRowCount(TABLE_NAME, count);
ASSERT_STATS(stat);
ASSERT_TRUE(count > 0);
};
TEST_F(DBTest, SEARCH_TEST) {
......
......@@ -84,6 +84,7 @@ TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) {
vector_ids = source.GetVectorIds();
ASSERT_EQ(vector_ids.size(), 100);
status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}
......@@ -198,6 +199,8 @@ TEST_F(NewMemManagerTest, MEM_TABLE_TEST) {
status = mem_table.Serialize();
ASSERT_TRUE(status.ok());
status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}
......@@ -372,7 +375,6 @@ TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) {
delete db_;
boost::filesystem::remove_all(options.meta.path);
};
TEST_F(DBTest, VECTOR_IDS_TEST)
......@@ -445,4 +447,5 @@ TEST_F(DBTest, VECTOR_IDS_TEST)
for (auto i = 0; i < nb; i++) {
ASSERT_EQ(vector_ids[i], i + nb);
}
}
\ No newline at end of file
}
......@@ -135,4 +135,8 @@ TEST(DBMiscTest, UTILS_TEST) {
status = engine::utils::DeleteTablePath(options, TABLE_NAME);
ASSERT_TRUE(status.ok());
status = engine::utils::DeleteTableFilePath(options, file);
ASSERT_TRUE(status.ok());
}
\ No newline at end of file
......@@ -57,7 +57,7 @@ TEST_F(DISABLED_MySQLTest, TABLE_TEST) {
table.table_id_ = "";
status = impl.CreateTable(table);
ASSERT_TRUE(status.ok());
// ASSERT_TRUE(status.ok());
status = impl.DropAll();
ASSERT_TRUE(status.ok());
......@@ -82,16 +82,22 @@ TEST_F(DISABLED_MySQLTest, TABLE_FILE_TEST) {
table.dimension_ = 256;
auto status = impl.CreateTable(table);
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
status = impl.CreateTableFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, meta::TableFileSchema::NEW);
meta::DatesT dates;
dates.push_back(meta::Meta::GetDate());
status = impl.DropPartitionsByDates(table_file.table_id_, dates);
ASSERT_FALSE(status.ok());
uint64_t cnt = 0;
status = impl.Count(table_id, cnt);
ASSERT_TRUE(status.ok());
ASSERT_EQ(cnt, 0UL);
// ASSERT_TRUE(status.ok());
// ASSERT_EQ(cnt, 0UL);
auto file_id = table_file.file_id_;
......@@ -102,11 +108,6 @@ TEST_F(DISABLED_MySQLTest, TABLE_FILE_TEST) {
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, new_file_type);
meta::DatesT dates;
dates.push_back(meta::Meta::GetDate());
status = impl.DropPartitionsByDates(table_file.table_id_, dates);
ASSERT_FALSE(status.ok());
dates.clear();
for (auto i=2; i < 10; ++i) {
dates.push_back(meta::Meta::GetDateWithDelta(-1*i));
......@@ -132,6 +133,8 @@ TEST_F(DISABLED_MySQLTest, TABLE_FILE_TEST) {
ASSERT_EQ(files.size(), 1UL);
ASSERT_TRUE(files[0].file_type_ == meta::TableFileSchema::TO_DELETE);
// status = impl.NextTableId(table_id);
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
......@@ -194,6 +197,13 @@ TEST_F(DISABLED_MySQLTest, ARCHIVE_TEST_DAYS) {
i++;
}
bool has;
status = impl.HasNonIndexFiles(table_id, has);
ASSERT_TRUE(status.ok());
status = impl.UpdateTableFilesToIndex(table_id);
ASSERT_TRUE(status.ok());
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
......@@ -216,6 +226,10 @@ TEST_F(DISABLED_MySQLTest, ARCHIVE_TEST_DISK) {
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
meta::TableSchema table_schema;
table_schema.table_id_ = "";
status = impl.CreateTable(table_schema);
meta::TableFilesSchema files;
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
......
......@@ -56,6 +56,8 @@ TEST(DBSchedulerTest, TASK_QUEUE_TEST) {
ptr = queue.Back();
ASSERT_EQ(ptr->type(), engine::ScheduleTaskType::kIndexLoad);
load_task->Execute();
}
TEST(DBSchedulerTest, SEARCH_SCHEDULER_TEST) {
......
......@@ -4,6 +4,7 @@
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include "db/scheduler/task/SearchTask.h"
#include "server/ServerConfig.h"
#include "utils/TimeRecorder.h"
#include <gtest/gtest.h>
......@@ -213,6 +214,10 @@ TEST(DBSearchTest, MERGE_TEST) {
}
TEST(DBSearchTest, PARALLEL_CLUSTER_TEST) {
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode& db_config = config.GetConfig(server::CONFIG_DB);
db_config.SetValue(server::CONFIG_DB_PARALLEL_REDUCE, "true");
bool ascending = true;
std::vector<long> target_ids;
std::vector<float> target_distence;
......@@ -245,6 +250,10 @@ TEST(DBSearchTest, PARALLEL_CLUSTER_TEST) {
}
TEST(DBSearchTest, PARALLEL_TOPK_TEST) {
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode& db_config = config.GetConfig(server::CONFIG_DB);
db_config.SetValue(server::CONFIG_DB_PARALLEL_REDUCE, "true");
std::vector<long> target_ids;
std::vector<float> target_distence;
engine::SearchContext::ResultSet src_result;
......
......@@ -91,9 +91,10 @@ zilliz::milvus::engine::DBMetaOptions DISABLED_MySQLTest::getDBMetaOptions() {
zilliz::milvus::engine::DBMetaOptions options;
options.path = "/tmp/milvus_test";
options.backend_uri = DBTestEnvironment::getURI();
if(options.backend_uri.empty()) {
throw std::exception();
// throw std::exception();
options.backend_uri = "mysql://root:Fantast1c@192.168.1.194:3306/";
}
return options;
......@@ -123,6 +124,10 @@ int main(int argc, char **argv) {
if (argc > 1) {
uri = argv[1];
}
// if(uri.empty()) {
// uri = "mysql://root:Fantast1c@192.168.1.194:3306/";
// }
// std::cout << uri << std::endl;
::testing::AddGlobalTestEnvironment(new DBTestEnvironment);
return RUN_ALL_TESTS();
......
#-------------------------------------------------------------------------------
# Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited.
# Proprietary and confidential.
#-------------------------------------------------------------------------------
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper wrapper_src)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
set(util_files
${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp)
# Make sure that your call to link_directories takes place before your call to the relevant add_executable.
include_directories(/usr/local/cuda/include)
link_directories("/usr/local/cuda/lib64")
set(wrapper_test_src
${unittest_srcs}
${wrapper_src}
${config_files}
${util_files}
${require_files}
wrapper_test.cpp
)
add_executable(wrapper_test ${wrapper_test_src})
set(wrapper_libs
stdc++
boost_system_static
boost_filesystem_static
faiss
cudart
cublas
sqlite
snappy
bz2
z
zstd
lz4
)
if(${BUILD_FAISS_WITH_MKL} STREQUAL "ON")
set(wrapper_libs ${wrapper_libs} ${MKL_LIBS} ${MKL_LIBS})
else()
set(wrapper_libs ${wrapper_libs}
lapack
openblas)
endif()
target_link_libraries(wrapper_test ${wrapper_libs} ${unittest_libs})
add_definitions("-DUNITTEST_ONLY")
set(topk_test_src
topk_test.cpp
${CMAKE_SOURCE_DIR}/src/wrapper/gpu/Topk.cu)
install(TARGETS wrapper_test DESTINATION bin)
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include "wrapper/Operand.h"
#include "wrapper/Index.h"
#include "wrapper/IndexBuilder.h"
#include "wrapper/FaissGpuResources.h"
#include "server/ServerConfig.h"
#include <gtest/gtest.h>
#include <random>
#include <src/wrapper/FaissGpuResources.h>
using namespace zilliz::milvus;
using namespace zilliz::milvus::engine;
TEST(operand_test, Wrapper_Test) {
using std::cout;
using std::endl;
auto opd = std::make_shared<Operand>();
opd->index_type = "IVF";
opd->preproc = "OPQ";
opd->postproc = "PQ";
opd->metric_type = "L2";
opd->d = 64;
auto opd_str = operand_to_str(opd);
auto new_opd = str_to_operand(opd_str);
// TODO: fix all place where using opd to build index.
assert(new_opd->get_index_type(10000) == opd->get_index_type(10000));
auto opd_sq8 = std::make_shared<Operand>();
opd_sq8->index_type = "IVFSQ8";
opd_sq8->preproc = "OPQ";
opd_sq8->postproc = "PQ";
opd_sq8->metric_type = "L2";
opd_sq8->d = 64;
auto opd_str_sq8 = operand_to_str(opd_sq8);
auto new_opd_sq8 = str_to_operand(opd_str_sq8);
assert(new_opd_sq8->get_index_type(10000) == opd_sq8->get_index_type(10000));
}
TEST(build_test, Wrapper_Test) {
// dimension of the vectors to index
int d = 3;
// make a set of nt training vectors in the unit cube
size_t nt = 10000;
// a reasonable number of cetroids to index nb vectors
int ncentroids = 16;
std::random_device rd;
std::mt19937 gen(rd());
std::vector<float> xb;
std::vector<long> ids;
//prepare train data
std::uniform_real_distribution<> dis_xt(-1.0, 1.0);
std::vector<float> xt(nt * d);
for (size_t i = 0; i < nt * d; i++) {
xt[i] = dis_xt(gen);
}
//train the index
auto opd = std::make_shared<Operand>();
opd->index_type = "IVF";
opd->d = d;
opd->ncent = ncentroids;
IndexBuilderPtr index_builder_1 = GetIndexBuilder(opd);
auto index_1 = index_builder_1->build_all(0, xb, ids, nt, xt);
ASSERT_TRUE(index_1 != nullptr);
// size of the database we plan to index
size_t nb = 100000;
//prepare raw data
xb.resize(nb);
ids.resize(nb);
for (size_t i = 0; i < nb; i++) {
xb[i] = dis_xt(gen);
ids[i] = i;
}
index_1->add_with_ids(nb, xb.data(), ids.data());
//search in first quadrant
int nq = 1, k = 10;
std::vector<float> xq = {0.5, 0.5, 0.5};
float *result_dists = new float[k];
long *result_ids = new long[k];
index_1->search(nq, xq.data(), k, result_dists, result_ids);
for (int i = 0; i < k; i++) {
if (result_ids[i] < 0) {
ASSERT_TRUE(false);
break;
}
long id = result_ids[i];
std::cout << "No." << id << " [" << xb[id * 3] << ", " << xb[id * 3 + 1] << ", "
<< xb[id * 3 + 2] << "] distance = " << result_dists[i] << std::endl;
//makesure result vector is in first quadrant
ASSERT_TRUE(xb[id * 3] > 0.0);
ASSERT_TRUE(xb[id * 3 + 1] > 0.0);
ASSERT_TRUE(xb[id * 3 + 2] > 0.0);
}
delete[] result_dists;
delete[] result_ids;
}
TEST(gpu_build_test, Wrapper_Test) {
using std::vector;
int d = 256;
int nb = 3 * 1000 * 100;
int nq = 100;
vector<float> xb(d * nb);
vector<float> xq(d * nq);
vector<long> ids(nb);
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis_xt(-1.0, 1.0);
for (auto &e : xb) { e = float(dis_xt(gen)); }
for (auto &e : xq) { e = float(dis_xt(gen)); }
for (int i = 0; i < nb; ++i) { ids[i] = i; }
auto opd = std::make_shared<Operand>();
opd->index_type = "IVF";
opd->d = d;
opd->ncent = 256;
IndexBuilderPtr index_builder_1 = GetIndexBuilder(opd);
auto index_1 = index_builder_1->build_all(nb, xb.data(), ids.data());
assert(index_1->ntotal == nb);
assert(index_1->dim == d);
// sanity check: search 5 first vectors of xb
int k = 1;
vector<long> I(5 * k);
vector<float> D(5 * k);
index_1->search(5, xb.data(), k, D.data(), I.data());
for (int i = 0; i < 5; ++i) { assert(i == I[i]); }
}
TEST(gpu_resource_test, Wrapper_Test) {
FaissGpuResources res_mgr;
FaissGpuResources::Ptr& res = res_mgr.GetGpuResources(0);
ASSERT_NE(res, nullptr);
res = res_mgr.GetGpuResources(0);
ASSERT_NE(res, nullptr);
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode& server_config = config.GetConfig(server::CONFIG_SERVER);
server_config.SetValue(server::CONFIG_GPU_INDEX, "0");
res_mgr.SelectGpu();
int32_t gpu_num = res_mgr.GetGpu();
ASSERT_EQ(gpu_num, 0);
}
TEST(index_test, Wrapper_Test) {
std::vector<float> data;
std::vector<long> ids;
long vec_count = 10000;
for(long i = 0; i < vec_count; i++) {
data.push_back(i/3);
data.push_back(i/9);
ids.push_back(i);
}
faiss::Index* faiss_index = faiss::index_factory(2, "IVF128,SQ8");
faiss_index->train(vec_count, data.data());
std::shared_ptr<faiss::Index> raw_index(faiss_index);
engine::Index_ptr index = std::make_shared<engine::Index>(raw_index);
index->add_with_ids(vec_count, data.data(), ids.data());
ASSERT_EQ(index->ntotal, vec_count);
std::string file_name = "/tmp/index_test.t";
write_index(index, file_name);
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode& engine_config = config.GetConfig(server::CONFIG_ENGINE);
engine_config.SetValue(server::CONFIG_USE_HYBRID_INDEX, "true");
Index_ptr index_out = read_index(file_name);
ASSERT_NE(index_out, nullptr);
bool res = index_out->reset();
ASSERT_TRUE(res);
}
......@@ -4,9 +4,12 @@
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <gtest/gtest-death-test.h>
#include "config/IConfigMgr.h"
#include "server/ServerConfig.h"
#include "utils/CommonUtil.h"
#include "utils/ValidationUtil.h"
using namespace zilliz::milvus;
......@@ -15,6 +18,10 @@ namespace {
static const char* CONFIG_FILE_PATH = "./milvus/conf/server_config.yaml";
static const char* LOG_FILE_PATH = "./milvus/conf/log_config.conf";
static constexpr uint64_t KB = 1024;
static constexpr uint64_t MB = KB*1024;
static constexpr uint64_t GB = MB*1024;
}
TEST(ConfigTest, CONFIG_TEST) {
......@@ -87,6 +94,9 @@ TEST(ConfigTest, SERVER_CONFIG_TEST) {
server::ServerError err = config.LoadConfigFile(CONFIG_FILE_PATH);
ASSERT_EQ(err, server::SERVER_SUCCESS);
err = server::ServerConfig::GetInstance().ValidateConfig();
ASSERT_EQ(err, server::SERVER_SUCCESS);
server::ConfigNode node1 = config.GetConfig("server_config");
server::ConfigNode& node2 = config.GetConfig("cache_config");
node1.Combine(node2);
......@@ -100,6 +110,43 @@ TEST(ConfigTest, SERVER_CONFIG_TEST) {
config.PrintAll();
const server::ServerConfig const_config = config;
server::ConfigNode node = const_config.GetConfig("aaa");
unsigned long total_mem = 0, free_mem = 0;
server::CommonUtil::GetSystemMemInfo(total_mem, free_mem);
size_t gpu_mem = 0;
server::ValidationUtil::GetGpuMemory(0, gpu_mem);
server::ConfigNode& server_config = config.GetConfig("server_config");
server::ConfigNode& db_config = config.GetConfig("db_config");
server::ConfigNode& cache_config = config.GetConfig(server::CONFIG_CACHE);
cache_config.SetValue(server::CACHE_FREE_PERCENT, "2.0");
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
size_t cache_cap = 16;
size_t insert_buffer_size = (total_mem - cache_cap*GB + 1*GB)/GB;
db_config.SetValue(server::CONFIG_DB_INSERT_BUFFER_SIZE, std::to_string(insert_buffer_size));
cache_config.SetValue(server::CONFIG_CPU_CACHE_CAPACITY, std::to_string(cache_cap));
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
cache_cap = total_mem/GB + 2;
cache_config.SetValue(server::CONFIG_CPU_CACHE_CAPACITY, std::to_string(cache_cap));
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
size_t index_building_threshold = (gpu_mem + 1*MB)/MB;
db_config.SetValue(server::CONFIG_DB_INDEX_TRIGGER_SIZE,
std::to_string(index_building_threshold));
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
insert_buffer_size = total_mem/GB + 2;
db_config.SetValue(server::CONFIG_DB_INSERT_BUFFER_SIZE, std::to_string(insert_buffer_size));
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
server_config.SetValue(server::CONFIG_GPU_INDEX, "9999");
err = config.ValidateConfig();
ASSERT_NE(err, server::SERVER_SUCCESS);
}
\ No newline at end of file
......@@ -204,3 +204,12 @@ TEST(UtilTest, VALIDATE_INDEXTYPE_TEST) {
ASSERT_EQ(server::ValidationUtil::ValidateTableIndexType((int)engine::EngineType::MAX_VALUE + 1), server::SERVER_INVALID_INDEX_TYPE);
}
TEST(UtilTest, TIMERECORDER_TEST) {
for(int64_t log_level = 0; log_level <= 6; log_level++) {
if(log_level == 5) {
continue; //skip fatal
}
server::TimeRecorder rc("time", log_level);
rc.RecordSection("end");
}
}
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include "utils/ValidationUtil.h"
#include "utils/Error.h"
#include "db/ExecutionEngine.h"
#include <string>
using namespace zilliz::milvus;
using namespace zilliz::milvus::server;
TEST(ValidationUtilTest, TableNameTest) {
std::string table_name = "Normal123_";
ServerError res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_SUCCESS);
table_name = "12sds";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
table_name = "";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
table_name = "_asdasd";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_SUCCESS);
table_name = "!@#!@";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
table_name = "_!@#!@";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
table_name = "中文";
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
table_name = std::string(10000, 'a');
res = ValidationUtil::ValidateTableName(table_name);
ASSERT_EQ(res, SERVER_INVALID_TABLE_NAME);
}
TEST(ValidationUtilTest, TableDimensionTest) {
ASSERT_EQ(ValidationUtil::ValidateTableDimension(-1), SERVER_INVALID_VECTOR_DIMENSION);
ASSERT_EQ(ValidationUtil::ValidateTableDimension(0), SERVER_INVALID_VECTOR_DIMENSION);
ASSERT_EQ(ValidationUtil::ValidateTableDimension(16385), SERVER_INVALID_VECTOR_DIMENSION);
ASSERT_EQ(ValidationUtil::ValidateTableDimension(16384), SERVER_SUCCESS);
ASSERT_EQ(ValidationUtil::ValidateTableDimension(1), SERVER_SUCCESS);
}
TEST(ValidationUtilTest, TableIndexTypeTest) {
ASSERT_EQ(ValidationUtil::ValidateTableIndexType((int)engine::EngineType::INVALID), SERVER_INVALID_INDEX_TYPE);
for(int i = 1; i <= (int)engine::EngineType::MAX_VALUE; i++) {
ASSERT_EQ(ValidationUtil::ValidateTableIndexType(i), SERVER_SUCCESS);
}
ASSERT_EQ(ValidationUtil::ValidateTableIndexType((int)engine::EngineType::MAX_VALUE + 1), SERVER_INVALID_INDEX_TYPE);
}
TEST(ValidationUtilTest, ValidateGpuTest) {
ASSERT_EQ(ValidationUtil::ValidateGpuIndex(0), SERVER_SUCCESS);
ASSERT_NE(ValidationUtil::ValidateGpuIndex(100), SERVER_SUCCESS);
size_t memory = 0;
ASSERT_EQ(ValidationUtil::GetGpuMemory(0, memory), SERVER_SUCCESS);
ASSERT_NE(ValidationUtil::GetGpuMemory(100, memory), SERVER_SUCCESS);
}