Commit 52e23c47 authored by Y yu yunfeng

merge 0.3.0


Former-commit-id: 9e30f64b77a970d48e627bd5c518a7c17f498fd5
......@@ -5,8 +5,11 @@ Please mark all change in change log and use the ticket from JIRA.
# MegaSearch 0.3.0 (TBD)
## Bug
- MS-80 - Fix server hang issue
## Improvement
- MS-82 - Update server startup welcome message
- MS-83 - Update vecwise to Milvus
## New Feature
......@@ -15,8 +18,18 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-64 - Different table can have different index type
- MS-52 - Return search score
- MS-66 - Support time range query
- MS-68 - Remove rocksdb from third-party
- MS-70 - cmake: remove redundant libs in src
- MS-71 - cmake: fix faiss dependency
- MS-72 - cmake: change prometheus source to git
- MS-73 - cmake: delete civetweb
- MS-65 - Implement GetTableRowCount interface
- MS-45 - Implement DeleteTable interface
- MS-75 - cmake: change faiss version to 1.5.2; add CUDA gencode
- MS-81 - fix faiss ptx issue; change cuda gencode
## Task
- MS-74 - Change README.md in cpp
# MegaSearch 0.2.0 (2019-05-31)
......@@ -25,6 +38,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-32 - Fix thrift error
- MS-34 - Fix prometheus-cpp thirdparty
- MS-67 - Fix license check bug
- MS-76 - Fix pipeline crash bug
## Improvement
......
......@@ -20,11 +20,12 @@ MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
ENDMACRO (GET_GIT_BRANCH_NAME)
GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
if(NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif()
set(MEGASEARCH_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MEGASEARCH_VERSION "${MEGASEARCH_VERSION}")
message(STATUS "Build version = ${MEGASEARCH_VERSION}")
if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(BUILD_TYPE "release")
......@@ -33,11 +34,16 @@ else()
endif()
message(STATUS "Build type = ${BUILD_TYPE}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)
project(megasearch VERSION "${MEGASEARCH_VERSION}")
project(vecwise_engine LANGUAGES CUDA CXX)
# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
if(NOT MSVC)
find_program(MAKE make)
endif()
endif()
set(MEGASEARCH_VERSION_MAJOR "${megasearch_VERSION_MAJOR}")
set(MEGASEARCH_VERSION_MINOR "${megasearch_VERSION_MINOR}")
set(MEGASEARCH_VERSION_PATCH "${megasearch_VERSION_PATCH}")
......@@ -45,9 +51,13 @@ set(MEGASEARCH_VERSION_PATCH "${megasearch_VERSION_PATCH}")
if(MEGASEARCH_VERSION_MAJOR STREQUAL ""
OR MEGASEARCH_VERSION_MINOR STREQUAL ""
OR MEGASEARCH_VERSION_PATCH STREQUAL "")
message(FATAL_ERROR "Failed to determine MegaSearch version from '${MEGASEARCH_VERSION}'")
message(WARNING "Failed to determine MegaSearch version from '${MEGASEARCH_VERSION}'")
set(MEGASEARCH_VERSION "unknown")
endif()
message(STATUS "Build version = ${MEGASEARCH_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)
message(STATUS "MegaSearch version: "
"${MEGASEARCH_VERSION_MAJOR}.${MEGASEARCH_VERSION_MINOR}.${MEGASEARCH_VERSION_PATCH} "
"(full: '${MEGASEARCH_VERSION}')")
......
......@@ -2,20 +2,12 @@
#### Step 1: install necessary tools
centos7 :
yum install gfortran libsqlite3-dev libsnappy-dev libzstd-dev bzip2
yum install gfortran flex bison
ubuntu16.04 :
sudo apt-get install gfortran libsqlite3-dev libsnappy-dev libzstd-dev bzip2 liblz4-dev
sudo apt-get install gfortran flex bison
#### Step 2: build third-parties
Note: If you want to debug into the third-party libraries, you can build them in debug mode with CXXFLAGS='-g -O0' using the option -t Debug
cd [sourcecode path]/cpp/thid_party
./build.sh -t Debug
./build.sh -t Release
#### Step 3: build(output to cmake_build folder)
#### Step 2: build (output to cmake_build folder)
cmake_build/src/vecwise_server is the server
cmake_build/src/libvecwise_engine.a is the static library
......@@ -24,19 +16,30 @@ cmake_build/src/libvecwise_engine.a is the static library
./build.sh -t Debug
./build.sh -t Release
./build.sh -g # Build GPU version
If you encounter the following error when building:
`protocol https not supported or disabled in libcurl`
1. Install libcurl4-openssl-dev
2. Install cmake 3.14:
```
./bootstrap --system-curl
make
sudo make install
```
#### To build unittest:
./build.sh -u
or
./build.sh --unittest
### Launch server
Set the configuration in cpp/conf/server_config.yaml
Then launch the server with that config:
cd [build output path]
start_server.sh
stop_server.sh
......@@ -44,7 +47,7 @@ Then launch server with config:
### Launch test_client(only for debug)
If you want to test the remote API, you can build test_client.
test_client uses the same config file as the server:
cd [build output path]/test_client
test_client -c [sourcecode path]/cpp/conf/server_config.yaml
......
......@@ -70,8 +70,8 @@ define_option(MEGASEARCH_WITH_FAISS "Build with FAISS library" ON)
define_option(MEGASEARCH_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON)
define_option_string(MEGASEARCH_FAISS_GPU_ARCH "Specifying which GPU architectures to build against"
"-gencode=arch=compute_61,code=sm_61")
#define_option_string(MEGASEARCH_FAISS_GPU_ARCH "Specifying which GPU architectures to build against"
# "-gencode=arch=compute_35,code=compute_35 -gencode=arch=compute_52,code=compute_52 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=compute_61")
define_option(MEGASEARCH_WITH_LAPACK "Build with LAPACK library" ON)
......@@ -81,7 +81,7 @@ define_option(MEGASEARCH_WITH_OPENBLAS "Build with OpenBLAS library" ON)
define_option(MEGASEARCH_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)
define_option(MEGASEARCH_WITH_ROCKSDB "Build with RocksDB library" ON)
define_option(MEGASEARCH_WITH_ROCKSDB "Build with RocksDB library" OFF)
define_option(MEGASEARCH_WITH_SNAPPY "Build with Snappy compression" ON)
......
......@@ -220,7 +220,7 @@ endif()
if(DEFINED ENV{MEGASEARCH_FAISS_URL})
set(FAISS_SOURCE_URL "$ENV{MEGASEARCH_FAISS_URL}")
else()
set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/${FAISS_VERSION}.tar.gz")
set(FAISS_SOURCE_URL "https://github.com/facebookresearch/faiss/archive/${FAISS_VERSION}.tar.gz")
endif()
if (DEFINED ENV{MEGASEARCH_GTEST_URL})
......@@ -253,7 +253,8 @@ if (DEFINED ENV{MEGASEARCH_PROMETHEUS_URL})
set(PROMETHEUS_SOURCE_URL "$ENV{PROMETHEUS_OPENBLAS_URL}")
else ()
set(PROMETHEUS_SOURCE_URL
"https://github.com/JinHai-CN/prometheus-cpp/archive/${PROMETHEUS_VERSION}.tar.gz")
#"https://github.com/JinHai-CN/prometheus-cpp/archive/${PROMETHEUS_VERSION}.tar.gz"
https://github.com/jupp0r/prometheus-cpp.git)
endif()
if (DEFINED ENV{MEGASEARCH_ROCKSDB_URL})
......@@ -685,7 +686,15 @@ macro(build_faiss)
if(${MEGASEARCH_WITH_FAISS_GPU_VERSION} STREQUAL "ON")
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
"--with-cuda=${CUDA_TOOLKIT_ROOT_DIR}"
"--with-cuda-arch=${MEGASEARCH_FAISS_GPU_ARCH}")
# "with_cuda_arch=\"-gencode=arch=compute_35,code=compute_35 \\
# -gencode=arch=compute_52,code=compute_52 \\
# -gencode=arch=compute_60,code=compute_60 \\
# -gencode=arch=compute_61,code=compute_61\""
"--with-cuda-arch=\"-gencode=arch=compute_35,code=compute_35\""
"--with-cuda-arch=\"-gencode=arch=compute_52,code=compute_52\""
"--with-cuda-arch=\"-gencode=arch=compute_60,code=compute_60\""
"--with-cuda-arch=\"-gencode=arch=compute_61,code=compute_61\""
)
else()
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} --without-cuda)
endif()
......@@ -716,17 +725,23 @@ macro(build_faiss)
${FAISS_STATIC_LIB})
# DEPENDS
# ${faiss_dependencies})
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep)
ExternalProject_Add_StepDependencies(faiss_ep build lapack_ep)
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
file(MAKE_DIRECTORY "${FAISS_INCLUDE_DIR}")
add_library(faiss STATIC IMPORTED)
set_target_properties(
faiss
PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}")
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
INTERFACE_LINK_LIBRARIES "openblas;lapack" )
add_dependencies(faiss faiss_ep)
#add_dependencies(faiss openblas_ep)
#add_dependencies(faiss lapack_ep)
#target_link_libraries(faiss ${OPENBLAS_PREFIX}/lib)
#target_link_libraries(faiss ${LAPACK_PREFIX}/lib)
endmacro()
if(MEGASEARCH_WITH_FAISS)
......@@ -929,11 +944,20 @@ macro(build_prometheus)
${EP_COMMON_CMAKE_ARGS}
-DCMAKE_INSTALL_LIBDIR=lib
-DBUILD_SHARED_LIBS=OFF
"-DCMAKE_INSTALL_PREFIX=${PROMETHEUS_PREFIX}")
"-DCMAKE_INSTALL_PREFIX=${PROMETHEUS_PREFIX}"
-DCMAKE_BUILD_TYPE=Release)
externalproject_add(prometheus_ep
URL
GIT_REPOSITORY
${PROMETHEUS_SOURCE_URL}
GIT_TAG
${PROMETHEUS_VERSION}
GIT_SHALLOW
TRUE
# GIT_CONFIG
# recurse-submodules=true
# URL
# ${PROMETHEUS_SOURCE_URL}
${EP_LOG_OPTIONS}
CMAKE_ARGS
${PROMETHEUS_CMAKE_ARGS}
......@@ -991,7 +1015,7 @@ if(MEGASEARCH_WITH_PROMETHEUS)
link_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/)
include_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/include)
link_directories(${PROMETHEUS_PREFIX}/civetweb_ep-prefix/src/civetweb_ep)
#link_directories(${PROMETHEUS_PREFIX}/civetweb_ep-prefix/src/civetweb_ep)
endif()
# ----------------------------------------------------------------------
......
......@@ -6,7 +6,7 @@ server_config:
gpu_index: 0 #which gpu to be used
db_config:
db_path: /tmp/vecwise
db_path: /tmp/milvus
db_backend_url: http://127.0.0.1
db_flush_interval: 5 #unit: second
idmapper_max_open_file: 128
......
......@@ -6,7 +6,7 @@ server_config:
gpu_index: 0 #which gpu to be used
db_config:
db_path: /tmp/vecwise
db_path: /tmp/milvus
db_backend_url: http://127.0.0.1
db_flush_interval: 5 #unit: second
idmapper_max_open_file: 128
......
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-global.log"
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = true
......@@ -8,12 +8,12 @@
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
* DEBUG:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-debug.log"
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-warning.log"
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-warning.log"
* TRACE:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-trace.log"
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false
......@@ -21,7 +21,7 @@
## Error logs
* ERROR:
ENABLED = false
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-error.log"
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-error.log"
* FATAL:
ENABLED = false
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-fatal.log"
\ No newline at end of file
FILENAME = "/tmp/milvus/logs/vecwise_engine-%datetime{%H:%m}-fatal.log"
\ No newline at end of file
......@@ -10,7 +10,7 @@ function kill_progress()
STATUS=$(kill_progress "vecwise_server" )
if [[ ${STATUS} == "false" ]];then
echo "vecwise_server closed abnormally!"
echo "Milvus server closed abnormally!"
else
echo "vecwise_server closed successfully!"
echo "Milvus server closed successfully!"
fi
......@@ -54,33 +54,12 @@ set(get_sys_info_files
license/GetSysInfo.cpp)
include_directories(/usr/include)
include_directories(/usr/local/cuda/include)
include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
include_directories(thrift/gen-cpp)
#target_link_libraries(megasearch boost_system_static)
#target_link_libraries(megasearch boost_filesystem_static)
#target_link_libraries(megasearch boost_serialization_static)
#target_link_libraries(megasearch bzip2)
#target_link_libraries(megasearch easyloggingpp)
#target_link_libraries(megasearch faiss)
#target_link_libraries(megasearch gtest)
#target_link_libraries(megasearch lapack)
#target_link_libraries(megasearch lz4)
#target_link_libraries(megasearch openblas)
#target_link_libraries(megasearch rocksdb)
#target_link_libraries(megasearch snappy)
#target_link_libraries(megasearch sqlite)
#target_link_libraries(megasearch sqlite_orm)
#target_link_libraries(megasearch thrift)
#target_link_libraries(megasearch yaml-cpp)
#target_link_libraries(megasearch zlib)
#target_link_libraries(megasearch zstd)
set(third_party_libs
easyloggingpp
sqlite
# sqlite_orm
thrift
yaml-cpp
faiss
......@@ -89,7 +68,6 @@ set(third_party_libs
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
civetweb
boost_system_static
boost_filesystem_static
boost_serialization_static
......@@ -102,47 +80,31 @@ set(third_party_libs
)
if (GPU_VERSION STREQUAL "ON")
link_directories(/usr/local/cuda/lib64)
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(engine_libs
pthread
libfaiss.a
libgpufaiss.a
libgomp.a
libopenblas.a
libgfortran.a
libquadmath.a
cudart
cublas
libsqlite3.a
libprometheus-cpp-push.a
libprometheus-cpp-pull.a
libprometheus-cpp-core.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
else()
set(engine_libs
pthread
libfaiss.a
libgomp.a
libopenblas.a
libgfortran.a
libquadmath.a
libsqlite3.a
libprometheus-cpp-push.a
libprometheus-cpp-pull.a
libprometheus-cpp-core.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
endif ()
if (ENABLE_LICENSE STREQUAL "ON")
link_directories(/usr/local/cuda/lib64/stubs)
link_directories(/usr/local/cuda/lib64)
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs")
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(license_libs
nvidia-ml
libboost_system.a
libboost_filesystem.a
libboost_serialization.a
crypto
cudart
cublas
......@@ -161,27 +123,18 @@ if (ENABLE_LICENSE STREQUAL "ON")
target_link_libraries(vecwise_license ${license_libs} ${third_party_libs})
endif ()
#set(metrics_lib
# libprometheus-cpp-push.a
# libprometheus-cpp-pull.a
# libprometheus-cpp-core.a
# )
set(metrics_lib
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
)
#add_library(vecwise_engine STATIC ${metrics_files} )
#target_link_libraries(metrics ${metrics_lib})
target_link_libraries(metrics ${metrics_lib})
set(server_libs
vecwise_engine
libthrift.a
pthread
libyaml-cpp.a
libboost_system.a
libboost_filesystem.a
libsnappy.a
libbz2.a
libz.a
libzstd.a
liblz4.a
dl
metrics
)
......
......@@ -10,7 +10,7 @@
#include <set>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
Cache::Cache(int64_t capacity, uint64_t cache_max_count)
......@@ -218,6 +218,6 @@ void Cache::print() {
}
} // cache
} // vecwise
} // milvus
} // zilliz
......@@ -14,7 +14,7 @@
#include "DataObj.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
const std::string SWAP_DIR = ".CACHE";
......@@ -65,6 +65,6 @@ private:
using CachePtr = std::shared_ptr<Cache>;
} // cache
} // vecwise
} // milvus
} // zilliz
......@@ -8,7 +8,7 @@
#include "metrics/Metrics.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
CacheMgr::CacheMgr() {
......
......@@ -9,7 +9,7 @@
#include "Cache.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
class CacheMgr {
......
......@@ -8,7 +8,7 @@
#include "server/ServerConfig.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
CpuCacheMgr::CpuCacheMgr() {
......
......@@ -8,7 +8,7 @@
#include "CacheMgr.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
class CpuCacheMgr : public CacheMgr {
......
......@@ -11,7 +11,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
class DataObj {
......
......@@ -8,7 +8,7 @@
#include "server/ServerConfig.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
GpuCacheMgr::GpuCacheMgr() {
......
......@@ -7,7 +7,7 @@
#include "CacheMgr.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
class GpuCacheMgr : public CacheMgr {
......
......@@ -12,7 +12,7 @@
#include <stdexcept>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace cache {
template<typename key_t, typename value_t>
......@@ -97,6 +97,6 @@ private:
};
} // cache
} // vecwise
} // milvus
} // zilliz
......@@ -12,7 +12,7 @@
#include <algorithm>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
void ConfigNode::Combine(const ConfigNode& target) {
......
......@@ -10,7 +10,7 @@
#include <map>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class ConfigNode;
......
......@@ -7,7 +7,7 @@
#include "YamlConfigMgr.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
IConfigMgr * IConfigMgr::GetInstance() {
......
......@@ -9,7 +9,7 @@
#include "ConfigNode.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
// this class can parse nested config file and return config item
......
......@@ -9,7 +9,7 @@
#include <sys/stat.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
ServerError YamlConfigMgr::LoadConfigFile(const std::string &filename) {
......
......@@ -12,7 +12,7 @@
#include <yaml-cpp/yaml.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class YamlConfigMgr : public IConfigMgr {
......
......@@ -10,7 +10,7 @@
#include "Factories.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
DB::~DB() {}
......@@ -21,5 +21,5 @@ void DB::Open(const Options& options, DB** dbptr) {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -13,7 +13,7 @@
#include <string>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Env;
......@@ -23,19 +23,22 @@ public:
static void Open(const Options& options, DB** dbptr);
virtual Status CreateTable(meta::TableSchema& table_schema_) = 0;
virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
virtual Status DescribeTable(meta::TableSchema& table_schema_) = 0;
virtual Status HasTable(const std::string& table_id_, bool& has_or_not_) = 0;
virtual Status HasTable(const std::string& table_id, bool& has_or_not_) = 0;
virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
virtual Status InsertVectors(const std::string& table_id_,
size_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, QueryResults& results) = 0;
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0;
virtual Status Size(long& result) = 0;
virtual Status Size(uint64_t& result) = 0;
virtual Status DropAll() = 0;
......@@ -47,5 +50,5 @@ public:
}; // DB
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
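The DB.h hunk above extends the engine interface with DeleteTable, AllTables and GetTableRowCount, and widens the count/size parameters from size_t/long to uint64_t. A minimal caller-side sketch of the updated interface follows; only the method signatures come from the header above — the include paths, the table id and the option values are illustrative assumptions, not part of this commit.

```cpp
// Sketch only: exercises the DB interface methods added/changed in this commit.
// Include paths and "example_table" are assumptions; signatures follow DB.h above.
#include <string>
#include <vector>
#include "DB.h"          // zilliz::milvus::engine::DB
#include "Options.h"     // engine::Options
#include "MetaTypes.h"   // meta::TableSchema, meta::DatesT (assumed header name)

using namespace zilliz::milvus;

void ExerciseNewInterface() {
    engine::Options options;                 // default-constructed; real paths come from server_config.yaml
    engine::DB* db = nullptr;
    engine::DB::Open(options, &db);

    std::vector<engine::meta::TableSchema> tables;
    db->AllTables(tables);                   // list every table known to the meta store

    uint64_t row_count = 0;
    db->GetTableRowCount("example_table", row_count);  // total size / dimension / sizeof(float), per DBMetaImpl::Count

    engine::meta::DatesT dates;              // empty dates => drop all files of the table (see DBImpl::DeleteTable)
    db->DeleteTable("example_table", dates);

    delete db;
}
```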
......@@ -6,6 +6,7 @@
#include "DBImpl.h"
#include "DBMetaImpl.h"
#include "Env.h"
#include "Log.h"
#include "EngineFactory.h"
#include "metrics/Metrics.h"
#include "scheduler/SearchScheduler.h"
......@@ -15,11 +16,11 @@
#include <thread>
#include <iostream>
#include <cstring>
#include <easylogging++.h>
#include <cache/CpuCacheMgr.h>
#include <boost/filesystem.hpp>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace {
......@@ -88,6 +89,34 @@ Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
return pMeta_->CreateTable(table_schema);
}
Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
meta::DatePartionedTableFilesSchema files;
auto status = pMeta_->FilesToDelete(table_id, dates, files);
if (!status.ok()) { return status; }
for (auto &day_files : files) {
for (auto &file : day_files.second) {
boost::filesystem::remove(file.location_);
}
}
//dates empty means delete all files of the table
if(dates.empty()) {
meta::TableSchema table_schema;
table_schema.table_id_ = table_id;
status = DescribeTable(table_schema);
pMeta_->DeleteTable(table_id);
boost::system::error_code ec;
boost::filesystem::remove_all(table_schema.location_, ec);
if(ec.failed()) {
ENGINE_LOG_WARNING << "Failed to remove table folder";
}
}
return Status::OK();
}
Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
return pMeta_->DescribeTable(table_schema);
}
......@@ -96,8 +125,16 @@ Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
return pMeta_->HasTable(table_id, has_or_not);
}
Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
return pMeta_->AllTables(table_schema_array);
}
Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
return pMeta_->Count(table_id, row_count);
}
Status DBImpl::InsertVectors(const std::string& table_id_,
size_t n, const float* vectors, IDNumbers& vector_ids_) {
uint64_t n, const float* vectors, IDNumbers& vector_ids_) {
auto start_time = METRICS_NOW_TIME;
Status status = pMemMgr_->InsertVectors(table_id_, n, vectors, vector_ids_);
......@@ -111,7 +148,7 @@ Status DBImpl::InsertVectors(const std::string& table_id_,
}
Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
const float *vectors, QueryResults &results) {
auto start_time = METRICS_NOW_TIME;
meta::DatesT dates = {meta::Meta::GetDate()};
......@@ -124,7 +161,7 @@ Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
return result;
}
Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
#if 0
return QuerySync(table_id, k, nq, vectors, dates, results);
......@@ -133,13 +170,13 @@ Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
#endif
}
Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
Status DBImpl::QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
meta::DatePartionedTableFilesSchema files;
auto status = pMeta_->FilesToSearch(table_id, dates, files);
if (!status.ok()) { return status; }
LOG(DEBUG) << "Search DateT Size=" << files.size();
ENGINE_LOG_DEBUG << "Search DateT Size = " << files.size();
meta::TableFilesSchema index_files;
meta::TableFilesSchema raw_files;
......@@ -156,7 +193,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
} else if (!raw_files.empty()) {
dim = raw_files[0].dimension_;
} else {
LOG(DEBUG) << "no files to search";
ENGINE_LOG_DEBUG << "no files to search";
return Status::OK();
}
......@@ -192,7 +229,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
auto file_size = index->PhysicalSize();
search_set_size += file_size;
LOG(DEBUG) << "Search file_type " << file.file_type_ << " Of Size: "
ENGINE_LOG_DEBUG << "Search file_type " << file.file_type_ << " Of Size: "
<< file_size/(1024*1024) << " M";
int inner_k = index->Count() < k ? index->Count() : k;
......@@ -254,7 +291,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
search_in_index(raw_files);
search_in_index(index_files);
LOG(DEBUG) << "Search Overall Set Size=" << search_set_size << " M";
ENGINE_LOG_DEBUG << "Search Overall Set Size = " << search_set_size << " M";
cluster_topk();
free(output_distence);
......@@ -267,7 +304,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
return Status::OK();
}
Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
Status DBImpl::QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
//step 1: get files to search
......@@ -275,7 +312,7 @@ Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
auto status = pMeta_->FilesToSearch(table_id, dates, files);
if (!status.ok()) { return status; }
LOG(DEBUG) << "Search DateT Size=" << files.size();
ENGINE_LOG_DEBUG << "Search DateT Size=" << files.size();
SearchContextPtr context = std::make_shared<SearchContext>(k, nq, vectors);
......@@ -312,11 +349,12 @@ void DBImpl::BackgroundTimerTask(int interval) {
if (shutting_down_.load(std::memory_order_acquire)) break;
std::this_thread::sleep_for(std::chrono::seconds(interval));
server::Metrics::GetInstance().KeepingAliveCounterIncrement(interval);
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
server::Metrics::GetInstance().CacheUsageGaugeSet(cache_usage*100/cache_total);
long size;
uint64_t size;
Size(size);
server::Metrics::GetInstance().DataFileSizeGaugeSet(size);
server::Metrics::GetInstance().CPUUsagePercentSet();
......@@ -518,7 +556,7 @@ Status DBImpl::DropAll() {
return pMeta_->DropAll();
}
Status DBImpl::Size(long& result) {
Status DBImpl::Size(uint64_t& result) {
return pMeta_->Size(result);
}
......@@ -543,5 +581,5 @@ DBImpl::~DBImpl() {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -16,7 +16,7 @@
#include <thread>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Env;
......@@ -33,29 +33,32 @@ public:
DBImpl(const Options& options);
virtual Status CreateTable(meta::TableSchema& table_schema) override;
virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
virtual Status DescribeTable(meta::TableSchema& table_schema) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
virtual Status InsertVectors(const std::string& table_id,
size_t n, const float* vectors, IDNumbers& vector_ids) override;
uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, QueryResults& results) override;
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
virtual Status DropAll() override;
virtual Status Size(long& result) override;
virtual Status Size(uint64_t& result) override;
virtual ~DBImpl();
private:
Status QuerySync(const std::string& table_id, size_t k, size_t nq,
Status QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results);
Status QueryAsync(const std::string& table_id, size_t k, size_t nq,
Status QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results);
......@@ -97,5 +100,5 @@ private:
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -6,6 +6,7 @@
#include "DBMetaImpl.h"
#include "IDGenerator.h"
#include "Utils.h"
#include "Log.h"
#include "MetaConsts.h"
#include "Factories.h"
#include "metrics/Metrics.h"
......@@ -17,16 +18,24 @@
#include <chrono>
#include <fstream>
#include <sqlite_orm.h>
#include <easylogging++.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
using namespace sqlite_orm;
namespace {
void HandleException(std::exception &e) {
ENGINE_LOG_DEBUG << "Engine meta exception: " << e.what();
throw e;
}
}
inline auto StoragePrototype(const std::string &path) {
return make_storage(path,
make_table("Table",
......@@ -100,7 +109,7 @@ Status DBMetaImpl::Initialize() {
if (!boost::filesystem::is_directory(options_.path)) {
auto ret = boost::filesystem::create_directory(options_.path);
if (!ret) {
LOG(ERROR) << "Create directory " << options_.path << " Error";
ENGINE_LOG_ERROR << "Create directory " << options_.path << " Error";
}
assert(ret);
}
......@@ -148,8 +157,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
in(&TableFileSchema::date_, dates)
));
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
......@@ -175,12 +183,12 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
auto group_path = GetTablePath(table_schema.table_id_);
if (!boost::filesystem::is_directory(group_path)) {
auto ret = boost::filesystem::create_directories(group_path);
auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;
if (!boost::filesystem::is_directory(table_path)) {
auto ret = boost::filesystem::create_directories(table_path);
if (!ret) {
LOG(ERROR) << "Create directory " << group_path << " Error";
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
}
assert(ret);
}
......@@ -188,6 +196,21 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
return Status::OK();
}
Status DBMetaImpl::DeleteTable(const std::string& table_id) {
try {
//drop the table from meta
auto tables = ConnectorPtr->select(columns(&TableSchema::id_),
where(c(&TableSchema::table_id_) == table_id));
for (auto &table : tables) {
ConnectorPtr->remove<TableSchema>(std::get<0>(table));
}
} catch (std::exception &e) {
HandleException(e);
}
return Status::OK();
}
Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
try {
server::Metrics::GetInstance().MetaAccessTotalIncrement();
......@@ -212,9 +235,12 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
} else {
return Status::NotFound("Table " + table_schema.table_id_ + " not found");
}
auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -237,9 +263,39 @@ Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
has_or_not = false;
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
Status DBMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
try {
server::Metrics::GetInstance().MetaAccessTotalIncrement();
auto start_time = METRICS_NOW_TIME;
auto selected = ConnectorPtr->select(columns(&TableSchema::id_,
&TableSchema::table_id_,
&TableSchema::files_cnt_,
&TableSchema::dimension_,
&TableSchema::engine_type_,
&TableSchema::store_raw_data_));
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
for (auto &table : selected) {
TableSchema schema;
schema.id_ = std::get<0>(table);
schema.table_id_ = std::get<1>(table);
schema.files_cnt_ = std::get<2>(table);
schema.dimension_ = std::get<3>(table);
schema.engine_type_ = std::get<4>(table);
schema.store_raw_data_ = std::get<5>(table);
table_schema_array.emplace_back(schema);
}
} catch (std::exception &e) {
HandleException(e);
}
return Status::OK();
}
......@@ -282,7 +338,7 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
if (!boost::filesystem::is_directory(partition_path)) {
auto ret = boost::filesystem::create_directory(partition_path);
if (!ret) {
LOG(ERROR) << "Create directory " << partition_path << " Error";
ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
}
assert(ret);
}
......@@ -336,8 +392,7 @@ Status DBMetaImpl::FilesToIndex(TableFilesSchema &files) {
files.push_back(table_file);
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -438,8 +493,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -488,8 +542,79 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
files[table_file.date_].push_back(table_file);
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
Status DBMetaImpl::FilesToDelete(const std::string& table_id,
const DatesT& partition,
DatePartionedTableFilesSchema& files) {
auto now = utils::GetMicroSecTimeStamp();
try {
if(partition.empty()) {
//step 1: get table files by dates
auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
&TableFileSchema::table_id_,
&TableFileSchema::file_id_,
&TableFileSchema::size_,
&TableFileSchema::date_),
where(c(&TableFileSchema::file_type_) !=
(int) TableFileSchema::TO_DELETE
and c(&TableFileSchema::table_id_) == table_id));
//step 2: erase table files from meta
for (auto &file : selected) {
TableFileSchema table_file;
table_file.id_ = std::get<0>(file);
table_file.table_id_ = std::get<1>(file);
table_file.file_id_ = std::get<2>(file);
table_file.size_ = std::get<3>(file);
table_file.date_ = std::get<4>(file);
GetTableFilePath(table_file);
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
}
files[table_file.date_].push_back(table_file);
ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
}
} else {
//step 1: get all table files
auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
&TableFileSchema::table_id_,
&TableFileSchema::file_id_,
&TableFileSchema::size_,
&TableFileSchema::date_),
where(c(&TableFileSchema::file_type_) !=
(int) TableFileSchema::TO_DELETE
and in(&TableFileSchema::date_, partition)
and c(&TableFileSchema::table_id_) == table_id));
//step 2: erase table files from meta
for (auto &file : selected) {
TableFileSchema table_file;
table_file.id_ = std::get<0>(file);
table_file.table_id_ = std::get<1>(file);
table_file.file_id_ = std::get<2>(file);
table_file.size_ = std::get<3>(file);
table_file.date_ = std::get<4>(file);
GetTableFilePath(table_file);
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
}
files[table_file.date_].push_back(table_file);
ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
}
}
} catch (std::exception &e) {
HandleException(e);
}
return Status::OK();
......@@ -520,8 +645,7 @@ Status DBMetaImpl::GetTableFile(TableFileSchema &file_schema) {
" File:" + file_schema.file_id_ + " not found");
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -550,12 +674,11 @@ Status DBMetaImpl::Archive() {
c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
));
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
}
if (criteria == "disk") {
long sum = 0;
uint64_t sum = 0;
Size(sum);
auto to_delete = (sum - limit * G);
......@@ -566,7 +689,7 @@ Status DBMetaImpl::Archive() {
return Status::OK();
}
Status DBMetaImpl::Size(long &result) {
Status DBMetaImpl::Size(uint64_t &result) {
result = 0;
try {
auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::size_)),
......@@ -578,11 +701,10 @@ Status DBMetaImpl::Size(long &result) {
if (!std::get<0>(sub_query)) {
continue;
}
result += (long) (*std::get<0>(sub_query));
result += (uint64_t) (*std::get<0>(sub_query));
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -609,7 +731,8 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
table_file.id_ = std::get<0>(file);
table_file.size_ = std::get<1>(file);
ids.push_back(table_file.id_);
LOG(DEBUG) << "Discard table_file.id=" << table_file.file_id_ << " table_file.size=" << table_file.size_;
ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_
<< " table_file.size=" << table_file.size_;
to_discard_size -= table_file.size_;
}
......@@ -626,11 +749,9 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
));
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return DiscardFiles(to_discard_size);
}
......@@ -644,9 +765,8 @@ Status DBMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
LOG(DEBUG) << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
throw e;
ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
HandleException(e);
}
return Status::OK();
}
......@@ -669,8 +789,7 @@ Status DBMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
return Status::DBTransactionError("Update files Error");
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
......@@ -708,8 +827,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
/* LOG(DEBUG) << "Removing deleted id=" << table_file.id << " location=" << table_file.location << std::endl; */
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
......@@ -747,14 +865,13 @@ Status DBMetaImpl::CleanUp() {
/* LOG(DEBUG) << "Removing id=" << table_file.id << " location=" << table_file.location << std::endl; */
}
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
Status DBMetaImpl::Count(const std::string &table_id, long &result) {
Status DBMetaImpl::Count(const std::string &table_id, uint64_t &result) {
try {
......@@ -785,10 +902,10 @@ Status DBMetaImpl::Count(const std::string &table_id, long &result) {
}
result /= table_schema.dimension_;
result /= sizeof(float);
} catch (std::exception &e) {
LOG(DEBUG) << e.what();
throw e;
HandleException(e);
}
return Status::OK();
}
......@@ -806,5 +923,5 @@ DBMetaImpl::~DBMetaImpl() {
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -9,7 +9,7 @@
#include "Options.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -20,8 +20,10 @@ public:
DBMetaImpl(const DBMetaOptions& options_);
virtual Status CreateTable(TableSchema& table_schema) override;
virtual Status DeleteTable(const std::string& table_id) override;
virtual Status DescribeTable(TableSchema& group_info_) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
virtual Status CreateTableFile(TableFileSchema& file_schema) override;
virtual Status DropPartitionsByDates(const std::string& table_id,
......@@ -40,11 +42,15 @@ public:
virtual Status FilesToMerge(const std::string& table_id,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToDelete(const std::string& table_id,
const DatesT& partition,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToIndex(TableFilesSchema&) override;
virtual Status Archive() override;
virtual Status Size(long& result) override;
virtual Status Size(uint64_t& result) override;
virtual Status CleanUp() override;
......@@ -52,7 +58,7 @@ public:
virtual Status DropAll() override;
virtual Status Count(const std::string& table_id, long& result) override;
virtual Status Count(const std::string& table_id, uint64_t& result) override;
virtual ~DBMetaImpl();
......@@ -70,5 +76,5 @@ private:
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include "Log.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
ExecutionEnginePtr
......
......@@ -9,7 +9,7 @@
#include "ExecutionEngine.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class EngineFactory {
......
......@@ -9,7 +9,7 @@
#include "Env.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
Env::Env()
......@@ -83,5 +83,5 @@ Env* Env::Default() {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -12,7 +12,7 @@
#include <atomic>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Env {
......@@ -52,5 +52,5 @@ protected:
}; // Env
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -9,7 +9,7 @@
#include <string>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Exception : public std::exception {
......@@ -50,5 +50,5 @@ public:
};
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include <easylogging++.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
......@@ -23,5 +23,5 @@ Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std:
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -11,7 +11,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
enum class EngineType {
......@@ -57,5 +57,5 @@ using ExecutionEnginePtr = std::shared_ptr<ExecutionEngine>;
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -15,7 +15,7 @@
#include <easylogging++.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
DBMetaOptions DBMetaOptionsFactory::Build(const std::string& path) {
......@@ -54,5 +54,5 @@ DB* DBFactory::Build(const Options& options) {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -14,7 +14,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
struct DBMetaOptionsFactory {
......@@ -35,5 +35,5 @@ struct DBFactory {
};
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -17,7 +17,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
......@@ -68,7 +68,7 @@ Status FaissExecutionEngine::Serialize() {
}
Status FaissExecutionEngine::Load() {
auto index = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
bool to_cache = false;
auto start_time = METRICS_NOW_TIME;
if (!index) {
......@@ -98,7 +98,7 @@ Status FaissExecutionEngine::Merge(const std::string& location) {
if (location == location_) {
return Status::Error("Cannot Merge Self");
}
auto to_merge = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(location);
auto to_merge = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location);
if (!to_merge) {
to_merge = read_index(location);
}
......@@ -140,7 +140,7 @@ Status FaissExecutionEngine::Search(long n,
}
Status FaissExecutionEngine::Cache() {
zilliz::vecwise::cache::CpuCacheMgr::GetInstance(
zilliz::milvus::cache::CpuCacheMgr::GetInstance(
)->InsertItem(location_, std::make_shared<Index>(pIndex_));
return Status::OK();
......@@ -148,5 +148,5 @@ Status FaissExecutionEngine::Cache() {
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -15,7 +15,7 @@ namespace faiss {
}
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
......@@ -68,5 +68,5 @@ protected:
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -10,7 +10,7 @@
#include <iostream>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
IDGenerator::~IDGenerator() {}
......@@ -49,5 +49,5 @@ void SimpleIDGenerator::GetNextIDNumbers(size_t n, IDNumbers& ids) {
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -11,7 +11,7 @@
#include <vector>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class IDGenerator {
......@@ -37,5 +37,5 @@ private:
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include <easylogging++.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
#define ENGINE_DOMAIN_NAME "[ENGINE] "
......
......@@ -15,7 +15,7 @@
#include <easylogging++.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
......@@ -144,5 +144,5 @@ Status MemManager::Serialize(std::vector<std::string>& table_ids) {
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -17,7 +17,7 @@
#include <mutex>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -92,5 +92,5 @@ private:
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -9,7 +9,7 @@
#include <stdio.h>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -44,5 +44,5 @@ DateT Meta::GetDate() {
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -14,7 +14,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -24,8 +24,10 @@ public:
using Ptr = std::shared_ptr<Meta>;
virtual Status CreateTable(TableSchema& table_schema) = 0;
virtual Status DeleteTable(const std::string& table_id) = 0;
virtual Status DescribeTable(TableSchema& table_schema) = 0;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) = 0;
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) = 0;
virtual Status CreateTableFile(TableFileSchema& file_schema) = 0;
virtual Status DropPartitionsByDates(const std::string& table_id,
......@@ -43,7 +45,11 @@ public:
virtual Status FilesToMerge(const std::string& table_id,
DatePartionedTableFilesSchema& files) = 0;
virtual Status Size(long& result) = 0;
virtual Status FilesToDelete(const std::string& table_id,
const DatesT& partition,
DatePartionedTableFilesSchema& files) = 0;
virtual Status Size(uint64_t& result) = 0;
virtual Status Archive() = 0;
......@@ -54,7 +60,7 @@ public:
virtual Status DropAll() = 0;
virtual Status Count(const std::string& table_id, long& result) = 0;
virtual Status Count(const std::string& table_id, uint64_t& result) = 0;
static DateT GetDate(const std::time_t& t, int day_delta = 0);
static DateT GetDate();
......@@ -64,5 +70,5 @@ public:
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -6,7 +6,7 @@
#pragma once
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -28,5 +28,5 @@ const size_t W_SEC = 7*D_SEC;
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -12,7 +12,7 @@
#include <string>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace meta {
......@@ -58,5 +58,5 @@ typedef std::map<DateT, TableFilesSchema> DatePartionedTableFilesSchema;
} // namespace meta
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -14,7 +14,7 @@
#include "Exception.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
Options::Options()
......@@ -71,5 +71,5 @@ void ArchiveConf::ParseType(const std::string& type) {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -10,7 +10,7 @@
#include <map>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Env;
......@@ -49,5 +49,5 @@ struct Options {
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -9,7 +9,7 @@
#include "Status.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
const char* Status::CopyState(const char* state) {
......@@ -63,5 +63,5 @@ std::string Status::ToString() const {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include <string>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class Status {
......@@ -90,5 +90,5 @@ inline Status& Status::operator=(Status&& rhs) noexcept {
}
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include <vector>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
typedef long IDNumber;
......@@ -20,5 +20,5 @@ typedef std::vector<QueryResult> QueryResults;
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -8,7 +8,7 @@
#include <chrono>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace utils {
......@@ -22,5 +22,5 @@ long GetMicroSecTimeStamp() {
} // namespace utils
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -7,7 +7,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace utils {
......@@ -15,5 +15,5 @@ long GetMicroSecTimeStamp();
} // namespace utils
} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz
......@@ -9,7 +9,7 @@
#include "SearchContext.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class IScheduleStrategy {
......
......@@ -10,15 +10,9 @@
#include "utils/Log.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
IndexLoaderQueue&
IndexLoaderQueue::GetInstance() {
static IndexLoaderQueue instance;
return instance;
}
void
IndexLoaderQueue::Put(const SearchContextPtr &search_context) {
std::unique_lock <std::mutex> lock(mtx);
......@@ -26,6 +20,7 @@ IndexLoaderQueue::Put(const SearchContextPtr &search_context) {
if(search_context == nullptr) {
queue_.push_back(nullptr);
empty_.notify_all();
return;
}
......
......@@ -14,7 +14,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
......@@ -26,18 +26,15 @@ public:
using IndexLoaderContextPtr = std::shared_ptr<IndexLoaderContext>;
class IndexLoaderQueue {
private:
public:
IndexLoaderQueue() : mtx(), full_(), empty_() {}
IndexLoaderQueue(const IndexLoaderQueue &rhs) = delete;
IndexLoaderQueue &operator=(const IndexLoaderQueue &rhs) = delete;
public:
using LoaderQueue = std::list<IndexLoaderContextPtr>;
static IndexLoaderQueue& GetInstance();
void Put(const SearchContextPtr &search_context);
IndexLoaderContextPtr Take();
......
......@@ -11,7 +11,7 @@
#include "utils/Log.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class MemScheduleStrategy : public IScheduleStrategy {
......@@ -38,7 +38,7 @@ public:
new_loader->search_contexts_.push_back(search_context);
new_loader->file_ = pair.second;
auto index = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(pair.second->location_);
auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(pair.second->location_);
if(index != nullptr) {
//if the index file has been in memory, increase its priority
loader_list.push_front(new_loader);
......
......@@ -8,7 +8,7 @@
#include "IScheduleStrategy.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class StrategyFactory {
......
......@@ -10,7 +10,7 @@
#include <chrono>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
SearchContext::SearchContext(uint64_t topk, uint64_t nq, const float* vectors)
......
......@@ -13,7 +13,7 @@
#include <condition_variable>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
using TableFileSchemaPtr = std::shared_ptr<meta::TableFileSchema>;
......
......@@ -13,7 +13,7 @@
#include "db/EngineFactory.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace {
......@@ -55,8 +55,7 @@ void CollectDurationMetrics(int index_type, double total_time) {
}
SearchScheduler::SearchScheduler()
: thread_pool_(2),
stopped_(true) {
: stopped_(true) {
Start();
}
......@@ -75,8 +74,13 @@ SearchScheduler::Start() {
return true;
}
thread_pool_.enqueue(&SearchScheduler::IndexLoadWorker, this);
thread_pool_.enqueue(&SearchScheduler::SearchWorker, this);
stopped_ = false;
search_queue_.SetCapacity(2);
index_load_thread_ = std::make_shared<std::thread>(&SearchScheduler::IndexLoadWorker, this);
search_thread_ = std::make_shared<std::thread>(&SearchScheduler::SearchWorker, this);
return true;
}
......@@ -86,29 +90,34 @@ SearchScheduler::Stop() {
return true;
}
IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
index_queue.Put(nullptr);
if(index_load_thread_) {
index_load_queue_.Put(nullptr);
index_load_thread_->join();
index_load_thread_ = nullptr;
}
if(search_thread_) {
search_queue_.Put(nullptr);
search_thread_->join();
search_thread_ = nullptr;
}
SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
search_queue.Put(nullptr);
stopped_ = true;
return true;
}
bool
SearchScheduler::ScheduleSearchTask(SearchContextPtr& search_context) {
IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
index_queue.Put(search_context);
index_load_queue_.Put(search_context);
return true;
}
bool
SearchScheduler::IndexLoadWorker() {
IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
while(true) {
IndexLoaderContextPtr context = index_queue.Take();
IndexLoaderContextPtr context = index_load_queue_.Take();
if(context == nullptr) {
SERVER_LOG_INFO << "Stop thread for index loading";
break;//exit
......@@ -137,7 +146,7 @@ SearchScheduler::IndexLoadWorker() {
task_ptr->index_type_ = context->file_->file_type_;
task_ptr->index_engine_ = index_ptr;
task_ptr->search_contexts_.swap(context->search_contexts_);
search_queue.Put(task_ptr);
search_queue_.Put(task_ptr);
}
return true;
......@@ -145,9 +154,8 @@ SearchScheduler::IndexLoadWorker() {
bool
SearchScheduler::SearchWorker() {
SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
while(true) {
SearchTaskPtr task_ptr = search_queue.Take();
SearchTaskPtr task_ptr = search_queue_.Take();
if(task_ptr == nullptr) {
SERVER_LOG_INFO << "Stop thread for searching";
break;//exit
......
......@@ -6,10 +6,11 @@
#pragma once
#include "SearchContext.h"
#include "utils/ThreadPool.h"
#include "IndexLoaderQueue.h"
#include "SearchTaskQueue.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class SearchScheduler {
......@@ -30,7 +31,12 @@ private:
bool SearchWorker();
private:
server::ThreadPool thread_pool_;
std::shared_ptr<std::thread> index_load_thread_;
std::shared_ptr<std::thread> search_thread_;
IndexLoaderQueue index_load_queue_;
SearchTaskQueue search_queue_;
bool stopped_ = true;
};
......
......@@ -8,7 +8,7 @@
#include "utils/TimeRecorder.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
namespace {
......@@ -21,7 +21,7 @@ void ClusterResult(const std::vector<long> &output_ids,
for (auto i = 0; i < nq; i++) {
SearchContext::Id2ScoreMap id_score;
for (auto k = 0; k < topk; k++) {
uint64_t index = i * nq + k;
uint64_t index = i * topk + k;
id_score.push_back(std::make_pair(output_ids[index], output_distence[index]));
}
result_set.emplace_back(id_score);
......@@ -94,24 +94,12 @@ void CalcScore(uint64_t vector_count,
}
SearchTaskQueue::SearchTaskQueue() {
SetCapacity(4);
}
SearchTaskQueue&
SearchTaskQueue::GetInstance() {
static SearchTaskQueue instance;
return instance;
}
bool SearchTask::DoSearch() {
if(index_engine_ == nullptr) {
return false;
}
server::TimeRecorder rc("DoSearch");
server::TimeRecorder rc("DoSearch index(" + std::to_string(index_id_) + ")");
std::vector<long> output_ids;
std::vector<float> output_distence;
......@@ -125,28 +113,29 @@ bool SearchTask::DoSearch() {
try {
index_engine_->Search(context->nq(), context->vectors(), inner_k, output_distence.data(),
output_ids.data());
rc.Record("do search");
//step 3: cluster result
SearchContext::ResultSet result_set;
ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set);
rc.Record("cluster result");
//step 4: pick up topk result
TopkResult(result_set, inner_k, context->GetResult());
rc.Record("reduce topk");
//step 5: calculate score between 0 ~ 100
CalcScore(context->nq(), context->vectors(), index_engine_->Dimension(), context->GetResult(), result_set);
context->GetResult().swap(result_set);
rc.Record("calculate score");
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
context->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed
continue;
}
rc.Record("do search");
//step 3: cluster result
SearchContext::ResultSet result_set;
ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set);
rc.Record("cluster result");
//step 4: pick up topk result
TopkResult(result_set, inner_k, context->GetResult());
rc.Record("reduce topk");
//step 5: calculate score between 0 ~ 100
CalcScore(context->nq(), context->vectors(), index_engine_->Dimension(), context->GetResult(), result_set);
context->GetResult().swap(result_set);
rc.Record("calculate score");
//step 6: notify to send result to client
context->IndexSearchDone(index_id_);
}
......
......@@ -12,7 +12,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace engine {
class SearchTask {
......@@ -27,21 +27,7 @@ public:
};
using SearchTaskPtr = std::shared_ptr<SearchTask>;
class SearchTaskQueue : public server::BlockingQueue<SearchTaskPtr> {
private:
SearchTaskQueue();
SearchTaskQueue(const SearchTaskQueue &rhs) = delete;
SearchTaskQueue &operator=(const SearchTaskQueue &rhs) = delete;
public:
static SearchTaskQueue& GetInstance();
private:
};
using SearchTaskQueue = server::BlockingQueue<SearchTaskPtr>;
}
......
......@@ -7,7 +7,7 @@
#include <getopt.h>
#include <memory.h>
// If no path is provided, the current working directory and "system.info" will be used.
using namespace zilliz::vecwise;
using namespace zilliz::milvus;
void
print_usage(const std::string &app_name) {
......
......@@ -14,7 +14,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
LicenseCheck::LicenseCheck() {
......
......@@ -9,7 +9,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class LicenseCheck {
......
......@@ -8,7 +8,7 @@
#include "utils/Error.h"
using namespace zilliz::vecwise;
using namespace zilliz::milvus;
// If no path is provided, the current working directory and "system.info" will be used.
void
......
......@@ -21,7 +21,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
constexpr int LicenseLibrary::sha256_length_;
......
......@@ -15,7 +15,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class LicenseLibrary {
......
......@@ -22,12 +22,12 @@ INITIALIZE_EASYLOGGINGPP
void print_help(const std::string &app_name);
using namespace zilliz::vecwise;
using namespace zilliz::milvus;
int
main(int argc, char *argv[]) {
printf("Megasearch %s version: v%s built at %s\n", BUILD_TYPE, MEGASEARCH_VERSION, BUILD_TIME);
printf("Megasearch server start...\n");
printf("\nWelcome to use Milvus by Zillz!\n");
printf("Milvus %s version: v%s built at %s\n", BUILD_TYPE, MEGASEARCH_VERSION, BUILD_TIME);
signal(SIGINT, server::SignalUtil::HandleSignal);
signal(SIGSEGV, server::SignalUtil::HandleSignal);
......@@ -53,7 +53,7 @@ main(int argc, char *argv[]) {
if(argc < 2) {
print_help(app_name);
printf("Vecwise engine server exit...\n");
printf("Milvus server exit...\n");
return EXIT_FAILURE;
}
......@@ -98,7 +98,7 @@ main(int argc, char *argv[]) {
}
}
zilliz::vecwise::server::InitLog(log_config_file);
zilliz::milvus::server::InitLog(log_config_file);
server::Server* server_ptr = server::Server::Instance();
server_ptr->Init(start_daemonized, pid_filename, config_filename);
......
......@@ -11,7 +11,7 @@
#include "SystemInfo.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class MetricsBase{
public:
......
......@@ -9,7 +9,7 @@
#include "PrometheusMetrics.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
MetricsBase &
......
......@@ -16,7 +16,7 @@
//#include "PrometheusMetrics.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
#define METRICS_NOW_TIME std::chrono::system_clock::now()
......
......@@ -10,7 +10,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
ServerError
......
......@@ -22,7 +22,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
......
......@@ -17,7 +17,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
void SystemInfo::Init() {
......
......@@ -21,7 +21,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class SystemInfo {
......
......@@ -26,7 +26,6 @@ add_library(megasearch_sdk STATIC
${service_files}
)
link_directories(../../third_party/build/lib)
target_link_libraries(megasearch_sdk
${third_party_libs}
)
......
......@@ -28,7 +28,7 @@ namespace {
std::cout << "Table name: " << tb_schema.table_name << std::endl;
std::cout << "Table index type: " << (int)tb_schema.index_type << std::endl;
std::cout << "Table dimension: " << tb_schema.dimension << std::endl;
std::cout << "Table store raw data: " << tb_schema.store_raw_vector << std::endl;
std::cout << "Table store raw data: " << (tb_schema.store_raw_vector ? "true" : "false") << std::endl;
BLOCK_SPLITER
}
......@@ -148,7 +148,9 @@ ClientTest::Test(const std::string& address, const std::string& port) {
std::cout << "ShowTables function call status: " << stat.ToString() << std::endl;
std::cout << "All tables: " << std::endl;
for(auto& table : tables) {
std::cout << "\t" << table << std::endl;
int64_t row_count = 0;
stat = conn->GetTableRowCount(table, row_count);
std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl;
}
}
......@@ -192,10 +194,10 @@ ClientTest::Test(const std::string& address, const std::string& port) {
PrintSearchResult(topk_query_result_array);
}
// {//delete table
// Status stat = conn->DeleteTable(TABLE_NAME);
// std::cout << "DeleteTable function call status: " << stat.ToString() << std::endl;
// }
{//delete table
Status stat = conn->DeleteTable(TABLE_NAME);
std::cout << "DeleteTable function call status: " << stat.ToString() << std::endl;
}
{//server status
std::string status = conn->ServerStatus();
......
......@@ -72,7 +72,7 @@ class Status {
* @return, the status is assigned.
*
*/
inline Status &operator=(const Status &s);
Status &operator=(const Status &s);
/**
* @brief Status
......@@ -93,7 +93,7 @@ class Status {
* @return, the status is moved.
*
*/
inline Status &operator=(Status &&s) noexcept;
Status &operator=(Status &&s) noexcept;
/**
* @brief Status
......
......@@ -77,7 +77,7 @@ ClientProxy::Disconnect() {
std::string
ClientProxy::ClientVersion() const {
return std::string("v1.0");
return "";
}
Status
......@@ -221,6 +221,8 @@ ClientProxy::DescribeTable(const std::string &table_name, TableSchema &table_sch
table_schema.table_name = thrift_schema.table_name;
table_schema.index_type = (IndexType)thrift_schema.index_type;
table_schema.dimension = thrift_schema.dimension;
table_schema.store_raw_vector = thrift_schema.store_raw_vector;
} catch ( std::exception& ex) {
return Status(StatusCode::UnknownError, "failed to describe table: " + std::string(ex.what()));
......
......@@ -4,6 +4,7 @@
* Proprietary and confidential.
******************************************************************************/
#include "ConnectionImpl.h"
#include "version.h"
namespace megasearch {
......@@ -47,7 +48,7 @@ ConnectionImpl::Disconnect() {
std::string
ConnectionImpl::ClientVersion() const {
return client_proxy_->ClientVersion();
return MEGASEARCH_VERSION;
}
Status
......
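With this change the SDK reports the real build version: ClientProxy::ClientVersion() no longer hard-codes "v1.0", and ConnectionImpl::ClientVersion() returns the MEGASEARCH_VERSION macro pulled in via the new #include "version.h". The generated version.h is not part of this diff; judging from the macros used here and in main.cpp, it presumably looks roughly like the sketch below (the concrete values and formatting are assumptions, not the actual generated file):

    // Hypothetical version.h -- illustrative only; the real header is generated
    // at configure time and its exact contents are not shown in this diff.
    #pragma once

    #define MEGASEARCH_VERSION "0.3.0"                 // product version string
    #define BUILD_TYPE         "release"               // "release" or "debug"
    #define BUILD_TIME         "2019-06-28 10:00:00"   // build timestamp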
......@@ -9,7 +9,7 @@
#include "utils/TimeRecorder.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
using namespace megasearch;
......
......@@ -11,7 +11,7 @@
#include "MegasearchService.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class MegasearchServiceHandler : virtual public megasearch::thrift::MegasearchServiceIf {
......
......@@ -10,7 +10,7 @@
#include "megasearch_constants.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
using namespace megasearch;
......
......@@ -12,7 +12,7 @@
#include <thread>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class BaseTask {
......
......@@ -23,7 +23,7 @@
#include <thread>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
using namespace megasearch::thrift;
......
......@@ -9,7 +9,7 @@
#include <string>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class MegasearchServer {
......
......@@ -14,24 +14,21 @@
#include "version.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
static const std::string DQL_TASK_GROUP = "dql";
static const std::string DDL_DML_TASK_GROUP = "ddl_dml";
static const std::string PING_TASK_GROUP = "ping";
static const std::string VECTOR_UID = "uid";
static const uint64_t USE_MT = 5000;
using DB_META = zilliz::vecwise::engine::meta::Meta;
using DB_DATE = zilliz::vecwise::engine::meta::DateT;
using DB_META = zilliz::milvus::engine::meta::Meta;
using DB_DATE = zilliz::milvus::engine::meta::DateT;
namespace {
class DBWrapper {
public:
DBWrapper() {
zilliz::vecwise::engine::Options opt;
zilliz::milvus::engine::Options opt;
ConfigNode& config = ServerConfig::GetInstance().GetConfig(CONFIG_DB);
opt.meta.backend_uri = config.GetValue(CONFIG_DB_URL);
std::string db_path = config.GetValue(CONFIG_DB_PATH);
......@@ -40,7 +37,7 @@ namespace {
CommonUtil::CreateDirectory(opt.meta.path);
zilliz::vecwise::engine::DB::Open(opt, &db_);
zilliz::milvus::engine::DB::Open(opt, &db_);
if(db_ == nullptr) {
SERVER_LOG_ERROR << "Failed to open db";
throw ServerException(SERVER_NULL_POINTER, "Failed to open db");
......@@ -51,13 +48,13 @@ namespace {
delete db_;
}
zilliz::vecwise::engine::DB* DB() { return db_; }
zilliz::milvus::engine::DB* DB() { return db_; }
private:
zilliz::vecwise::engine::DB* db_ = nullptr;
zilliz::milvus::engine::DB* db_ = nullptr;
};
zilliz::vecwise::engine::DB* DB() {
zilliz::milvus::engine::DB* DB() {
static DBWrapper db_wrapper;
return db_wrapper.DB();
}
......@@ -76,6 +73,20 @@ namespace {
return map_type[type];
}
int IndexType(engine::EngineType type) {
static std::map<engine::EngineType, int> map_type = {
{engine::EngineType::INVALID, 0},
{engine::EngineType::FAISS_IDMAP, 1},
{engine::EngineType::FAISS_IVFFLAT, 2},
};
if(map_type.find(type) == map_type.end()) {
return 0;
}
return map_type[type];
}
ServerError
ConvertRowRecordToFloatArray(const std::vector<thrift::RowRecord>& record_array,
uint64_t dimension,
......@@ -174,16 +185,17 @@ ServerError CreateTableTask::OnExecute() {
//step 2: create table
engine::Status stat = DB()->CreateTable(table_info);
if(!stat.ok()) {//table could exist
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return SERVER_SUCCESS;
return error_code_;
}
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return SERVER_UNEXPECTED_ERROR;
return error_code_;
}
rc.Record("done");
......@@ -215,10 +227,13 @@ ServerError DescribeTableTask::OnExecute() {
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
} else {
}
schema_.table_name = table_info.table_id_;
schema_.index_type = IndexType((engine::EngineType)table_info.engine_type_);
schema_.dimension = table_info.dimension_;
schema_.store_raw_vector = table_info.store_raw_data_;
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
......@@ -243,16 +258,53 @@ BaseTaskPtr DeleteTableTask::Create(const std::string& group_id) {
}
ServerError DeleteTableTask::OnExecute() {
error_code_ = SERVER_NOT_IMPLEMENT;
error_msg_ = "delete table not implemented";
SERVER_LOG_ERROR << error_msg_;
try {
TimeRecorder rc("DeleteTableTask");
//step 1: check validation
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
//step 2: check table existence
engine::meta::TableSchema table_info;
table_info.table_id_ = table_name_;
engine::Status stat = DB()->DescribeTable(table_info);
if(!stat.ok()) {
error_code_ = SERVER_TABLE_NOT_EXIST;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
rc.Record("check validation");
return SERVER_NOT_IMPLEMENT;
//step 3: delete table
std::vector<DB_DATE> dates;
stat = DB()->DeleteTable(table_name_, dates);
if(!stat.ok()) {
SERVER_LOG_ERROR << "Engine failed: " << stat.ToString();
return SERVER_UNEXPECTED_ERROR;
}
rc.Record("deleta table");
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
return SERVER_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ShowTablesTask::ShowTablesTask(std::vector<std::string>& tables)
: BaseTask(PING_TASK_GROUP),
: BaseTask(DQL_TASK_GROUP),
tables_(tables) {
}
......@@ -262,6 +314,19 @@ BaseTaskPtr ShowTablesTask::Create(std::vector<std::string>& tables) {
}
ServerError ShowTablesTask::OnExecute() {
std::vector<engine::meta::TableSchema> schema_array;
engine::Status stat = DB()->AllTables(schema_array);
if(!stat.ok()) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
tables_.clear();
for(auto& schema : schema_array) {
tables_.push_back(schema.table_id_);
}
return SERVER_SUCCESS;
}
......@@ -468,17 +533,39 @@ BaseTaskPtr GetTableRowCountTask::Create(const std::string& table_name, int64_t&
}
ServerError GetTableRowCountTask::OnExecute() {
if(table_name_.empty()) {
try {
TimeRecorder rc("GetTableRowCountTask");
//step 1: check validation
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
//step 2: get row count
uint64_t row_count = 0;
engine::Status stat = DB()->GetTableRowCount(table_name_, row_count);
if (!stat.ok()) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
row_count_ = (int64_t) row_count;
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Table name cannot be empty";
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
error_code_ = SERVER_NOT_IMPLEMENT;
error_msg_ = "Not implemented";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SERVER_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
......
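The newly filled-in handlers above (DeleteTableTask, ShowTablesTask, GetTableRowCountTask) all share one shape: validate the request, call the DB() wrapper, and map an engine::Status failure or a thrown exception onto a ServerError code plus a logged message. A condensed, self-contained sketch of that shape, with illustrative stand-in names rather than the repo's classes:

    // Generic shape of the task handlers above -- illustrative only, not the repo's API.
    #include <functional>
    #include <iostream>
    #include <string>

    enum ServerError { SERVER_SUCCESS = 0, SERVER_INVALID_ARGUMENT, SERVER_UNEXPECTED_ERROR };

    ServerError RunTableTask(const std::string& table_name,
                             const std::function<bool(const std::string&)>& engine_call) {
        // step 1: validate input
        if (table_name.empty()) {
            std::cerr << "Table name cannot be empty" << std::endl;
            return SERVER_INVALID_ARGUMENT;
        }
        // step 2: delegate to the engine and map failure onto a server error code
        try {
            if (!engine_call(table_name)) {
                std::cerr << "Engine failed for table " << table_name << std::endl;
                return SERVER_UNEXPECTED_ERROR;
            }
        } catch (const std::exception& ex) {
            std::cerr << ex.what() << std::endl;
            return SERVER_UNEXPECTED_ERROR;
        }
        return SERVER_SUCCESS;
    }

    int main() {
        ServerError err = RunTableTask("demo_table", [](const std::string&) {
            return true;  // pretend the engine call succeeded
        });
        return err == SERVER_SUCCESS ? 0 : 1;
    }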
......@@ -16,7 +16,7 @@
#include <memory>
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
using namespace megasearch;
......
......@@ -7,7 +7,7 @@
#include "MegasearchThreadPoolServer.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
void
......@@ -21,7 +21,7 @@ MegasearchThreadPoolServer::onClientDisconnected(apache::thrift::server::TConnec
server::Metrics::GetInstance().ConnectionGaugeDecrement();
TThreadPoolServer::onClientDisconnected(pClient);
}
zilliz::vecwise::server::MegasearchThreadPoolServer::MegasearchThreadPoolServer(const std::shared_ptr<apache::thrift::TProcessor> &processor,
zilliz::milvus::server::MegasearchThreadPoolServer::MegasearchThreadPoolServer(const std::shared_ptr<apache::thrift::TProcessor> &processor,
const std::shared_ptr<apache::thrift::transport::TServerTransport> &serverTransport,
const std::shared_ptr<apache::thrift::transport::TTransportFactory> &transportFactory,
const std::shared_ptr<apache::thrift::protocol::TProtocolFactory> &protocolFactory,
......
......@@ -10,7 +10,7 @@
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
class MegasearchThreadPoolServer : public apache::thrift::server::TThreadPoolServer {
......
......@@ -23,7 +23,7 @@
#include "metrics/Metrics.h"
namespace zilliz {
namespace vecwise {
namespace milvus {
namespace server {
Server*
......@@ -52,7 +52,7 @@ Server::Daemonize() {
return;
}
SERVER_LOG_INFO << "Vecwise server run in daemonize mode";
SERVER_LOG_INFO << "Milvus server run in daemonize mode";
// std::string log_path(GetLogDirFullPath());
// log_path += "zdb_server.(INFO/WARNNING/ERROR/CRITICAL)";
......@@ -152,9 +152,6 @@ Server::Start() {
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode server_config = config.GetConfig(CONFIG_SERVER);
//print config into console and log
config.PrintAll();
#ifdef ENABLE_LICENSE
ConfigNode license_config = config.GetConfig(CONFIG_LICENSE);
std::string license_file_path = license_config.GetValue(CONFIG_LICENSE_PATH);
......@@ -174,11 +171,11 @@ Server::Start() {
signal(SIGTERM, SignalUtil::HandleSignal);
server::Metrics::GetInstance().Init();
server::SystemInfo::GetInstance().Init();
SERVER_LOG_INFO << "Vecwise server is running...";
printf("Milvus server start successfully.\n");
StartService();
} catch(std::exception& ex){
SERVER_LOG_ERROR << "Vecwise server encounter exception: " << std::string(ex.what())
SERVER_LOG_ERROR << "Milvus server encounter exception: " << std::string(ex.what())
<< "Is another server instance running?";
break;
}
......@@ -190,17 +187,19 @@ Server::Start() {
void
Server::Stop() {
SERVER_LOG_INFO << "Vecwise server will be closed";
printf("Milvus server is going to shutdown ...\n");
// Unlock and close lockfile
if (pid_fd != -1) {
int ret = lockf(pid_fd, F_ULOCK, 0);
if(ret != 0){
printf("Can't lock file: %s\n", strerror(errno));
exit(0);
}
ret = close(pid_fd);
if(ret != 0){
printf("Can't close file: %s\n", strerror(errno));
exit(0);
}
}
......@@ -208,7 +207,8 @@ Server::Stop() {
if (!pid_filename_.empty()) {
int ret = unlink(pid_filename_.c_str());
if(ret != 0){
printf("Can't unlink file: %s\n", strerror(errno));
exit(0);
}
}
......@@ -219,8 +219,7 @@ Server::Stop() {
#ifdef ENABLE_LICENSE
server::LicenseCheck::GetInstance().StopCountingDown();
#endif
SERVER_LOG_INFO << "Vecwise server closed";
printf("Milvus server is closed!\n");
}
......
The remaining file diffs in this commit are collapsed.