Commit 75c1186c authored by G groot

fix conflict

......@@ -27,10 +27,11 @@ Please mark all change in change log and use the ticket from JIRA.
## Improvement
- \#255 - Add ivfsq8 test report detailed version
- \#275 - Rename C++ SDK IndexType
- \#260 - C++ SDK README
- \#266 - Rpc request source code refactor
- \#275 - Rename C++ SDK IndexType
- \#284 - Change C++ SDK to shared library
- \#306 - Use int64 for all config integer
- \#310 - Add Q&A for 'protocol https not supported or disabled in libcurl' issue
- \#314 - Add Find FAISS in CMake
- \#322 - Add option to enable / disable prometheus
......
......@@ -16,7 +16,7 @@ Milvus is the world's fastest similarity search engine for massive-scale feature
For more detailed introduction of Milvus and its architecture, see [Milvus overview](https://www.milvus.io/docs/en/aboutmilvus/overview/).
Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java) and C++ APIs.
Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java) and [C++](https://github.com/milvus-io/milvus/tree/master/core/src/sdk) APIs.
Keep up-to-date with newest releases and latest updates by reading Milvus [release notes](https://www.milvus.io/docs/en/release/v0.5.0/).
......
......@@ -20,7 +20,7 @@
Milvus 是一款开源的、针对海量特征向量的相似性搜索引擎。基于异构众核计算框架设计,成本更低,性能更好。在有限的计算资源下,十亿向量搜索仅毫秒响应。
Milvus 提供稳定的 Python、Java 以及 C++ 的 API 接口。
Milvus 提供稳定的 [Python](https://github.com/milvus-io/pymilvus)[Java](https://github.com/milvus-io/milvus-sdk-java) 以及 [C++](https://github.com/milvus-io/milvus/tree/master/core/src/sdk) 的 API 接口。
通过 [版本发布说明](https://milvus.io/docs/zh-CN/release/v0.5.0/) 获取最新发行版本的 Milvus。
......
......@@ -50,7 +50,7 @@ pipeline {
}
stages {
stage("Run Build") {
stage("Run GPU Version Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
......@@ -60,7 +60,7 @@ pipeline {
}
stages {
stage('Build') {
stage('GPU Version Build') {
steps {
container('milvus-build-env') {
script {
......@@ -69,7 +69,7 @@ pipeline {
}
}
}
stage('Code Coverage') {
stage('GPU Version Code Coverage') {
steps {
container('milvus-build-env') {
script {
......@@ -78,7 +78,7 @@ pipeline {
}
}
}
stage('Upload Package') {
stage('Upload GPU Version Package') {
steps {
container('milvus-build-env') {
script {
......@@ -90,7 +90,7 @@ pipeline {
}
}
stage("Publish docker images") {
stage("Publish GPU Version docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
......@@ -100,7 +100,7 @@ pipeline {
}
stages {
stage('Publish') {
stage('Publish GPU Version') {
steps {
container('publish-images'){
script {
......@@ -112,7 +112,7 @@ pipeline {
}
}
stage("Deploy to Development") {
stage("Deploy GPU Version to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
......@@ -122,7 +122,7 @@ pipeline {
}
stages {
stage("Deploy to Dev") {
stage("Deploy GPU Version to Dev") {
steps {
container('milvus-test-env') {
script {
......@@ -132,7 +132,7 @@ pipeline {
}
}
stage("Dev Test") {
stage("GPU Version Dev Test") {
steps {
container('milvus-test-env') {
script {
......@@ -147,7 +147,7 @@ pipeline {
}
}
stage ("Cleanup Dev") {
stage ("Cleanup GPU Version Dev") {
steps {
container('milvus-test-env') {
script {
......@@ -180,7 +180,7 @@ pipeline {
}
stages {
stage("Run Build") {
stage("Run CPU Version Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
......@@ -190,7 +190,7 @@ pipeline {
}
stages {
stage('Build') {
stage('Build CPU Version') {
steps {
container('milvus-build-env') {
script {
......@@ -199,7 +199,7 @@ pipeline {
}
}
}
stage('Code Coverage') {
stage('CPU Version Code Coverage') {
steps {
container('milvus-build-env') {
script {
......@@ -208,7 +208,7 @@ pipeline {
}
}
}
stage('Upload Package') {
stage('Upload CPU Version Package') {
steps {
container('milvus-build-env') {
script {
......@@ -220,7 +220,7 @@ pipeline {
}
}
stage("Publish docker images") {
stage("Publish CPU Version docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
......@@ -230,7 +230,7 @@ pipeline {
}
stages {
stage('Publish') {
stage('Publish CPU Version') {
steps {
container('publish-images'){
script {
......@@ -242,7 +242,7 @@ pipeline {
}
}
stage("Deploy to Development") {
stage("Deploy CPU Version to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
......@@ -252,7 +252,7 @@ pipeline {
}
stages {
stage("Deploy to Dev") {
stage("Deploy CPU Version to Dev") {
steps {
container('milvus-test-env') {
script {
......@@ -262,7 +262,7 @@ pipeline {
}
}
stage("Dev Test") {
stage("CPU Version Dev Test") {
steps {
container('milvus-test-env') {
script {
......@@ -277,7 +277,7 @@ pipeline {
}
}
stage ("Cleanup Dev") {
stage ("Cleanup CPU Version Dev") {
steps {
container('milvus-test-env') {
script {
......
......@@ -16,6 +16,7 @@ BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="/opt/milvus"
FAISS_ROOT=""
CUSTOMIZATION="OFF" # default: use original FAISS
BUILD_COVERAGE="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
......@@ -23,7 +24,7 @@ GPU_VERSION="OFF"
WITH_MKL="OFF"
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
while getopts "o:t:b:f:gulcjmh" arg
while getopts "o:t:b:f:gxulcjmh" arg
do
case $arg in
o)
......@@ -41,6 +42,9 @@ do
g)
GPU_VERSION="ON";
;;
x)
CUSTOMIZATION="ON";
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="ON";
......@@ -66,6 +70,7 @@ parameter:
-b: core code build directory
-f: faiss root path
-g: gpu version
-x: milvus customization (default: OFF)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-c: code coverage(default: OFF)
......@@ -74,7 +79,7 @@ parameter:
-h: help
usage:
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} -f \${FAISS_ROOT} [-u] [-l] [-c] [-j] [-m] [-h]
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} -f \${FAISS_ROOT} [-g] [-x] [-u] [-l] [-c] [-j] [-m] [-h]
"
exit 0
;;
......@@ -96,6 +101,7 @@ CMAKE_CMD="cmake \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
......
......@@ -9,3 +9,5 @@ output.info
output_new.info
server.info
*.pyc
src/grpc/python_gen.h
src/grpc/python/
......@@ -36,7 +36,7 @@ engine_config:
gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only
gpu_resource_config:
enable_gpu: false # whether to enable GPU resources
enable: false # whether to enable GPU resources
cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer
search_resources: # define the GPU devices used for search computation, must be in format gpux
- gpu0
......
......@@ -36,7 +36,7 @@ engine_config:
gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only
gpu_resource_config:
enable_gpu: true # whether to enable GPU resources
enable: true # whether to enable GPU resources
cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer
search_resources: # define the GPU devices used for search computation, must be in format gpux
- gpu0
......
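The two server_config.yaml hunks above rename the GPU switch from `enable_gpu` to `enable` under `gpu_resource_config`. A minimal sketch of reading the renamed key, assuming PyYAML and a local `server_config.yaml` with the layout shown above; only the key names come from the hunks, the rest is illustrative:

```python
# Minimal sketch: read the renamed GPU switch from server_config.yaml.
# Assumes PyYAML is installed and the file uses the layout shown above.
import yaml

with open("server_config.yaml") as f:
    cfg = yaml.safe_load(f)

# Before this commit the key was gpu_resource_config.enable_gpu.
gpu_enabled = cfg["gpu_resource_config"]["enable"]
cache_gb = cfg["gpu_resource_config"]["cache_capacity"]
print(gpu_enabled, cache_gb)
```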
......@@ -33,7 +33,7 @@ static const char* ARCHIVE_CONF_DISK = "disk";
static const char* ARCHIVE_CONF_DAYS = "days";
struct ArchiveConf {
using CriteriaT = std::map<std::string, int>;
using CriteriaT = std::map<std::string, int64_t>;
explicit ArchiveConf(const std::string& type, const std::string& criterias = std::string());
......
......@@ -146,7 +146,7 @@ ExecutionEngineImpl::HybridLoad() const {
const std::string key = location_ + ".quantizer";
server::Config& config = server::Config::GetInstance();
std::vector<int32_t> gpus;
std::vector<int64_t> gpus;
Status s = config.GetGpuResourceConfigSearchResources(gpus);
if (!s.ok()) {
ENGINE_LOG_ERROR << s.message();
......@@ -586,7 +586,7 @@ ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
Status
ExecutionEngineImpl::Init() {
server::Config& config = server::Config::GetInstance();
std::vector<int32_t> gpu_ids;
std::vector<int64_t> gpu_ids;
Status s = config.GetGpuResourceConfigBuildIndexResources(gpu_ids);
for (auto id : gpu_ids) {
if (gpu_num_ == id) {
......
......@@ -122,8 +122,8 @@ class ExecutionEngineImpl : public ExecutionEngine {
int64_t dim_;
std::string location_;
int32_t nlist_ = 0;
int32_t gpu_num_ = 0;
int64_t nlist_ = 0;
int64_t gpu_num_ = 0;
};
} // namespace engine
......
......@@ -55,9 +55,9 @@ load_simple_config() {
// get resources
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
std::vector<int32_t> gpu_ids;
std::vector<int64_t> gpu_ids;
config.GetGpuResourceConfigSearchResources(gpu_ids);
std::vector<int32_t> build_gpu_ids;
std::vector<int64_t> build_gpu_ids;
config.GetGpuResourceConfigBuildIndexResources(build_gpu_ids);
auto pcie = Connection("pcie", 12000);
......
......@@ -26,7 +26,7 @@ namespace scheduler {
void
BuildIndexPass::Init() {
server::Config& config = server::Config::GetInstance();
std::vector<int32_t> build_resources;
std::vector<int64_t> build_resources;
Status s = config.GetGpuResourceConfigBuildIndexResources(build_resources);
if (!s.ok()) {
throw;
......
......@@ -45,9 +45,9 @@ class FaissFlatPass : public Pass {
Run(const TaskPtr& task) override;
private:
int32_t threshold_ = std::numeric_limits<int32_t>::max();
int64_t threshold_ = std::numeric_limits<int64_t>::max();
int64_t count_ = 0;
std::vector<int32_t> gpus;
std::vector<int64_t> gpus;
};
using FaissFlatPassPtr = std::shared_ptr<FaissFlatPass>;
......
......@@ -45,9 +45,9 @@ class FaissIVFFlatPass : public Pass {
Run(const TaskPtr& task) override;
private:
int32_t threshold_ = std::numeric_limits<int32_t>::max();
int64_t threshold_ = std::numeric_limits<int64_t>::max();
int64_t count_ = 0;
std::vector<int32_t> gpus;
std::vector<int64_t> gpus;
};
using FaissIVFFlatPassPtr = std::shared_ptr<FaissIVFFlatPass>;
......
......@@ -32,7 +32,7 @@ FaissIVFSQ8HPass::Init() {
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
threshold_ = std::numeric_limits<int32_t>::max();
threshold_ = std::numeric_limits<int64_t>::max();
}
s = config.GetGpuResourceConfigSearchResources(gpus);
}
......
......@@ -45,9 +45,9 @@ class FaissIVFSQ8HPass : public Pass {
Run(const TaskPtr& task) override;
private:
int32_t threshold_ = std::numeric_limits<int32_t>::max();
int64_t threshold_ = std::numeric_limits<int64_t>::max();
int64_t count_ = 0;
std::vector<int32_t> gpus;
std::vector<int64_t> gpus;
};
using FaissIVFSQ8HPassPtr = std::shared_ptr<FaissIVFSQ8HPass>;
......
......@@ -45,9 +45,9 @@ class FaissIVFSQ8Pass : public Pass {
Run(const TaskPtr& task) override;
private:
int32_t threshold_ = std::numeric_limits<int32_t>::max();
int64_t threshold_ = std::numeric_limits<int64_t>::max();
int64_t count_ = 0;
std::vector<int32_t> gpus;
std::vector<int64_t> gpus;
};
using FaissIVFSQ8PassPtr = std::shared_ptr<FaissIVFSQ8Pass>;
......
......@@ -113,19 +113,19 @@ Config::ValidateConfig() {
return s;
}
int32_t db_archive_disk_threshold;
int64_t db_archive_disk_threshold;
s = GetDBConfigArchiveDiskThreshold(db_archive_disk_threshold);
if (!s.ok()) {
return s;
}
int32_t db_archive_days_threshold;
int64_t db_archive_days_threshold;
s = GetDBConfigArchiveDaysThreshold(db_archive_days_threshold);
if (!s.ok()) {
return s;
}
int32_t db_insert_buffer_size;
int64_t db_insert_buffer_size;
s = GetDBConfigInsertBufferSize(db_insert_buffer_size);
if (!s.ok()) {
return s;
......@@ -170,19 +170,19 @@ Config::ValidateConfig() {
}
/* engine config */
int32_t engine_use_blas_threshold;
int64_t engine_use_blas_threshold;
s = GetEngineConfigUseBlasThreshold(engine_use_blas_threshold);
if (!s.ok()) {
return s;
}
int32_t engine_omp_thread_num;
int64_t engine_omp_thread_num;
s = GetEngineConfigOmpThreadNum(engine_omp_thread_num);
if (!s.ok()) {
return s;
}
int32_t engine_gpu_search_threshold;
int64_t engine_gpu_search_threshold;
s = GetEngineConfigGpuSearchThreshold(engine_gpu_search_threshold);
if (!s.ok()) {
return s;
......@@ -190,8 +190,8 @@ Config::ValidateConfig() {
/* gpu resource config */
#ifdef MILVUS_GPU_VERSION
bool resource_enable_gpu;
s = GetGpuResourceConfigEnableGpu(resource_enable_gpu);
bool gpu_resource_enable;
s = GetGpuResourceConfigEnable(gpu_resource_enable);
if (!s.ok()) {
return s;
}
......@@ -208,13 +208,13 @@ Config::ValidateConfig() {
return s;
}
std::vector<int32_t> search_resources;
std::vector<int64_t> search_resources;
s = GetGpuResourceConfigSearchResources(search_resources);
if (!s.ok()) {
return s;
}
std::vector<int32_t> index_build_resources;
std::vector<int64_t> index_build_resources;
s = GetGpuResourceConfigBuildIndexResources(index_build_resources);
if (!s.ok()) {
return s;
......@@ -330,7 +330,7 @@ Config::ResetDefaultConfig() {
/* gpu resource config */
#ifdef MILVUS_GPU_VERSION
s = SetGpuResourceConfigEnableGpu(CONFIG_GPU_RESOURCE_ENABLE_GPU_DEFAULT);
s = SetGpuResourceConfigEnable(CONFIG_GPU_RESOURCE_ENABLE_DEFAULT);
if (!s.ok()) {
return s;
}
......@@ -485,7 +485,7 @@ Config::CheckDBConfigInsertBufferSize(const std::string& value) {
". Possible reason: db_config.insert_buffer_size is not a positive integer.";
return Status(SERVER_INVALID_ARGUMENT, msg);
} else {
int64_t buffer_size = std::stoi(value) * GB;
int64_t buffer_size = std::stoll(value) * GB;
if (buffer_size <= 0) {
std::string msg = "Invalid insert buffer size: " + value +
". Possible reason: db_config.insert_buffer_size is not a positive integer.";
......@@ -540,7 +540,7 @@ Config::CheckCacheConfigCpuCacheCapacity(const std::string& value) {
". Possible reason: cache_config.cpu_cache_capacity is not a positive integer.";
return Status(SERVER_INVALID_ARGUMENT, msg);
} else {
int64_t cpu_cache_capacity = std::stoi(value) * GB;
int64_t cpu_cache_capacity = std::stoll(value) * GB;
if (cpu_cache_capacity <= 0) {
std::string msg = "Invalid cpu cache capacity: " + value +
". Possible reason: cache_config.cpu_cache_capacity is not a positive integer.";
......@@ -557,7 +557,7 @@ Config::CheckCacheConfigCpuCacheCapacity(const std::string& value) {
std::cerr << "WARNING: cpu cache capacity value is too big" << std::endl;
}
int32_t buffer_value;
int64_t buffer_value;
Status s = GetDBConfigInsertBufferSize(buffer_value);
if (!s.ok()) {
return s;
......@@ -619,10 +619,10 @@ Config::CheckEngineConfigOmpThreadNum(const std::string& value) {
return Status(SERVER_INVALID_ARGUMENT, msg);
}
int32_t omp_thread = std::stoi(value);
uint32_t sys_thread_cnt = 8;
int64_t omp_thread = std::stoll(value);
int64_t sys_thread_cnt = 8;
CommonUtil::GetSystemAvailableThreads(sys_thread_cnt);
if (omp_thread > static_cast<int32_t>(sys_thread_cnt)) {
if (omp_thread > sys_thread_cnt) {
std::string msg = "Invalid omp thread num: " + value +
". Possible reason: engine_config.omp_thread_num exceeds system cpu cores.";
return Status(SERVER_INVALID_ARGUMENT, msg);
......@@ -641,10 +641,10 @@ Config::CheckEngineConfigGpuSearchThreshold(const std::string& value) {
}
Status
Config::CheckGpuResourceConfigEnableGpu(const std::string& value) {
Config::CheckGpuResourceConfigEnable(const std::string& value) {
if (!ValidationUtil::ValidateStringIsBool(value).ok()) {
std::string msg = "Invalid gpu resource config: " + value +
". Possible reason: gpu_resource_config.enable_gpu is not a boolean.";
std::string msg =
"Invalid gpu resource config: " + value + ". Possible reason: gpu_resource_config.enable is not a boolean.";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
return Status::OK();
......@@ -657,14 +657,14 @@ Config::CheckGpuResourceConfigCacheCapacity(const std::string& value) {
". Possible reason: gpu_resource_config.cache_capacity is not a positive integer.";
return Status(SERVER_INVALID_ARGUMENT, msg);
} else {
uint64_t gpu_cache_capacity = std::stoi(value) * GB;
std::vector<int32_t> gpu_ids;
int64_t gpu_cache_capacity = std::stoll(value) * GB;
std::vector<int64_t> gpu_ids;
Status s = GetGpuResourceConfigBuildIndexResources(gpu_ids);
if (!s.ok()) {
return s;
}
for (int32_t gpu_id : gpu_ids) {
for (int64_t gpu_id : gpu_ids) {
size_t gpu_memory;
if (!ValidationUtil::GetGpuMemory(gpu_id, gpu_memory).ok()) {
std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(gpu_id);
......@@ -855,37 +855,37 @@ Config::GetDBConfigBackendUrl(std::string& value) {
}
Status
Config::GetDBConfigArchiveDiskThreshold(int32_t& value) {
Config::GetDBConfigArchiveDiskThreshold(int64_t& value) {
std::string str =
GetConfigStr(CONFIG_DB, CONFIG_DB_ARCHIVE_DISK_THRESHOLD, CONFIG_DB_ARCHIVE_DISK_THRESHOLD_DEFAULT);
Status s = CheckDBConfigArchiveDiskThreshold(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetDBConfigArchiveDaysThreshold(int32_t& value) {
Config::GetDBConfigArchiveDaysThreshold(int64_t& value) {
std::string str =
GetConfigStr(CONFIG_DB, CONFIG_DB_ARCHIVE_DAYS_THRESHOLD, CONFIG_DB_ARCHIVE_DAYS_THRESHOLD_DEFAULT);
Status s = CheckDBConfigArchiveDaysThreshold(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetDBConfigInsertBufferSize(int32_t& value) {
Config::GetDBConfigInsertBufferSize(int64_t& value) {
std::string str = GetConfigStr(CONFIG_DB, CONFIG_DB_INSERT_BUFFER_SIZE, CONFIG_DB_INSERT_BUFFER_SIZE_DEFAULT);
Status s = CheckDBConfigInsertBufferSize(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
......@@ -927,7 +927,7 @@ Config::GetCacheConfigCpuCacheCapacity(int64_t& value) {
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
......@@ -957,45 +957,44 @@ Config::GetCacheConfigCacheInsertData(bool& value) {
}
Status
Config::GetEngineConfigUseBlasThreshold(int32_t& value) {
Config::GetEngineConfigUseBlasThreshold(int64_t& value) {
std::string str =
GetConfigStr(CONFIG_ENGINE, CONFIG_ENGINE_USE_BLAS_THRESHOLD, CONFIG_ENGINE_USE_BLAS_THRESHOLD_DEFAULT);
Status s = CheckEngineConfigUseBlasThreshold(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetEngineConfigOmpThreadNum(int32_t& value) {
Config::GetEngineConfigOmpThreadNum(int64_t& value) {
std::string str = GetConfigStr(CONFIG_ENGINE, CONFIG_ENGINE_OMP_THREAD_NUM, CONFIG_ENGINE_OMP_THREAD_NUM_DEFAULT);
Status s = CheckEngineConfigOmpThreadNum(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetEngineConfigGpuSearchThreshold(int32_t& value) {
Config::GetEngineConfigGpuSearchThreshold(int64_t& value) {
std::string str =
GetConfigStr(CONFIG_ENGINE, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD_DEFAULT);
Status s = CheckEngineConfigGpuSearchThreshold(str);
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetGpuResourceConfigEnableGpu(bool& value) {
std::string str =
GetConfigStr(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_ENABLE_GPU, CONFIG_GPU_RESOURCE_ENABLE_GPU_DEFAULT);
Status s = CheckGpuResourceConfigEnableGpu(str);
Config::GetGpuResourceConfigEnable(bool& value) {
std::string str = GetConfigStr(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_ENABLE, CONFIG_GPU_RESOURCE_ENABLE_DEFAULT);
Status s = CheckGpuResourceConfigEnable(str);
if (!s.ok()) {
return s;
}
......@@ -1006,13 +1005,13 @@ Config::GetGpuResourceConfigEnableGpu(bool& value) {
Status
Config::GetGpuResourceConfigCacheCapacity(int64_t& value) {
bool enable_gpu = false;
Status s = GetGpuResourceConfigEnableGpu(enable_gpu);
bool gpu_resource_enable = false;
Status s = GetGpuResourceConfigEnable(gpu_resource_enable);
if (!s.ok()) {
return s;
}
if (!enable_gpu) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable_gpu is set to false.";
if (!gpu_resource_enable) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable is set to false.";
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
std::string str = GetConfigStr(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_CACHE_CAPACITY,
......@@ -1021,19 +1020,19 @@ Config::GetGpuResourceConfigCacheCapacity(int64_t& value) {
if (!s.ok()) {
return s;
}
value = std::stoi(str);
value = std::stoll(str);
return Status::OK();
}
Status
Config::GetGpuResourceConfigCacheThreshold(float& value) {
bool enable_gpu = false;
Status s = GetGpuResourceConfigEnableGpu(enable_gpu);
bool gpu_resource_enable = false;
Status s = GetGpuResourceConfigEnable(gpu_resource_enable);
if (!s.ok()) {
return s;
}
if (!enable_gpu) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable_gpu is set to false.";
if (!gpu_resource_enable) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable is set to false.";
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
std::string str = GetConfigStr(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_CACHE_THRESHOLD,
......@@ -1047,14 +1046,14 @@ Config::GetGpuResourceConfigCacheThreshold(float& value) {
}
Status
Config::GetGpuResourceConfigSearchResources(std::vector<int32_t>& value) {
bool enable_gpu = false;
Status s = GetGpuResourceConfigEnableGpu(enable_gpu);
Config::GetGpuResourceConfigSearchResources(std::vector<int64_t>& value) {
bool gpu_resource_enable = false;
Status s = GetGpuResourceConfigEnable(gpu_resource_enable);
if (!s.ok()) {
return s;
}
if (!enable_gpu) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable_gpu is set to false.";
if (!gpu_resource_enable) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable is set to false.";
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
std::string str = GetConfigSequenceStr(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_SEARCH_RESOURCES,
......@@ -1066,20 +1065,20 @@ Config::GetGpuResourceConfigSearchResources(std::vector<int32_t>& value) {
return s;
}
for (std::string& res : res_vec) {
value.push_back(std::stoi(res.substr(3)));
value.push_back(std::stoll(res.substr(3)));
}
return Status::OK();
}
Status
Config::GetGpuResourceConfigBuildIndexResources(std::vector<int32_t>& value) {
bool enable_gpu = false;
Status s = GetGpuResourceConfigEnableGpu(enable_gpu);
Config::GetGpuResourceConfigBuildIndexResources(std::vector<int64_t>& value) {
bool gpu_resource_enable = false;
Status s = GetGpuResourceConfigEnable(gpu_resource_enable);
if (!s.ok()) {
return s;
}
if (!enable_gpu) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable_gpu is set to false.";
if (!gpu_resource_enable) {
std::string msg = "GPU not supported. Possible reason: gpu_resource_config.enable is set to false.";
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
std::string str =
......@@ -1092,7 +1091,7 @@ Config::GetGpuResourceConfigBuildIndexResources(std::vector<int32_t>& value) {
return s;
}
for (std::string& res : res_vec) {
value.push_back(std::stoi(res.substr(3)));
value.push_back(std::stoll(res.substr(3)));
}
return Status::OK();
}
......@@ -1295,12 +1294,12 @@ Config::SetEngineConfigGpuSearchThreshold(const std::string& value) {
/* gpu resource config */
Status
Config::SetGpuResourceConfigEnableGpu(const std::string& value) {
Status s = CheckGpuResourceConfigEnableGpu(value);
Config::SetGpuResourceConfigEnable(const std::string& value) {
Status s = CheckGpuResourceConfigEnable(value);
if (!s.ok()) {
return s;
}
SetConfigValueInMem(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_ENABLE_GPU, value);
SetConfigValueInMem(CONFIG_GPU_RESOURCE, CONFIG_GPU_RESOURCE_ENABLE, value);
return Status::OK();
}
......
......@@ -85,11 +85,11 @@ static const char* CONFIG_ENGINE_GPU_SEARCH_THRESHOLD_DEFAULT = "1000";
/* gpu resource config */
static const char* CONFIG_GPU_RESOURCE = "gpu_resource_config";
static const char* CONFIG_GPU_RESOURCE_ENABLE_GPU = "enable_gpu";
static const char* CONFIG_GPU_RESOURCE_ENABLE = "enable";
#ifdef MILVUS_GPU_VERSION
static const char* CONFIG_GPU_RESOURCE_ENABLE_GPU_DEFAULT = "true";
static const char* CONFIG_GPU_RESOURCE_ENABLE_DEFAULT = "true";
#else
static const char* CONFIG_GPU_RESOURCE_ENABLE_GPU_DEFAULT = "false";
static const char* CONFIG_GPU_RESOURCE_ENABLE_DEFAULT = "false";
#endif
static const char* CONFIG_GPU_RESOURCE_CACHE_CAPACITY = "cache_capacity";
static const char* CONFIG_GPU_RESOURCE_CACHE_CAPACITY_DEFAULT = "4";
......@@ -175,7 +175,7 @@ class Config {
/* gpu resource config */
Status
CheckGpuResourceConfigEnableGpu(const std::string& value);
CheckGpuResourceConfigEnable(const std::string& value);
Status
CheckGpuResourceConfigCacheCapacity(const std::string& value);
Status
......@@ -210,11 +210,11 @@ class Config {
Status
GetDBConfigBackendUrl(std::string& value);
Status
GetDBConfigArchiveDiskThreshold(int32_t& value);
GetDBConfigArchiveDiskThreshold(int64_t& value);
Status
GetDBConfigArchiveDaysThreshold(int32_t& value);
GetDBConfigArchiveDaysThreshold(int64_t& value);
Status
GetDBConfigInsertBufferSize(int32_t& value);
GetDBConfigInsertBufferSize(int64_t& value);
Status
GetDBConfigPreloadTable(std::string& value);
......@@ -236,23 +236,23 @@ class Config {
/* engine config */
Status
GetEngineConfigUseBlasThreshold(int32_t& value);
GetEngineConfigUseBlasThreshold(int64_t& value);
Status
GetEngineConfigOmpThreadNum(int32_t& value);
GetEngineConfigOmpThreadNum(int64_t& value);
Status
GetEngineConfigGpuSearchThreshold(int32_t& value);
GetEngineConfigGpuSearchThreshold(int64_t& value);
/* gpu resource config */
Status
GetGpuResourceConfigEnableGpu(bool& value);
GetGpuResourceConfigEnable(bool& value);
Status
GetGpuResourceConfigCacheCapacity(int64_t& value);
Status
GetGpuResourceConfigCacheThreshold(float& value);
Status
GetGpuResourceConfigSearchResources(std::vector<int32_t>& value);
GetGpuResourceConfigSearchResources(std::vector<int64_t>& value);
Status
GetGpuResourceConfigBuildIndexResources(std::vector<int32_t>& value);
GetGpuResourceConfigBuildIndexResources(std::vector<int64_t>& value);
public:
/* server config */
......@@ -305,7 +305,7 @@ class Config {
/* gpu resource config */
Status
SetGpuResourceConfigEnableGpu(const std::string& value);
SetGpuResourceConfigEnable(const std::string& value);
Status
SetGpuResourceConfigCacheCapacity(const std::string& value);
Status
......
......@@ -89,7 +89,7 @@ DBWrapper::StartService() {
}
// engine config
int32_t omp_thread;
int64_t omp_thread;
s = config.GetEngineConfigOmpThreadNum(omp_thread);
if (!s.ok()) {
std::cerr << s.ToString() << std::endl;
......@@ -100,7 +100,7 @@ DBWrapper::StartService() {
omp_set_num_threads(omp_thread);
SERVER_LOG_DEBUG << "Specify openmp thread number: " << omp_thread;
} else {
uint32_t sys_thread_cnt = 8;
int64_t sys_thread_cnt = 8;
if (CommonUtil::GetSystemAvailableThreads(sys_thread_cnt)) {
omp_thread = static_cast<int32_t>(ceil(sys_thread_cnt * 0.5));
omp_set_num_threads(omp_thread);
......@@ -108,7 +108,7 @@ DBWrapper::StartService() {
}
// init faiss global variable
int32_t use_blas_threshold;
int64_t use_blas_threshold;
s = config.GetEngineConfigUseBlasThreshold(use_blas_threshold);
if (!s.ok()) {
std::cerr << s.ToString() << std::endl;
......@@ -119,7 +119,7 @@ DBWrapper::StartService() {
// set archive config
engine::ArchiveConf::CriteriaT criterial;
int32_t disk, days;
int64_t disk, days;
s = config.GetDBConfigArchiveDiskThreshold(disk);
if (!s.ok()) {
std::cerr << s.ToString() << std::endl;
......
......@@ -54,7 +54,7 @@ CommonUtil::GetSystemMemInfo(uint64_t& total_mem, uint64_t& free_mem) {
}
bool
CommonUtil::GetSystemAvailableThreads(uint32_t& thread_count) {
CommonUtil::GetSystemAvailableThreads(int64_t& thread_count) {
// threadCnt = std::thread::hardware_concurrency();
thread_count = sysconf(_SC_NPROCESSORS_CONF);
thread_count *= THREAD_MULTIPLY_CPU;
......
......@@ -30,7 +30,7 @@ class CommonUtil {
static bool
GetSystemMemInfo(uint64_t& total_mem, uint64_t& free_mem);
static bool
GetSystemAvailableThreads(uint32_t& thread_count);
GetSystemAvailableThreads(int64_t& thread_count);
static bool
IsFileExist(const std::string& path);
......
......@@ -48,7 +48,7 @@ KnowhereResource::Initialize() {
// get build index gpu resource
server::Config& config = server::Config::GetInstance();
std::vector<int32_t> build_index_gpus;
std::vector<int64_t> build_index_gpus;
s = config.GetGpuResourceConfigBuildIndexResources(build_index_gpus);
if (!s.ok())
return s;
......@@ -58,7 +58,7 @@ KnowhereResource::Initialize() {
}
// get search gpu resource
std::vector<int32_t> search_gpus;
std::vector<int64_t> search_gpus;
s = config.GetGpuResourceConfigSearchResources(search_gpus);
if (!s.ok())
return s;
......
......@@ -9,4 +9,6 @@ sudo apt-get -y update && sudo apt-get -y install intel-mkl-gnu-2019.5-281 intel
sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev libboost-system-dev \
libboost-filesystem-dev libboost-serialization-dev libboost-regex-dev
sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
if [ ! -f "/usr/lib/x86_64-linux-gnu/libmysqlclient_r.so" ]; then
sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
fi
......@@ -104,7 +104,6 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
milvus::server::Config& config = milvus::server::Config::GetInstance();
milvus::Status s;
std::string str_val;
int32_t int32_val;
int64_t int64_val;
float float_val;
bool bool_val;
......@@ -160,26 +159,26 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == db_backend_url);
int32_t db_archive_disk_threshold = 100;
int64_t db_archive_disk_threshold = 100;
s = config.SetDBConfigArchiveDiskThreshold(std::to_string(db_archive_disk_threshold));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigArchiveDiskThreshold(int32_val);
s = config.GetDBConfigArchiveDiskThreshold(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_archive_disk_threshold);
ASSERT_TRUE(int64_val == db_archive_disk_threshold);
int32_t db_archive_days_threshold = 365;
int64_t db_archive_days_threshold = 365;
s = config.SetDBConfigArchiveDaysThreshold(std::to_string(db_archive_days_threshold));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigArchiveDaysThreshold(int32_val);
s = config.GetDBConfigArchiveDaysThreshold(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_archive_days_threshold);
ASSERT_TRUE(int64_val == db_archive_days_threshold);
int32_t db_insert_buffer_size = 2;
int64_t db_insert_buffer_size = 2;
s = config.SetDBConfigInsertBufferSize(std::to_string(db_insert_buffer_size));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigInsertBufferSize(int32_val);
s = config.GetDBConfigInsertBufferSize(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_insert_buffer_size);
ASSERT_TRUE(int64_val == db_insert_buffer_size);
/* metric config */
bool metric_enable_monitor = false;
......@@ -223,32 +222,32 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
ASSERT_TRUE(bool_val == cache_insert_data);
/* engine config */
int32_t engine_use_blas_threshold = 50;
int64_t engine_use_blas_threshold = 50;
s = config.SetEngineConfigUseBlasThreshold(std::to_string(engine_use_blas_threshold));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigUseBlasThreshold(int32_val);
s = config.GetEngineConfigUseBlasThreshold(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_use_blas_threshold);
ASSERT_TRUE(int64_val == engine_use_blas_threshold);
int32_t engine_omp_thread_num = 8;
int64_t engine_omp_thread_num = 8;
s = config.SetEngineConfigOmpThreadNum(std::to_string(engine_omp_thread_num));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigOmpThreadNum(int32_val);
s = config.GetEngineConfigOmpThreadNum(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_omp_thread_num);
ASSERT_TRUE(int64_val == engine_omp_thread_num);
int32_t engine_gpu_search_threshold = 800;
int64_t engine_gpu_search_threshold = 800;
s = config.SetEngineConfigGpuSearchThreshold(std::to_string(engine_gpu_search_threshold));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigGpuSearchThreshold(int32_val);
s = config.GetEngineConfigGpuSearchThreshold(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_gpu_search_threshold);
ASSERT_TRUE(int64_val == engine_gpu_search_threshold);
/* gpu resource config */
bool resource_enable_gpu = true;
s = config.SetGpuResourceConfigEnableGpu(std::to_string(resource_enable_gpu));
s = config.SetGpuResourceConfigEnable(std::to_string(resource_enable_gpu));
ASSERT_TRUE(s.ok());
s = config.GetGpuResourceConfigEnableGpu(bool_val);
s = config.GetGpuResourceConfigEnable(bool_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(bool_val == resource_enable_gpu);
......@@ -267,7 +266,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
ASSERT_TRUE(float_val == gpu_cache_threshold);
std::vector<std::string> search_resources = {"gpu0"};
std::vector<int32_t> search_res_vec;
std::vector<int64_t> search_res_vec;
std::string search_res_str;
milvus::server::StringHelpFunctions::MergeStringWithDelimeter(
search_resources, milvus::server::CONFIG_GPU_RESOURCE_DELIMITER, search_res_str);
......@@ -276,11 +275,11 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
s = config.GetGpuResourceConfigSearchResources(search_res_vec);
ASSERT_TRUE(s.ok());
for (size_t i = 0; i < search_resources.size(); i++) {
ASSERT_TRUE(std::stoi(search_resources[i].substr(3)) == search_res_vec[i]);
ASSERT_TRUE(std::stoll(search_resources[i].substr(3)) == search_res_vec[i]);
}
std::vector<std::string> build_index_resources = {"gpu0"};
std::vector<int32_t> build_index_res_vec;
std::vector<int64_t> build_index_res_vec;
std::string build_index_res_str;
milvus::server::StringHelpFunctions::MergeStringWithDelimeter(
build_index_resources, milvus::server::CONFIG_GPU_RESOURCE_DELIMITER, build_index_res_str);
......@@ -289,7 +288,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
s = config.GetGpuResourceConfigBuildIndexResources(build_index_res_vec);
ASSERT_TRUE(s.ok());
for (size_t i = 0; i < build_index_resources.size(); i++) {
ASSERT_TRUE(std::stoi(build_index_resources[i].substr(3)) == build_index_res_vec[i]);
ASSERT_TRUE(std::stoll(build_index_resources[i].substr(3)) == build_index_res_vec[i]);
}
#endif
}
......@@ -394,7 +393,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) {
ASSERT_FALSE(s.ok());
/* gpu resource config */
s = config.SetGpuResourceConfigEnableGpu("ok");
s = config.SetGpuResourceConfigEnable("ok");
ASSERT_FALSE(s.ok());
#ifdef MILVUS_GPU_VERSION
......
......@@ -60,7 +60,7 @@ TEST(UtilTest, COMMON_TEST) {
ASSERT_GT(total_mem, 0);
ASSERT_GT(free_mem, 0);
uint32_t thread_cnt = 0;
int64_t thread_cnt = 0;
milvus::server::CommonUtil::GetSystemAvailableThreads(thread_cnt);
ASSERT_GT(thread_cnt, 0);
......
......@@ -2,6 +2,7 @@ import logging
import threading
from functools import wraps
from milvus import Milvus
from milvus.client.hooks import BaseaSearchHook
from mishards import (settings, exceptions)
from utils import singleton
......@@ -9,6 +10,12 @@ from utils import singleton
logger = logging.getLogger(__name__)
class Searchook(BaseaSearchHook):
def on_response(self, *args, **kwargs):
return True
class Connection:
def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs):
self.name = name
......@@ -18,6 +25,9 @@ class Connection:
self.conn = Milvus()
self.error_handlers = [] if not error_handlers else error_handlers
self.on_retry_func = kwargs.get('on_retry_func', None)
# define search hook
self.conn._set_hook(search_in_file=Searchook())
# self._connect()
def __str__(self):
......
......@@ -29,39 +29,71 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
self.router = router
self.max_workers = max_workers
def _reduce(self, source_ids, ids, source_diss, diss, k, reverse):
if source_diss[k - 1] <= diss[0]:
return source_ids, source_diss
if diss[k - 1] <= source_diss[0]:
return ids, diss
source_diss.extend(diss)
diss_t = enumerate(source_diss)
diss_m_rst = sorted(diss_t, key=lambda x: x[1])[:k]
diss_m_out = [id_ for _, id_ in diss_m_rst]
source_ids.extend(ids)
id_m_out = [source_ids[i] for i, _ in diss_m_rst]
return id_m_out, diss_m_out
def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs):
status = status_pb2.Status(error_code=status_pb2.SUCCESS,
reason="Success")
if not files_n_topk_results:
return status, []
request_results = defaultdict(list)
merge_id_results = []
merge_dis_results = []
calc_time = time.time()
for files_collection in files_n_topk_results:
if isinstance(files_collection, tuple):
status, _ = files_collection
return status, []
for request_pos, each_request_results in enumerate(
files_collection.topk_query_result):
request_results[request_pos].extend(
each_request_results.query_result_arrays)
request_results[request_pos] = sorted(
request_results[request_pos],
key=lambda x: x.distance,
reverse=reverse)[:topk]
row_num = files_collection.row_num
ids = files_collection.ids
diss = files_collection.distances # distance collections
# TODO: batch_len is equal to topk, may need to compare with topk
batch_len = len(ids) // row_num
for row_index in range(row_num):
id_batch = ids[row_index * batch_len: (row_index + 1) * batch_len]
dis_batch = diss[row_index * batch_len: (row_index + 1) * batch_len]
if len(merge_id_results) < row_index:
raise ValueError("merge error")
elif len(merge_id_results) == row_index:
# TODO: may bug here
merge_id_results.append(id_batch)
merge_dis_results.append(dis_batch)
else:
merge_id_results[row_index], merge_dis_results[row_index] = \
self._reduce(merge_id_results[row_index], id_batch,
merge_dis_results[row_index], dis_batch,
batch_len,
reverse)
calc_time = time.time() - calc_time
logger.info('Merge takes {}'.format(calc_time))
results = sorted(request_results.items())
topk_query_result = []
id_mrege_list = []
dis_mrege_list = []
for result in results:
query_result = TopKQueryResult(query_result_arrays=result[1])
topk_query_result.append(query_result)
for id_results, dis_results in zip(merge_id_results, merge_dis_results):
id_mrege_list.extend(id_results)
dis_mrege_list.extend(dis_results)
return status, topk_query_result
return status, id_mrege_list, dis_mrege_list
def _do_query(self,
context,
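The `_reduce` helper above merges two per-query top-k batches that are each sorted by distance: if either batch already dominates the other it is returned unchanged, otherwise the two are concatenated and re-sorted, keeping the k best. A small self-contained illustration of the same logic with hypothetical ids and distances (ascending-distance case, not the handler's actual method):

```python
# Standalone illustration of the _reduce merge step with made-up data.
def reduce_topk(source_ids, ids, source_diss, diss, k):
    if source_diss[k - 1] <= diss[0]:        # existing batch already dominates
        return source_ids, source_diss
    if diss[k - 1] <= source_diss[0]:        # new batch dominates
        return ids, diss
    source_diss.extend(diss)
    source_ids.extend(ids)
    best = sorted(enumerate(source_diss), key=lambda x: x[1])[:k]
    return [source_ids[i] for i, _ in best], [d for _, d in best]

ids_a, dis_a = [11, 12, 13], [0.1, 0.4, 0.9]
ids_b, dis_b = [21, 22, 23], [0.2, 0.3, 0.5]
print(reduce_topk(ids_a, ids_b, dis_a, dis_b, k=3))
# ([11, 21, 22], [0.1, 0.2, 0.3])
```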
......@@ -109,8 +141,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
file_ids=query_params['file_ids'],
query_records=vectors,
top_k=topk,
nprobe=nprobe,
lazy_=True)
nprobe=nprobe
)
end = time.time()
logger.info('search_vectors_in_files takes: {}'.format(end - start))
......@@ -241,7 +273,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
logger.info('Search {}: topk={} nprobe={}'.format(
table_name, topk, nprobe))
metadata = {'resp_class': milvus_pb2.TopKQueryResultList}
metadata = {'resp_class': milvus_pb2.TopKQueryResult}
if nprobe > self.MAX_NPROBE or nprobe <= 0:
raise exceptions.InvalidArgumentError(
......@@ -275,22 +307,24 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
query_range_array.append(
Range(query_range.start_value, query_range.end_value))
status, results = self._do_query(context,
table_name,
table_meta,
query_record_array,
topk,
nprobe,
query_range_array,
metadata=metadata)
status, id_results, dis_results = self._do_query(context,
table_name,
table_meta,
query_record_array,
topk,
nprobe,
query_range_array,
metadata=metadata)
now = time.time()
logger.info('SearchVector takes: {}'.format(now - start))
topk_result_list = milvus_pb2.TopKQueryResultList(
topk_result_list = milvus_pb2.TopKQueryResult(
status=status_pb2.Status(error_code=status.error_code,
reason=status.reason),
topk_query_result=results)
row_num=len(query_record_array),
ids=id_results,
distances=dis_results)
return topk_result_list
@mark_grpc_method
......
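With this change `SearchVector` returns a single flattened `TopKQueryResult` (`row_num` plus flat `ids` and `distances`) instead of a `TopKQueryResultList` of per-query arrays. A hedged sketch of how a caller might regroup that flat layout into per-query (id, distance) pairs; the field names come from the handler above, while the helper itself is hypothetical:

```python
# Hypothetical helper: regroup the flattened ids/distances returned by
# SearchVector into one list of (id, distance) pairs per query vector.
def unflatten_topk(row_num, ids, distances):
    if row_num == 0:
        return []
    batch = len(ids) // row_num   # normally equal to topk
    return [
        list(zip(ids[i * batch:(i + 1) * batch],
                 distances[i * batch:(i + 1) * batch]))
        for i in range(row_num)
    ]

print(unflatten_topk(2, [5, 7, 9, 2], [0.1, 0.3, 0.2, 0.4]))
# [[(5, 0.1), (7, 0.3)], [(9, 0.2), (2, 0.4)]]
```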
......@@ -14,8 +14,7 @@ py==1.8.0
pyasn1==0.4.7
pyasn1-modules==0.2.6
pylint==2.3.1
pymilvus-test==0.2.28
#pymilvus==0.2.0
pymilvus==0.2.5
pyparsing==2.4.0
pytest==4.6.3
pytest-level==0.1.1
......