Commit 5b7704a9 authored by peng.xu

Merge branch 'branch-0.4.0' into 'branch-0.4.0'

MS-417 YAML sequence load disable cause scheduler startup failed

See merge request megasearch/milvus!424

Former-commit-id: 3812e11a43625bb9135a6df975b922c8e4f9efed
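The failure mode behind MS-417: the connection list in server_config (see the `# connection list` comment in the template hunk below) is written as a YAML sequence, and with `SetSequence` commented out, `LoadConfigNode` only handled scalar and map nodes, so the scheduler presumably found no connections at startup. A minimal sketch, assuming yaml-cpp and made-up resource names (not code from this merge request), of why a dedicated sequence branch is needed:

```cpp
// Sketch only, not part of the patch: a YAML sequence is neither a scalar nor
// a map, so a loader with only IsScalar()/IsMap() branches silently drops it.
#include <yaml-cpp/yaml.h>
#include <iostream>

int main() {
    // Hypothetical connection list in the template's
    // "-${resource_name}===${resource_name}" form; names are invented.
    YAML::Node node = YAML::Load(
        "connections:\n"
        "  - cpu===gtx1060\n"
        "  - gtx1060===gtx1660\n");

    const YAML::Node& conns = node["connections"];
    std::cout << std::boolalpha
              << "scalar: "   << conns.IsScalar()   << "\n"   // false
              << "map: "      << conns.IsMap()      << "\n"   // false
              << "sequence: " << conns.IsSequence() << "\n";  // true

    // The restored SetSequence iterates a sequence node the same way:
    for (std::size_t i = 0; i < conns.size(); ++i) {
        std::cout << conns[i].as<std::string>() << "\n";
    }
    return 0;
}
```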
@@ -7,6 +7,8 @@ Please mark all change in change log and use the ticket from JIRA.
 ## Bug
 - MS-411 - Fix metric unittest linking error
 - MS-412 - Fix gpu cache logical error
+- MS-416 - ExecutionEngineImpl::GpuCache has not return value cause crash
+- MS-417 - YAML sequence load disable cause scheduler startup failed
 
 ## Improvement
 - MS-327 - Clean code for milvus
@@ -51,6 +53,9 @@ Please mark all change in change log and use the ticket from JIRA.
 - MS-409 - Using new scheduler
 - MS-413 - Remove thrift dependency
 - MS-410 - Add resource config comment
+- MS-414 - Add TaskType in Scheduler::Task
+- MS-415 - Add command tasktable to dump all tasktables
+- MS-418 - Update server_config.template file, set CPU compute only default
 - MS-419 - Move index_file_size from IndexParam to TableSchema
 
 ## New Feature
......
@@ -64,21 +64,21 @@ resource_config:
     memory: 64
     device_id: 0
     enable_loader: true
-    enable_executor: false
+    enable_executor: true
   gtx1060:
     type: GPU
     memory: 6
     device_id: 0
-    enable_loader: true
-    enable_executor: true
+    enable_loader: false
+    enable_executor: false
   gtx1660:
     type: GPU
     memory: 6
     device_id: 1
-    enable_loader: true
-    enable_executor: true
+    enable_loader: false
+    enable_executor: false
 
   # connection list, length: 0~N
   # format: -${resource_name}===${resource_name}
......
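The template defaults above tie in with MS-418 ("set CPU compute only default"): both GPU resources now ship with loader and executor disabled, while the first resource (evidently the CPU one, given MS-418) has enable_executor switched to true. A hedged sketch, assuming yaml-cpp and not taken from this merge request, of reading one of those per-resource flags back:

```cpp
// Sketch only: read a single resource entry of the template with yaml-cpp.
#include <yaml-cpp/yaml.h>
#include <iostream>

int main() {
    YAML::Node cfg = YAML::Load(
        "gtx1060:\n"
        "  type: GPU\n"
        "  memory: 6\n"
        "  device_id: 0\n"
        "  enable_loader: false\n"
        "  enable_executor: false\n");

    const YAML::Node& gpu = cfg["gtx1060"];
    std::cout << std::boolalpha
              << "loader: "   << gpu["enable_loader"].as<bool>()   << "\n"   // false
              << "executor: " << gpu["enable_executor"].as<bool>() << "\n";  // false
    return 0;
}
```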
@@ -73,19 +73,19 @@ YamlConfigMgr::SetChildConfig(const YAML::Node& node,
     return false;
 }
 
-//bool
-//YamlConfigMgr::SetSequence(const YAML::Node &node,
-//                           const std::string &child_name,
-//                           ConfigNode &config) {
-//    if(node[child_name].IsDefined ()) {
-//        size_t cnt = node[child_name].size();
-//        for(size_t i = 0; i < cnt; i++){
-//            config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
-//        }
-//        return true;
-//    }
-//    return false;
-//}
+bool
+YamlConfigMgr::SetSequence(const YAML::Node &node,
+                           const std::string &child_name,
+                           ConfigNode &config) {
+    if(node[child_name].IsDefined ()) {
+        size_t cnt = node[child_name].size();
+        for(size_t i = 0; i < cnt; i++){
+            config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
+        }
+        return true;
+    }
+    return false;
+}
 
 void
 YamlConfigMgr::LoadConfigNode(const YAML::Node& node, ConfigNode& config) {
@@ -98,8 +98,8 @@ YamlConfigMgr::LoadConfigNode(const YAML::Node& node, ConfigNode& config) {
             SetConfigValue(node, key, config);
         } else if(node[key].IsMap()){
             SetChildConfig(node, key, config);
-//        } else if(node[key].IsSequence()){
-//            SetSequence(node, key, config);
+        } else if(node[key].IsSequence()){
+            SetSequence(node, key, config);
         }
     }
 }
......
@@ -33,10 +33,10 @@ class YamlConfigMgr : public IConfigMgr {
                    const std::string &name,
                    ConfigNode &config);
 
-//    bool
-//    SetSequence(const YAML::Node &node,
-//                const std::string &child_name,
-//                ConfigNode &config);
+    bool
+    SetSequence(const YAML::Node &node,
+                const std::string &child_name,
+                ConfigNode &config);
 
     void LoadConfigNode(const YAML::Node& node, ConfigNode& config);
......
@@ -273,6 +273,8 @@ Status ExecutionEngineImpl::Cache() {
 
 Status ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
     zilliz::milvus::cache::GpuCacheMgr::GetInstance(gpu_id)->InsertItem(location_, index_);
+
+    return Status::OK();
 }
 
 // TODO(linxj): remove.
......
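The one-line addition above is the whole of MS-416: `GpuCache` is declared to return a `Status` but previously fell off the end of the function without returning one, which is undefined behavior in C++, so callers received an indeterminate `Status` and could crash. A minimal sketch with a stand-in `Status` type (not Milvus's real one, and not code from this merge request):

```cpp
// Sketch only: flowing off the end of a non-void function is undefined
// behavior, which is why the missing return in GpuCache() crashed its callers.
#include <iostream>

struct Status {
    static Status OK() { return Status{}; }
    bool ok() const { return true; }
};

Status GpuCacheFixed() {
    // ... insert the index into the GPU cache, as in the hunk above ...
    return Status::OK();  // the line MS-416 adds; every path must return a Status
}

int main() {
    std::cout << std::boolalpha << GpuCacheFixed().ok() << "\n";  // true
    return 0;
}
```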
@@ -12,7 +12,7 @@ namespace milvus {
 namespace engine {
 
 XDeleteTask::XDeleteTask(DeleteContextPtr &delete_context)
-    : delete_context_ptr_(delete_context) {}
+    : Task(TaskType::DeleteTask), delete_context_ptr_(delete_context) {}
 
 void
 XDeleteTask::Load(LoadType type, uint8_t device_id) {
......
@@ -81,11 +81,12 @@ CollectFileMetrics(int file_type, size_t file_size) {
     }
 }
 
-XSearchTask::XSearchTask(TableFileSchemaPtr file) : file_(file) {
+XSearchTask::XSearchTask(TableFileSchemaPtr file)
+    : Task(TaskType::SearchTask), file_(file) {
     index_engine_ = EngineFactory::Build(file_->dimension_,
                                          file_->location_,
                                          (EngineType) file_->engine_type_,
-                                         (MetricType)file_->metric_type_,
+                                         (MetricType) file_->metric_type_,
                                          file_->nlist_);
 }
......
@@ -21,13 +21,20 @@ enum class LoadType {
     GPU2CPU,
 };
 
+enum class TaskType {
+    SearchTask,
+    DeleteTask,
+    TestTask,
+};
+
 class Task;
 using TaskPtr = std::shared_ptr<Task>;
 
 class Task {
 public:
-    Task() = default;
+    explicit
+    Task(TaskType type) : type_(type) {}
 
     virtual void
     Load(LoadType type, uint8_t device_id) = 0;
@@ -39,9 +46,13 @@ public:
     virtual TaskPtr
     Clone() = 0;
 
+    inline TaskType
+    Type() const { return type_; }
+
 public:
     std::vector<SearchContextPtr> search_contexts_;
     ScheduleTaskPtr task_;
+    TaskType type_;
 };
......
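MS-414 threads a `TaskType` tag through the scheduler's `Task` hierarchy: the base class now stores the type passed by each concrete constructor (see the surrounding `XDeleteTask`, `XSearchTask`, and `TestTask` hunks) and exposes it via `Type()`. A hedged usage sketch, assuming the declarations from the hunk above are in scope; `OnTaskFinished` is a hypothetical function, not part of this merge request:

```cpp
// Hypothetical dispatch sketch, not code from this patch. Assumes the
// Task/TaskType definitions shown in the hunk above are included.
void OnTaskFinished(const TaskPtr &task) {
    switch (task->Type()) {
        case TaskType::SearchTask:
            // e.g. merge partial search results
            break;
        case TaskType::DeleteTask:
            // e.g. release resources held by the delete context
            break;
        case TaskType::TestTask:
            // unit-test hook only
            break;
    }
}
```

Branching on `Type()` avoids `dynamic_cast` on `TaskPtr`, which is presumably the motivation for tagging the base class.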
@@ -11,6 +11,8 @@ namespace zilliz {
 namespace milvus {
 namespace engine {
 
+TestTask::TestTask() : Task(TaskType::TestTask) {}
+
 void
 TestTask::Load(LoadType type, uint8_t device_id) {
     load_count_++;
......
@@ -14,7 +14,7 @@ namespace engine {
 class TestTask : public Task {
 public:
-    TestTask() = default;
+    TestTask();
 
 public:
     void
......
@@ -13,6 +13,7 @@
 #include "version.h"
 #include "GrpcMilvusServer.h"
 #include "db/Utils.h"
+#include "scheduler/SchedInst.h"
 
 #include "src/server/Server.h"
@@ -753,7 +754,10 @@ ServerError
 CmdTask::OnExecute() {
     if (cmd_ == "version") {
         result_ = MILVUS_VERSION;
-    } else {
+    } else if (cmd_ == "tasktable") {
+        result_ = engine::ResMgrInst::GetInstance()->DumpTaskTables();
+    }
+    else {
         result_ = "OK";
     }
......