Commit aabd91a1 authored by S starlord

format code by clang-tidy


Former-commit-id: f9e606566555ad381b8e95e1a4e2572fc61678f0
Parent 464a3415
......@@ -23,3 +23,5 @@ AccessModifierOffset: -3
AlwaysBreakAfterReturnType: All
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AlignTrailingComments: true
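
Note: the .clang-format options added above (AllowShortBlocksOnASingleLine, AllowShortFunctionsOnASingleLine and AllowShortIfStatementsOnASingleLine all set to false, plus AlignTrailingComments) drive most of the mechanical edits in the hunks below: one-line guards such as "if (!startup_) return;" are expanded into braced blocks. A minimal standalone sketch of that effect, using a hypothetical LogIfEnabled helper that is not part of Milvus:

#include <iostream>

// Hypothetical helper (not Milvus code) showing the effect of
// AllowShortIfStatementsOnASingleLine: false. A guard that could be
// written as "if (!enabled) return;" becomes a braced block.
void
LogIfEnabled(bool enabled, const char* msg) {
    if (!enabled) {
        return;
    }
    std::cout << msg << std::endl;
}

int
main() {
    LogIfEnabled(true, "metrics enabled");
    LogIfEnabled(false, "never printed");
    return 0;
}
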
......@@ -47,7 +47,7 @@ class YamlConfigMgr : public ConfigMgr {
SetConfigValue(const YAML::Node& node, const std::string& key, ConfigNode& config);
bool
SetChildConfig(const YAML::Node& node, const std::string& name, ConfigNode& config);
SetChildConfig(const YAML::Node& node, const std::string& child_name, ConfigNode& config);
bool
SetSequence(const YAML::Node& node, const std::string& child_name, ConfigNode& config);
......
......@@ -46,12 +46,14 @@ struct Cfg {
const int64_t &k,
const int64_t &gpu_id,
METRICTYPE type)
: d(dim), k(k), gpu_id(gpu_id), metric_type(type) {}
: metric_type(type), k(k), gpu_id(gpu_id), d(dim) {}
Cfg() = default;
virtual bool
CheckValid(){};
CheckValid(){
return true;
};
};
using Config = std::shared_ptr<Cfg>;
......
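
The Cfg hunk above shows two fixes that recur throughout this commit: the member initializer list is reordered to match the declaration order (members are always constructed in declaration order, so a mismatched list draws a -Wreorder warning), and the previously empty CheckValid() body now returns a value, since flowing off the end of a non-void function is undefined behavior. A minimal standalone sketch of the same pattern, with a hypothetical DemoCfg struct rather than the real Cfg:

#include <cstdint>

// Hypothetical struct (not the Milvus Cfg) declared with metric_type, k, d.
struct DemoCfg {
    int64_t metric_type = 0;
    int64_t k = 0;
    int64_t d = 0;

    // Initializer list written in declaration order; members are initialized
    // in that order regardless of how the list is spelled, so keeping the two
    // aligned avoids -Wreorder.
    DemoCfg(int64_t dim, int64_t topk, int64_t type)
        : metric_type(type), k(topk), d(dim) {
    }

    virtual ~DemoCfg() = default;

    // Every path of a non-void function must return a value; the old "{}"
    // body was undefined behavior whenever the result was read.
    virtual bool
    CheckValid() {
        return true;
    }
};

int
main() {
    DemoCfg cfg(128, 10, 1);
    return cfg.CheckValid() ? 0 : 1;
}
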
......@@ -127,10 +127,10 @@ class Dataset {
//}
private:
SchemaPtr array_schema_;
SchemaPtr tensor_schema_;
std::vector<ArrayPtr> array_;
SchemaPtr array_schema_;
std::vector<TensorPtr> tensor_;
SchemaPtr tensor_schema_;
//Config meta_;
};
......
......@@ -25,9 +25,14 @@ namespace zilliz {
namespace knowhere {
faiss::MetricType GetMetricType(METRICTYPE &type) {
if (type == METRICTYPE::L2) return faiss::METRIC_L2;
if (type == METRICTYPE::IP) return faiss::METRIC_INNER_PRODUCT;
if (type == METRICTYPE::INVALID) KNOWHERE_THROW_MSG("Metric type is invalid");
if (type == METRICTYPE::L2) {
return faiss::METRIC_L2;
}
if (type == METRICTYPE::IP) {
return faiss::METRIC_INNER_PRODUCT;
}
KNOWHERE_THROW_MSG("Metric type is invalid");
}
......
......@@ -52,12 +52,15 @@ struct IVFCfg : public Cfg {
const int64_t &nlist,
const int64_t &nprobe,
METRICTYPE type)
: nlist(nlist), nprobe(nprobe), Cfg(dim, k, gpu_id, type) {}
: Cfg(dim, k, gpu_id, type), nlist(nlist), nprobe(nprobe) {
}
IVFCfg() = default;
bool
CheckValid() override {};
CheckValid() override {
return true;
};
};
using IVFConfig = std::shared_ptr<IVFCfg>;
......@@ -72,12 +75,15 @@ struct IVFSQCfg : public IVFCfg {
const int64_t &nprobe,
const int64_t &nbits,
METRICTYPE type)
: nbits(nbits), IVFCfg(dim, k, gpu_id, nlist, nprobe, type) {}
: IVFCfg(dim, k, gpu_id, nlist, nprobe, type), nbits(nbits) {
}
IVFSQCfg() = default;
bool
CheckValid() override {};
CheckValid() override {
return true;
};
};
using IVFSQConfig = std::shared_ptr<IVFSQCfg>;
......@@ -98,12 +104,15 @@ struct IVFPQCfg : public IVFCfg {
const int64_t &nbits,
const int64_t &m,
METRICTYPE type)
: nbits(nbits), m(m), IVFCfg(dim, k, gpu_id, nlist, nprobe, type) {}
: IVFCfg(dim, k, gpu_id, nlist, nprobe, type), m(m), nbits(nbits) {
}
IVFPQCfg() = default;
bool
CheckValid() override {};
CheckValid() override {
return true;
};
};
using IVFPQConfig = std::shared_ptr<IVFPQCfg>;
......@@ -123,13 +132,17 @@ struct NSGCfg : public IVFCfg {
const int64_t &out_degree,
const int64_t &candidate_size,
METRICTYPE type)
: knng(knng), search_length(search_length), out_degree(out_degree), candidate_pool_size(candidate_size),
IVFCfg(dim, k, gpu_id, nlist, nprobe, type) {}
: IVFCfg(dim, k, gpu_id, nlist, nprobe, type),
knng(knng), search_length(search_length),
out_degree(out_degree), candidate_pool_size(candidate_size) {
}
NSGCfg() = default;
bool
CheckValid() override {};
CheckValid() override {
return true;
};
};
using NSGConfig = std::shared_ptr<NSGCfg>;
......
......@@ -81,7 +81,7 @@ void NsgIndex::Build_with_ids(size_t nb, const float *data, const long *ids, con
//>> Debug code
///
int total_degree = 0;
for (int i = 0; i < ntotal; ++i) {
for (size_t i = 0; i < ntotal; ++i) {
total_degree += nsg[i].size();
}
......@@ -172,7 +172,7 @@ void NsgIndex::GetNeighbors(const float *query,
for (size_t i = 0; i < init_ids.size(); ++i) {
node_t id = init_ids[i];
if (id >= ntotal) {
if (id >= static_cast<node_t>(ntotal)) {
KNOWHERE_THROW_MSG("Build Index Error, id > ntotal");
continue;
}
......@@ -262,7 +262,7 @@ void NsgIndex::GetNeighbors(const float *query, std::vector<Neighbor> &resset, s
for (size_t i = 0; i < init_ids.size(); ++i) {
node_t id = init_ids[i];
if (id >= ntotal) {
if (id >= static_cast<node_t>(ntotal)) {
KNOWHERE_THROW_MSG("Build Index Error, id > ntotal");
continue;
}
......@@ -350,7 +350,7 @@ void NsgIndex::GetNeighbors(const float *query,
node_t id = init_ids[i];
//assert(id < ntotal);
if (id >= ntotal) {
if (id >= static_cast<node_t>(ntotal)) {
KNOWHERE_THROW_MSG("Build Index Error, id > ntotal");
continue;
}
......@@ -461,7 +461,7 @@ void NsgIndex::Link() {
//}
/////
for (int i = 0; i < ntotal; ++i) {
for (size_t i = 0; i < ntotal; ++i) {
nsg[i].shrink_to_fit();
}
}
......@@ -483,7 +483,9 @@ void NsgIndex::SyncPrune(size_t n,
unsigned cursor = 0;
std::sort(pool.begin(), pool.end());
std::vector<Neighbor> result;
if (pool[cursor].id == n) cursor++;
if (pool[cursor].id == static_cast<node_t>(n)) {
cursor++;
}
result.push_back(pool[cursor]); // init result with nearest neighbor
SelectEdge(cursor, pool, result, true);
......@@ -518,7 +520,7 @@ void NsgIndex::InterInsert(unsigned n, std::vector<std::mutex> &mutex_vec, float
int duplicate = false;
{
LockGuard lk(mutex_vec[current_neighbor]);
for (int j = 0; j < out_degree; ++j) {
for (size_t j = 0; j < out_degree; ++j) {
if (nsn_dist_pool[j] == -1) break;
// ensure at least one edge can link back
......@@ -551,14 +553,14 @@ void NsgIndex::InterInsert(unsigned n, std::vector<std::mutex> &mutex_vec, float
{
LockGuard lk(mutex_vec[current_neighbor]);
for (int j = 0; j < result.size(); ++j) {
for (size_t j = 0; j < result.size(); ++j) {
nsn_id_pool[j] = result[j].id;
nsn_dist_pool[j] = result[j].distance;
}
}
} else {
LockGuard lk(mutex_vec[current_neighbor]);
for (int j = 0; j < out_degree; ++j) {
for (size_t j = 0; j < out_degree; ++j) {
if (nsn_dist_pool[j] == -1) {
nsn_id_pool.push_back(current_as_neighbor.id);
nsn_dist_pool[j] = current_as_neighbor.distance;
......@@ -605,9 +607,11 @@ void NsgIndex::CheckConnectivity() {
boost::dynamic_bitset<> has_linked{ntotal, 0};
int64_t linked_count = 0;
while (linked_count < ntotal) {
while (linked_count < static_cast<int64_t>(ntotal)) {
DFS(root, has_linked, linked_count);
if (linked_count >= ntotal) break;
if (linked_count >= static_cast<int64_t>(ntotal)) {
break;
}
FindUnconnectedNode(has_linked, root);
}
}
......@@ -697,7 +701,7 @@ void NsgIndex::Search(const float *query,
} else{
//#pragma omp parallel for schedule(dynamic, 50)
#pragma omp parallel for
for (int i = 0; i < nq; ++i) {
for (unsigned int i = 0; i < nq; ++i) {
// TODO(linxj): when to use openmp
auto single_query = query + i * dim;
GetNeighbors(single_query, resset[i], nsg, &params);
......@@ -705,8 +709,8 @@ void NsgIndex::Search(const float *query,
}
rc.ElapseFromBegin("cost");
for (int i = 0; i < nq; ++i) {
for (int j = 0; j < k; ++j) {
for (unsigned int i = 0; i < nq; ++i) {
for (unsigned int j = 0; j < k; ++j) {
//ids[i * k + j] = resset[i][j].id;
// Fix(linxj): bug, reset[i][j] out of range
......
......@@ -29,7 +29,7 @@ namespace algo {
// TODO: impl search && insert && return insert pos. why not just find and swap?
int InsertIntoPool(Neighbor *addr, unsigned K, Neighbor nn) {
//>> Fix: Add assert
for (int i = 0; i < K; ++i) {
for (unsigned int i = 0; i < K; ++i) {
assert(addr[i].id != nn.id);
}
......
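
The loop-index changes in the NSG hunks above (int i becoming size_t i or unsigned int i) silence signed/unsigned comparison warnings: ntotal, out_degree, result.size() and K are unsigned, so comparing them against a signed int triggers -Wsign-compare and relies on an implicit conversion. A minimal standalone sketch of the rule, unrelated to the Milvus code:

#include <cstddef>
#include <iostream>
#include <vector>

int
main() {
    std::vector<int> values{1, 2, 3};

    // values.size() returns size_t; indexing with a signed int would trigger
    // -Wsign-compare, so the index is declared size_t as in the hunks above.
    for (size_t i = 0; i < values.size(); ++i) {
        std::cout << values[i] << '\n';
    }
    return 0;
}
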
......@@ -38,8 +38,8 @@ void DataGen::Generate(const int &dim, const int &nb, const int &nq) {
this->dim = dim;
GenAll(dim, nb, xb, ids, nq, xq);
assert(xb.size() == dim * nb);
assert(xq.size() == dim * nq);
assert(xb.size() == (size_t)dim * nb);
assert(xq.size() == (size_t)dim * nq);
base_dataset = generate_dataset(nb, dim, xb.data(), ids.data());
query_dataset = generate_query_dataset(nq, dim, xq.data());
......@@ -47,7 +47,7 @@ void DataGen::Generate(const int &dim, const int &nb, const int &nq) {
}
zilliz::knowhere::DatasetPtr DataGen::GenQuery(const int &nq) {
xq.resize(nq * dim);
for (size_t i = 0; i < nq * dim; ++i) {
for (int i = 0; i < nq * dim; ++i) {
xq[i] = xb[i];
}
return generate_query_dataset(nq, dim, xq.data());
......@@ -72,7 +72,7 @@ void GenAll(const int64_t &dim,
const int64_t &nq,
float *xq) {
GenBase(dim, nb, xb, ids);
for (size_t i = 0; i < nq * dim; ++i) {
for (int64_t i = 0; i < nq * dim; ++i) {
xq[i] = xb[i];
}
}
......
......@@ -638,7 +638,8 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date, const m
ENGINE_LOG_DEBUG << "Merging file " << file_schema.file_id_;
index_size = index->Size();
if (index_size >= file_schema.index_file_size_) break;
if (index_size >= file_schema.index_file_size_)
break;
}
// step 3: serialize to disk
......
......@@ -53,9 +53,9 @@ struct ArchiveConf {
private:
void
ParseCritirias(const std::string& type);
ParseCritirias(const std::string& criterias);
void
ParseType(const std::string& criterias);
ParseType(const std::string& type);
std::string type_;
CriteriaT criterias_;
......
......@@ -143,14 +143,14 @@ GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file
if (boost::filesystem::exists(file_path)) {
table_file.location_ = file_path;
return Status::OK();
} else {
for (auto& path : options.slave_paths_) {
parent_path = ConstructParentFolder(path, table_file);
file_path = parent_path + "/" + table_file.file_id_;
if (boost::filesystem::exists(file_path)) {
table_file.location_ = file_path;
return Status::OK();
}
}
for (auto& path : options.slave_paths_) {
parent_path = ConstructParentFolder(path, table_file);
file_path = parent_path + "/" + table_file.file_id_;
if (boost::filesystem::exists(file_path)) {
table_file.location_ = file_path;
return Status::OK();
}
}
......
......@@ -342,7 +342,8 @@ Status
ExecutionEngineImpl::Init() {
server::Config& config = server::Config::GetInstance();
Status s = config.GetDBConfigBuildIndexGPU(gpu_num_);
if (!s.ok()) return s;
if (!s.ok())
return s;
return Status::OK();
}
......
......@@ -39,12 +39,12 @@ main(int argc, char* argv[]) {
std::cout << std::endl << "Welcome to use Milvus by Zilliz!" << std::endl;
std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << " built at " << BUILD_TIME << std::endl;
static struct option long_options[] = {{"conf_file", required_argument, 0, 'c'},
{"log_conf_file", required_argument, 0, 'l'},
{"help", no_argument, 0, 'h'},
{"daemon", no_argument, 0, 'd'},
{"pid_file", required_argument, 0, 'p'},
{NULL, 0, 0, 0}};
static struct option long_options[] = {{"conf_file", required_argument, nullptr, 'c'},
{"log_conf_file", required_argument, nullptr, 'l'},
{"help", no_argument, nullptr, 'h'},
{"daemon", no_argument, nullptr, 'd'},
{"pid_file", required_argument, nullptr, 'p'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
int64_t start_daemonized = 0;
......
......@@ -33,13 +33,20 @@ PrometheusMetrics::Init() {
try {
Config& config = Config::GetInstance();
Status s = config.GetMetricConfigEnableMonitor(startup_);
if (!s.ok()) return s.code();
if (!startup_) return SERVER_SUCCESS;
if (!s.ok()) {
return s.code();
}
if (!startup_) {
return SERVER_SUCCESS;
}
// Following should be read from config file.
std::string bind_address;
s = config.GetMetricConfigPrometheusPort(bind_address);
if (!s.ok()) return s.code();
if (!s.ok()) {
return s.code();
}
const std::string uri = std::string("/tmp/metrics");
const std::size_t num_threads = 2;
......@@ -58,21 +65,30 @@ PrometheusMetrics::Init() {
void
PrometheusMetrics::CPUUsagePercentSet() {
if (!startup_) return;
if (!startup_) {
return;
}
double usage_percent = server::SystemInfo::GetInstance().CPUPercent();
CPU_usage_percent_.Set(usage_percent);
}
void
PrometheusMetrics::RAMUsagePercentSet() {
if (!startup_) return;
if (!startup_) {
return;
}
double usage_percent = server::SystemInfo::GetInstance().MemoryPercent();
RAM_usage_percent_.Set(usage_percent);
}
void
PrometheusMetrics::GPUPercentGaugeSet() {
if (!startup_) return;
if (!startup_) {
return;
}
int numDevice = server::SystemInfo::GetInstance().num_device();
std::vector<uint64_t> used_total = server::SystemInfo::GetInstance().GPUMemoryTotal();
std::vector<uint64_t> used_memory = server::SystemInfo::GetInstance().GPUMemoryUsed();
......@@ -86,7 +102,10 @@ PrometheusMetrics::GPUPercentGaugeSet() {
void
PrometheusMetrics::GPUMemoryUsageGaugeSet() {
if (!startup_) return;
if (!startup_) {
return;
}
std::vector<uint64_t> values = server::SystemInfo::GetInstance().GPUMemoryUsed();
constexpr uint64_t MtoB = 1024 * 1024;
int numDevice = server::SystemInfo::GetInstance().num_device();
......@@ -100,7 +119,9 @@ PrometheusMetrics::GPUMemoryUsageGaugeSet() {
void
PrometheusMetrics::AddVectorsPerSecondGaugeSet(int num_vector, int dim, double time) {
// MB/s
if (!startup_) return;
if (!startup_) {
return;
}
int64_t MtoB = 1024 * 1024;
int64_t size = num_vector * dim * 4;
......@@ -109,7 +130,10 @@ PrometheusMetrics::AddVectorsPerSecondGaugeSet(int num_vector, int dim, double t
void
PrometheusMetrics::QueryIndexTypePerSecondSet(std::string type, double value) {
if (!startup_) return;
if (!startup_) {
return;
}
if (type == "IVF") {
query_index_IVF_type_per_second_gauge_.Set(value);
} else if (type == "IDMap") {
......@@ -119,19 +143,27 @@ PrometheusMetrics::QueryIndexTypePerSecondSet(std::string type, double value) {
void
PrometheusMetrics::ConnectionGaugeIncrement() {
if (!startup_) return;
if (!startup_) {
return;
}
connection_gauge_.Increment();
}
void
PrometheusMetrics::ConnectionGaugeDecrement() {
if (!startup_) return;
if (!startup_) {
return;
}
connection_gauge_.Decrement();
}
void
PrometheusMetrics::OctetsSet() {
if (!startup_) return;
if (!startup_) {
return;
}
// get old stats and reset them
uint64_t old_inoctets = SystemInfo::GetInstance().get_inoctets();
......@@ -147,14 +179,19 @@ PrometheusMetrics::OctetsSet() {
auto now_time = std::chrono::system_clock::now();
auto total_microsecond = METRICS_MICROSECONDS(old_time, now_time);
auto total_second = total_microsecond * micro_to_second;
if (total_second == 0) return;
if (total_second == 0) {
return;
}
inoctets_gauge_.Set((in_and_out_octets.first - old_inoctets) / total_second);
outoctets_gauge_.Set((in_and_out_octets.second - old_outoctets) / total_second);
}
void
PrometheusMetrics::CPUCoreUsagePercentSet() {
if (!startup_) return;
if (!startup_) {
return;
}
std::vector<double> cpu_core_percent = server::SystemInfo::GetInstance().CPUCorePercent();
......@@ -166,7 +203,9 @@ PrometheusMetrics::CPUCoreUsagePercentSet() {
void
PrometheusMetrics::GPUTemperature() {
if (!startup_) return;
if (!startup_) {
return;
}
std::vector<uint64_t> GPU_temperatures = server::SystemInfo::GetInstance().GPUTemperature();
......@@ -178,7 +217,9 @@ PrometheusMetrics::GPUTemperature() {
void
PrometheusMetrics::CPUTemperature() {
if (!startup_) return;
if (!startup_) {
return;
}
std::vector<float> CPU_temperatures = server::SystemInfo::GetInstance().CPUTemperature();
......
......@@ -31,7 +31,8 @@ namespace server {
void
SystemInfo::Init() {
if (initialized_) return;
if (initialized_)
return;
initialized_ = true;
......@@ -45,7 +46,8 @@ SystemInfo::Init() {
file = fopen("/proc/cpuinfo", "r");
num_processors_ = 0;
while (fgets(line, 128, file) != NULL) {
if (strncmp(line, "processor", 9) == 0) num_processors_++;
if (strncmp(line, "processor", 9) == 0)
num_processors_++;
if (strncmp(line, "physical", 8) == 0) {
num_physical_processors_ = ParseLine(line);
}
......@@ -116,7 +118,8 @@ SystemInfo::GetProcessUsedMemory() {
double
SystemInfo::MemoryPercent() {
if (!initialized_) Init();
if (!initialized_)
Init();
return (double)(GetProcessUsedMemory() * 100) / (double)total_ram_;
}
......@@ -171,7 +174,8 @@ SystemInfo::getTotalCpuTime(std::vector<uint64_t>& work_time_array) {
double
SystemInfo::CPUPercent() {
if (!initialized_) Init();
if (!initialized_)
Init();
struct tms time_sample;
clock_t now;
double percent;
......@@ -195,7 +199,8 @@ SystemInfo::CPUPercent() {
std::vector<uint64_t>
SystemInfo::GPUMemoryTotal() {
// get GPU usage percent
if (!initialized_) Init();
if (!initialized_)
Init();
std::vector<uint64_t> result;
nvmlMemory_t nvmlMemory;
for (int i = 0; i < num_device_; ++i) {
......@@ -209,7 +214,8 @@ SystemInfo::GPUMemoryTotal() {
std::vector<uint64_t>
SystemInfo::GPUTemperature() {
if (!initialized_) Init();
if (!initialized_)
Init();
std::vector<uint64_t> result;
for (int i = 0; i < num_device_; i++) {
nvmlDevice_t device;
......@@ -241,7 +247,8 @@ SystemInfo::CPUTemperature() {
std::vector<uint64_t>
SystemInfo::GPUMemoryUsed() {
// get GPU memory used
if (!initialized_) Init();
if (!initialized_)
Init();
std::vector<uint64_t> result;
nvmlMemory_t nvmlMemory;
......
......@@ -48,7 +48,7 @@ class ResourceMgr {
Add(ResourcePtr&& resource);
bool
Connect(const std::string& res1, const std::string& res2, Connection& connection);
Connect(const std::string& name1, const std::string& name2, Connection& connection);
void
Clear();
......
......@@ -30,7 +30,8 @@ get_neighbours(const ResourcePtr& self) {
std::vector<ResourcePtr> neighbours;
for (auto& neighbour_node : self->GetNeighbours()) {
auto node = neighbour_node.neighbour_node.lock();
if (not node) continue;
if (not node)
continue;
auto resource = std::static_pointer_cast<Resource>(node);
// if (not resource->HasExecutor()) continue;
......@@ -45,7 +46,8 @@ get_neighbours_with_connetion(const ResourcePtr& self) {
std::vector<std::pair<ResourcePtr, Connection>> neighbours;
for (auto& neighbour_node : self->GetNeighbours()) {
auto node = neighbour_node.neighbour_node.lock();
if (not node) continue;
if (not node)
continue;
auto resource = std::static_pointer_cast<Resource>(node);
// if (not resource->HasExecutor()) continue;
......
......@@ -92,7 +92,8 @@ uint64_t
Resource::NumOfTaskToExec() {
uint64_t count = 0;
for (auto& task : task_table_) {
if (task->state == TaskTableItemState::LOADED) ++count;
if (task->state == TaskTableItemState::LOADED)
++count;
}
return count;
}
......@@ -102,7 +103,8 @@ Resource::pick_task_load() {
auto indexes = task_table_.PickToLoad(10);
for (auto index : indexes) {
// try to set one task loading, then return
if (task_table_.Load(index)) return task_table_.Get(index);
if (task_table_.Load(index))
return task_table_.Get(index);
// else try next
}
return nullptr;
......@@ -113,7 +115,8 @@ Resource::pick_task_execute() {
auto indexes = task_table_.PickToExecute(3);
for (auto index : indexes) {
// try to set one task executing, then return
if (task_table_.Execute(index)) return task_table_.Get(index);
if (task_table_.Execute(index))
return task_table_.Get(index);
// else try next
}
return nullptr;
......
......@@ -26,7 +26,7 @@ namespace scheduler {
class XDeleteTask : public Task {
public:
explicit XDeleteTask(const scheduler::DeleteJobPtr& job);
explicit XDeleteTask(const scheduler::DeleteJobPtr& delete_job);
void
Load(LoadType type, uint8_t device_id) override;
......
......@@ -31,9 +31,8 @@ UriCheck(const std::string& uri) {
size_t index = uri.find_first_of(':', 0);
if (index == std::string::npos) {
return false;
} else {
return true;
}
return true;
}
Status
......@@ -45,11 +44,11 @@ ClientProxy::Connect(const ConnectParam& param) {
connected_ = true;
client_ptr_ = std::make_shared<GrpcClient>(channel_);
return Status::OK();
} else {
std::string reason = "connect failed!";
connected_ = false;
return Status(StatusCode::NotConnected, reason);
}
std::string reason = "connect failed!";
connected_ = false;
return Status(StatusCode::NotConnected, reason);
}
Status
......@@ -134,7 +133,7 @@ ClientProxy::CreateIndex(const IndexParam& index_param) {
// TODO: add index params
::milvus::grpc::IndexParam grpc_index_param;
grpc_index_param.set_table_name(index_param.table_name);
grpc_index_param.mutable_index()->set_index_type((int32_t)index_param.index_type);
grpc_index_param.mutable_index()->set_index_type(static_cast<int32_t>(index_param.index_type));
grpc_index_param.mutable_index()->set_nlist(index_param.nlist);
return client_ptr_->CreateIndex(grpc_index_param);
} catch (std::exception& ex) {
......
......@@ -85,7 +85,7 @@ class Status {
MoveFrom(Status& s);
private:
const char* state_ = nullptr;
char* state_ = nullptr;
}; // Status
} // namespace milvus
......@@ -28,7 +28,7 @@ Status::Status(StatusCode code, const std::string& msg) {
// 4 bytes store message length
// the left bytes store message string
const uint32_t length = (uint32_t)msg.size();
char* result = new char[length + sizeof(length) + CODE_WIDTH];
auto result = new char[length + sizeof(length) + CODE_WIDTH];
memcpy(result, &code, CODE_WIDTH);
memcpy(result + CODE_WIDTH, &length, sizeof(length));
memcpy(result + sizeof(length) + CODE_WIDTH, msg.data(), length);
......@@ -75,7 +75,7 @@ Status::CopyFrom(const Status& s) {
memcpy(&length, s.state_ + CODE_WIDTH, sizeof(length));
int buff_len = length + sizeof(length) + CODE_WIDTH;
state_ = new char[buff_len];
memcpy((void*)state_, (void*)s.state_, buff_len);
memcpy(state_, s.state_, buff_len);
}
void
......
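
The Status change above swaps the state_ member from const char* to char*, which lets CopyFrom pass the pointers to memcpy directly instead of casting the constness away through (void*). A minimal standalone sketch of that cast-free pattern, using a plain buffer rather than the real Status layout:

#include <cstring>
#include <string>

int
main() {
    std::string msg = "connect failed!";

    // With a mutable char* buffer, memcpy takes the pointers as-is; the old
    // const char* member forced a (void*) cast that discarded const.
    char* state = new char[msg.size() + 1];
    std::memcpy(state, msg.data(), msg.size());
    state[msg.size()] = '\0';
    delete[] state;
    return 0;
}
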
This diff is collapsed.
......@@ -30,9 +30,6 @@ namespace zilliz {
namespace milvus {
namespace server {
DBWrapper::DBWrapper() {
}
Status
DBWrapper::StartService() {
Config& config = Config::GetInstance();
......@@ -41,27 +38,37 @@ DBWrapper::StartService() {
engine::DBOptions opt;
s = config.GetDBConfigBackendUrl(opt.meta_.backend_uri_);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
std::string path;
s = config.GetDBConfigPrimaryPath(path);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
opt.meta_.path_ = path + "/db";
std::string db_slave_path;
s = config.GetDBConfigSecondaryPath(db_slave_path);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
StringHelpFunctions::SplitStringByDelimeter(db_slave_path, ";", opt.meta_.slave_paths_);
// cache config
s = config.GetCacheConfigCacheInsertData(opt.insert_cache_immediately_);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
std::string mode;
s = config.GetServerConfigDeployMode(mode);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
if (mode == "single") {
opt.mode_ = engine::DBOptions::MODE::SINGLE;
......@@ -78,14 +85,17 @@ DBWrapper::StartService() {
// engine config
int32_t omp_thread;
s = config.GetEngineConfigOmpThreadNum(omp_thread);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
if (omp_thread > 0) {
omp_set_num_threads(omp_thread);
SERVER_LOG_DEBUG << "Specify openmp thread number: " << omp_thread;
} else {
uint32_t sys_thread_cnt = 8;
if (CommonUtil::GetSystemAvailableThreads(sys_thread_cnt)) {
omp_thread = (int32_t)ceil(sys_thread_cnt * 0.5);
omp_thread = static_cast<int32_t>(ceil(sys_thread_cnt * 0.5));
omp_set_num_threads(omp_thread);
}
}
......@@ -93,20 +103,29 @@ DBWrapper::StartService() {
// init faiss global variable
int32_t use_blas_threshold;
s = config.GetEngineConfigUseBlasThreshold(use_blas_threshold);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
faiss::distance_compute_blas_threshold = use_blas_threshold;
// set archive config
engine::ArchiveConf::CriteriaT criterial;
int32_t disk, days;
s = config.GetDBConfigArchiveDiskThreshold(disk);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
if (disk > 0) {
criterial[engine::ARCHIVE_CONF_DISK] = disk;
}
s = config.GetDBConfigArchiveDaysThreshold(days);
if (!s.ok()) return s;
if (!s.ok()) {
return s;
}
if (days > 0) {
criterial[engine::ARCHIVE_CONF_DAYS] = days;
}
......
......@@ -28,7 +28,7 @@ namespace server {
class DBWrapper {
private:
DBWrapper();
DBWrapper() = default;
~DBWrapper() = default;
public:
......
......@@ -146,7 +146,7 @@ Server::Daemonize() {
void
Server::Start() {
if (daemonized_) {
if (daemonized_ != 0) {
Daemonize();
}
......
......@@ -227,7 +227,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service {
*/
::grpc::Status
ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request,
::milvus::grpc::TableNameList* table_name_list) override;
::milvus::grpc::TableNameList* response) override;
/**
* @brief Give the server status
......
......@@ -163,7 +163,8 @@ GrpcRequestScheduler::Stop() {
}
for (auto iter : execute_threads_) {
if (iter == nullptr) continue;
if (iter == nullptr)
continue;
iter->join();
}
......
......@@ -69,7 +69,7 @@ class GrpcBaseTask {
OnExecute() = 0;
Status
SetStatus(ErrorCode error_code, const std::string& msg);
SetStatus(ErrorCode error_code, const std::string& error_msg);
protected:
mutable std::mutex finish_mtx_;
......
......@@ -159,7 +159,7 @@ CreateTableTask::OnExecute() {
// step 2: construct table schema
engine::meta::TableSchema table_info;
table_info.table_id_ = schema_->table_name();
table_info.dimension_ = (uint16_t)schema_->dimension();
table_info.dimension_ = static_cast<uint16_t>(schema_->dimension());
table_info.index_file_size_ = schema_->index_file_size();
table_info.metric_type_ = schema_->metric_type();
......@@ -446,13 +446,13 @@ InsertTask::OnExecute() {
// all user provide id, or all internal id
bool user_provide_ids = !insert_param_->row_id_array().empty();
// user already provided id before, all insert action require user id
if ((table_info.flag_ & engine::meta::FLAG_MASK_HAS_USERID) && !user_provide_ids) {
if ((table_info.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) {
return Status(SERVER_ILLEGAL_VECTOR_ID,
"Table vector ids are user defined, please provide id for this batch");
}
// user didn't provided id before, no need to provide user id
if ((table_info.flag_ & engine::meta::FLAG_MASK_NO_USERID) && user_provide_ids) {
if ((table_info.flag_ & engine::meta::FLAG_MASK_NO_USERID) != 0 && user_provide_ids) {
return Status(SERVER_ILLEGAL_VECTOR_ID,
"Table vector ids are auto generated, no need to provide id for this batch");
}
......@@ -487,12 +487,12 @@ InsertTask::OnExecute() {
rc.ElapseFromBegin("prepare vectors data");
// step 5: insert vectors
auto vec_count = (uint64_t)insert_param_->row_record_array_size();
auto vec_count = static_cast<uint64_t>(insert_param_->row_record_array_size());
std::vector<int64_t> vec_ids(insert_param_->row_id_array_size(), 0);
if (!insert_param_->row_id_array().empty()) {
const int64_t* src_data = insert_param_->row_id_array().data();
int64_t* target_data = vec_ids.data();
memcpy(target_data, src_data, (size_t)(sizeof(int64_t) * insert_param_->row_id_array_size()));
memcpy(target_data, src_data, static_cast<size_t>(sizeof(int64_t) * insert_param_->row_id_array_size()));
}
status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), vec_count, vec_f.data(), vec_ids);
......@@ -710,7 +710,7 @@ CountTableTask::OnExecute() {
return status;
}
row_count_ = (int64_t)row_count;
row_count_ = static_cast<int64_t>(row_count);
rc.ElapseFromBegin("total cost");
} catch (std::exception& ex) {
......
......@@ -41,7 +41,7 @@ class CreateTableTask : public GrpcBaseTask {
Create(const ::milvus::grpc::TableSchema* schema);
protected:
explicit CreateTableTask(const ::milvus::grpc::TableSchema* request);
explicit CreateTableTask(const ::milvus::grpc::TableSchema* schema);
Status
OnExecute() override;
......@@ -57,7 +57,7 @@ class HasTableTask : public GrpcBaseTask {
Create(const std::string& table_name, bool& has_table);
protected:
HasTableTask(const std::string& request, bool& has_table);
HasTableTask(const std::string& table_name, bool& has_table);
Status
OnExecute() override;
......@@ -104,10 +104,10 @@ class DropTableTask : public GrpcBaseTask {
class CreateIndexTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const ::milvus::grpc::IndexParam* index_Param);
Create(const ::milvus::grpc::IndexParam* index_param);
protected:
explicit CreateIndexTask(const ::milvus::grpc::IndexParam* index_Param);
explicit CreateIndexTask(const ::milvus::grpc::IndexParam* index_param);
Status
OnExecute() override;
......@@ -136,10 +136,10 @@ class ShowTablesTask : public GrpcBaseTask {
class InsertTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const ::milvus::grpc::InsertParam* insert_Param, ::milvus::grpc::VectorIds* record_ids_);
Create(const ::milvus::grpc::InsertParam* insert_param, ::milvus::grpc::VectorIds* record_ids);
protected:
InsertTask(const ::milvus::grpc::InsertParam* insert_Param, ::milvus::grpc::VectorIds* record_ids_);
InsertTask(const ::milvus::grpc::InsertParam* insert_param, ::milvus::grpc::VectorIds* record_ids);
Status
OnExecute() override;
......
......@@ -59,7 +59,9 @@ CommonUtil::GetSystemAvailableThreads(uint32_t& thread_count) {
// threadCnt = std::thread::hardware_concurrency();
thread_count = sysconf(_SC_NPROCESSORS_CONF);
thread_count *= THREAD_MULTIPLY_CPU;
if (thread_count == 0) thread_count = 8;
if (thread_count == 0) {
thread_count = 8;
}
return true;
}
......@@ -162,9 +164,9 @@ CommonUtil::GetFileSize(const std::string& path) {
struct stat file_info;
if (stat(path.c_str(), &file_info) < 0) {
return 0;
} else {
return (uint64_t)file_info.st_size;
}
return static_cast<uint64_t>(file_info.st_size);
}
std::string
......
......@@ -29,7 +29,7 @@ Status::Status(StatusCode code, const std::string& msg) {
// 4 bytes store message length
// the left bytes store message string
const uint32_t length = (uint32_t)msg.size();
char* result = new char[length + sizeof(length) + CODE_WIDTH];
auto result = new char[length + sizeof(length) + CODE_WIDTH];
std::memcpy(result, &code, CODE_WIDTH);
std::memcpy(result + CODE_WIDTH, &length, sizeof(length));
memcpy(result + sizeof(length) + CODE_WIDTH, msg.data(), length);
......@@ -76,7 +76,7 @@ Status::CopyFrom(const Status& s) {
memcpy(&length, s.state_ + CODE_WIDTH, sizeof(length));
int buff_len = length + sizeof(length) + CODE_WIDTH;
state_ = new char[buff_len];
memcpy((void*)state_, (void*)s.state_, buff_len);
memcpy(state_, s.state_, buff_len);
}
void
......
......@@ -71,7 +71,7 @@ class Status {
MoveFrom(Status& s);
private:
const char* state_ = nullptr;
char* state_ = nullptr;
}; // Status
} // namespace milvus
......
......@@ -70,7 +70,8 @@ inline ThreadPool::ThreadPool(size_t threads, size_t queue_size) : max_queue_siz
{
std::unique_lock<std::mutex> lock(this->queue_mutex_);
this->condition_.wait(lock, [this] { return this->stop || !this->tasks_.empty(); });
if (this->stop && this->tasks_.empty()) return;
if (this->stop && this->tasks_.empty())
return;
task = std::move(this->tasks_.front());
this->tasks_.pop();
}
......@@ -95,7 +96,8 @@ ThreadPool::enqueue(F&& f, Args&&... args) -> std::future<typename std::result_o
std::unique_lock<std::mutex> lock(queue_mutex_);
this->condition_.wait(lock, [this] { return this->tasks_.size() < max_queue_size_; });
// don't allow enqueueing after stopping the pool
if (stop) throw std::runtime_error("enqueue on stopped ThreadPool");
if (stop)
throw std::runtime_error("enqueue on stopped ThreadPool");
tasks_.emplace([task]() { (*task)(); });
}
......
......@@ -39,7 +39,8 @@ TimeRecorder::GetTimeSpanStr(double span) {
void
TimeRecorder::PrintTimeRecord(const std::string& msg, double span) {
std::string str_log;
if (!header_.empty()) str_log += header_ + ": ";
if (!header_.empty())
str_log += header_ + ": ";
str_log += msg;
str_log += " (";
str_log += TimeRecorder::GetTimeSpanStr(span);
......
......@@ -83,8 +83,8 @@ ValidationUtil::ValidateTableDimension(int64_t dimension) {
Status
ValidationUtil::ValidateTableIndexType(int32_t index_type) {
int engine_type = (int)engine::EngineType(index_type);
if (engine_type <= 0 || engine_type > (int)engine::EngineType::MAX_VALUE) {
int engine_type = static_cast<int>(engine::EngineType(index_type));
if (engine_type <= 0 || engine_type > static_cast<int>(engine::EngineType::MAX_VALUE)) {
std::string msg = "Invalid index type: " + std::to_string(index_type);
SERVER_LOG_ERROR << msg;
return Status(SERVER_INVALID_INDEX_TYPE, msg);
......@@ -117,7 +117,8 @@ ValidationUtil::ValidateTableIndexFileSize(int64_t index_file_size) {
Status
ValidationUtil::ValidateTableIndexMetricType(int32_t metric_type) {
if (metric_type != (int32_t)engine::MetricType::L2 && metric_type != (int32_t)engine::MetricType::IP) {
if (metric_type != static_cast<int32_t>(engine::MetricType::L2) &&
metric_type != static_cast<int32_t>(engine::MetricType::IP)) {
std::string msg = "Invalid metric type: " + std::to_string(metric_type);
SERVER_LOG_ERROR << msg;
return Status(SERVER_INVALID_INDEX_METRIC_TYPE, msg);
......@@ -151,7 +152,7 @@ Status
ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) {
int num_devices = 0;
auto cuda_err = cudaGetDeviceCount(&num_devices);
if (cuda_err) {
if (cuda_err != cudaSuccess) {
std::string msg = "Failed to get gpu card number, cuda error:" + std::to_string(cuda_err);
SERVER_LOG_ERROR << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
......@@ -222,9 +223,8 @@ ValidationUtil::ValidateStringIsBool(const std::string& str) {
if (s == "true" || s == "on" || s == "yes" || s == "1" || s == "false" || s == "off" || s == "no" || s == "0" ||
s.empty()) {
return Status::OK();
} else {
return Status(SERVER_INVALID_ARGUMENT, "Invalid boolean: " + str);
}
return Status(SERVER_INVALID_ARGUMENT, "Invalid boolean: " + str);
}
Status
......