Commit 924ada33 authored by Link Qian

suite for cuda compute 6.1

Parent 62f15d73
......@@ -45,8 +45,8 @@ class DB {
DISABLE_COPY_AND_ASSIGN(DB);
};
DB* GetDB(DataParameter::DB backend);
DB* GetDB(const string& backend);
//DB* GetDB(DataParameter::DB backend){ return nullptr;}
//DB* GetDB(const string& backend) { return nullptr;}
} // namespace db
} // namespace caffe
......
#ifdef USE_LEVELDB
#ifndef CAFFE_UTIL_DB_LEVELDB_HPP
#define CAFFE_UTIL_DB_LEVELDB_HPP
#include <string>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
#include "caffe/util/db.hpp"
namespace caffe { namespace db {
class LevelDBCursor : public Cursor {
public:
explicit LevelDBCursor(leveldb::Iterator* iter)
: iter_(iter) { SeekToFirst(); }
~LevelDBCursor() { delete iter_; }
virtual void SeekToFirst() { iter_->SeekToFirst(); }
virtual void Next() { iter_->Next(); }
virtual string key() { return iter_->key().ToString(); }
virtual string value() { return iter_->value().ToString(); }
virtual bool valid() { return iter_->Valid(); }
private:
leveldb::Iterator* iter_;
};
class LevelDBTransaction : public Transaction {
public:
explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOTNULL(db_); }
virtual void Put(const string& key, const string& value) {
batch_.Put(key, value);
}
virtual void Commit() {
leveldb::Status status = db_->Write(leveldb::WriteOptions(), &batch_);
CHECK(status.ok()) << "Failed to write batch to leveldb "
<< std::endl << status.ToString();
}
private:
leveldb::DB* db_;
leveldb::WriteBatch batch_;
DISABLE_COPY_AND_ASSIGN(LevelDBTransaction);
};
class LevelDB : public DB {
public:
LevelDB() : db_(NULL) { }
virtual ~LevelDB() { Close(); }
virtual void Open(const string& source, Mode mode);
virtual void Close() {
if (db_ != NULL) {
delete db_;
db_ = NULL;
}
}
virtual LevelDBCursor* NewCursor() {
return new LevelDBCursor(db_->NewIterator(leveldb::ReadOptions()));
}
virtual LevelDBTransaction* NewTransaction() {
return new LevelDBTransaction(db_);
}
private:
leveldb::DB* db_;
};
} // namespace db
} // namespace caffe
#endif // CAFFE_UTIL_DB_LEVELDB_HPP
#endif // USE_LEVELDB
//#ifdef USE_LMDB
#ifndef CAFFE_UTIL_DB_LMDB_HPP
#define CAFFE_UTIL_DB_LMDB_HPP
#include <string>
#include <vector>
#include "lmdb.h"
#include "caffe/util/db.hpp"
namespace caffe { namespace db {
inline void MDB_CHECK(int mdb_status) {
CHECK_EQ(mdb_status, MDB_SUCCESS) << mdb_strerror(mdb_status);
}
class LMDBCursor : public Cursor {
public:
explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor)
: mdb_txn_(mdb_txn), mdb_cursor_(mdb_cursor), valid_(false) {
SeekToFirst();
}
virtual ~LMDBCursor() {
mdb_cursor_close(mdb_cursor_);
mdb_txn_abort(mdb_txn_);
}
virtual void SeekToFirst() { Seek(MDB_FIRST); }
virtual void Next() { Seek(MDB_NEXT); }
virtual string key() {
return string(static_cast<const char*>(mdb_key_.mv_data), mdb_key_.mv_size);
}
virtual string value() {
return string(static_cast<const char*>(mdb_value_.mv_data),
mdb_value_.mv_size);
}
virtual bool valid() { return valid_; }
private:
void Seek(MDB_cursor_op op) {
int mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op);
if (mdb_status == MDB_NOTFOUND) {
valid_ = false;
} else {
MDB_CHECK(mdb_status);
valid_ = true;
}
}
MDB_txn* mdb_txn_;
MDB_cursor* mdb_cursor_;
MDB_val mdb_key_, mdb_value_;
bool valid_;
};
class LMDBTransaction : public Transaction {
public:
explicit LMDBTransaction(MDB_env* mdb_env)
: mdb_env_(mdb_env) { }
virtual void Put(const string& key, const string& value);
virtual void Commit();
private:
MDB_env* mdb_env_;
vector<string> keys, values;
void DoubleMapSize();
DISABLE_COPY_AND_ASSIGN(LMDBTransaction);
};
class LMDB : public DB {
public:
LMDB() : mdb_env_(NULL) { }
virtual ~LMDB() { Close(); }
virtual void Open(const string& source, Mode mode);
virtual void Close() {
if (mdb_env_ != NULL) {
mdb_dbi_close(mdb_env_, mdb_dbi_);
mdb_env_close(mdb_env_);
mdb_env_ = NULL;
}
}
virtual LMDBCursor* NewCursor();
virtual LMDBTransaction* NewTransaction();
private:
MDB_env* mdb_env_;
MDB_dbi mdb_dbi_;
};
} // namespace db
} // namespace caffe
#endif // CAFFE_UTIL_DB_LMDB_HPP
//#endif // USE_LMDB
#ifndef CAFFE_UTIL_HDF5_H_
#define CAFFE_UTIL_HDF5_H_
#include <string>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/blob.hpp"
namespace caffe {
template <typename Dtype>
void hdf5_load_nd_dataset_helper(
hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
Blob<Dtype>* blob);
template <typename Dtype>
void hdf5_load_nd_dataset(
hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
Blob<Dtype>* blob);
template <typename Dtype>
void hdf5_save_nd_dataset(
const hid_t file_id, const string& dataset_name, const Blob<Dtype>& blob,
bool write_diff = false);
int hdf5_load_int(hid_t loc_id, const string& dataset_name);
void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i);
string hdf5_load_string(hid_t loc_id, const string& dataset_name);
void hdf5_save_string(hid_t loc_id, const string& dataset_name,
const string& s);
int hdf5_get_num_links(hid_t loc_id);
string hdf5_get_name_by_idx(hid_t loc_id, int idx);
} // namespace caffe
#endif // CAFFE_UTIL_HDF5_H_
......@@ -74,6 +74,9 @@ void initGlog() {
google::SetLogDestination(google::GLOG_ERROR, LOG_ERROR_FILE.c_str());
LOG_FATAL_FILE = FLAGS_log_dir + "FATAL" + now_time + ".txt";
google::SetLogDestination(google::GLOG_FATAL, LOG_FATAL_FILE.c_str());
::google::InitGoogleLogging("");
FLAGS_minloglevel = google::GLOG_ERROR;
}
void GlobalInit(int* pargc, char*** pargv) {
......@@ -84,6 +87,7 @@ void GlobalInit(int* pargc, char*** pargv) {
// Google logging.
initGlog();
::google::InitGoogleLogging(*(pargv)[0]);
FLAGS_minloglevel = google::GLOG_ERROR;
}
#ifdef CPU_ONLY // CPU-only Caffe.
......
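For context, a minimal sketch of how an application drives this initialization path (assuming the standard caffe::GlobalInit entry point declared in caffe/common.hpp; the log message is illustrative only):
#include "caffe/common.hpp"
int main(int argc, char** argv) {
  // Parses gflags, runs initGlog(), and applies FLAGS_minloglevel as above.
  caffe::GlobalInit(&argc, &argv);
  LOG(ERROR) << "logging initialized";
  return 0;
}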
......@@ -71,7 +71,8 @@ DataReader::Body::~Body() {
}
void DataReader::Body::InternalThreadEntry() {
shared_ptr<db::DB> db(db::GetDB(param_.data_param().backend()));
//shared_ptr<db::DB> db(db::GetDB(param_.data_param().backend()));
shared_ptr<db::DB> db(nullptr);
db->Open(param_.data_param().source(), db::READ);
shared_ptr<db::Cursor> cursor(db->NewCursor());
vector<shared_ptr<QueuePair> > qps;
......
......@@ -15,10 +15,10 @@ template <typename Dtype>
DataLayer<Dtype>::DataLayer(const LayerParameter& param)
: BasePrefetchingDataLayer<Dtype>(param),
offset_() {
db_.reset(db::GetDB(param.data_param().backend()));
db_->Open(param.data_param().source(), db::READ);
cursor_.reset(db_->NewCursor());
rand_skip_num_ = param.data_param().rand_skip();
//db_.reset(db::GetDB(param.data_param().backend()));
//db_->Open(param.data_param().source(), db::READ);
//cursor_.reset(db_->NewCursor());
//rand_skip_num_ = param.data_param().rand_skip();
}
template <typename Dtype>
......
/*
TODO:
- load file in a separate thread ("prefetch")
- can be smarter about the memcpy call instead of doing it row-by-row
:: use util functions caffe_copy, and Blob->offset()
:: don't forget to update hdf5_data_layer.cu accordingly
- add ability to shuffle filenames if flag is set
*/
#include <fstream> // NOLINT(readability/streams)
#include <string>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "stdint.h"
#include "caffe/layers/hdf5_data_layer.hpp"
#include "caffe/util/hdf5.hpp"
namespace caffe {
template <typename Dtype>
HDF5DataLayer<Dtype>::~HDF5DataLayer<Dtype>() { }
// Load data and label from HDF5 filename into the class property blobs.
template <typename Dtype>
void HDF5DataLayer<Dtype>::LoadHDF5FileData(const char* filename) {
DLOG(INFO) << "Loading HDF5 file: " << filename;
hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
if (file_id < 0) {
LOG(FATAL) << "Failed opening HDF5 file: " << filename;
}
int top_size = this->layer_param_.top_size();
hdf_blobs_.resize(top_size);
const int MIN_DATA_DIM = 1;
const int MAX_DATA_DIM = INT_MAX;
for (int i = 0; i < top_size; ++i) {
hdf_blobs_[i] = shared_ptr<Blob<Dtype> >(new Blob<Dtype>());
hdf5_load_nd_dataset(file_id, this->layer_param_.top(i).c_str(),
MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get());
}
herr_t status = H5Fclose(file_id);
CHECK_GE(status, 0) << "Failed to close HDF5 file: " << filename;
// MinTopBlobs==1 guarantees at least one top blob
CHECK_GE(hdf_blobs_[0]->num_axes(), 1) << "Input must have at least 1 axis.";
const int num = hdf_blobs_[0]->shape(0);
for (int i = 1; i < top_size; ++i) {
CHECK_EQ(hdf_blobs_[i]->shape(0), num);
}
// Default to identity permutation.
data_permutation_.clear();
data_permutation_.resize(hdf_blobs_[0]->shape(0));
for (int i = 0; i < hdf_blobs_[0]->shape(0); i++)
data_permutation_[i] = i;
// Shuffle if needed.
if (this->layer_param_.hdf5_data_param().shuffle()) {
std::random_shuffle(data_permutation_.begin(), data_permutation_.end());
DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0)
<< " rows (shuffled)";
} else {
DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows";
}
}
template <typename Dtype>
void HDF5DataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Refuse transformation parameters since HDF5 is totally generic.
CHECK(!this->layer_param_.has_transform_param()) <<
this->type() << " does not transform data.";
// Read the source to parse the filenames.
const string& source = this->layer_param_.hdf5_data_param().source();
LOG(INFO) << "Loading list of HDF5 filenames from: " << source;
hdf_filenames_.clear();
std::ifstream source_file(source.c_str());
if (source_file.is_open()) {
std::string line;
while (source_file >> line) {
hdf_filenames_.push_back(line);
}
} else {
LOG(FATAL) << "Failed to open source file: " << source;
}
source_file.close();
num_files_ = hdf_filenames_.size();
current_file_ = 0;
LOG(INFO) << "Number of HDF5 files: " << num_files_;
CHECK_GE(num_files_, 1) << "Must have at least 1 HDF5 filename listed in "
<< source;
file_permutation_.clear();
file_permutation_.resize(num_files_);
// Default to identity permutation.
for (int i = 0; i < num_files_; i++) {
file_permutation_[i] = i;
}
// Shuffle if needed.
if (this->layer_param_.hdf5_data_param().shuffle()) {
std::random_shuffle(file_permutation_.begin(), file_permutation_.end());
}
// Load the first HDF5 file and initialize the line counter.
LoadHDF5FileData(hdf_filenames_[file_permutation_[current_file_]].c_str());
current_row_ = 0;
// Reshape blobs.
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
const int top_size = this->layer_param_.top_size();
vector<int> top_shape;
for (int i = 0; i < top_size; ++i) {
top_shape.resize(hdf_blobs_[i]->num_axes());
top_shape[0] = batch_size;
for (int j = 1; j < top_shape.size(); ++j) {
top_shape[j] = hdf_blobs_[i]->shape(j);
}
top[i]->Reshape(top_shape);
}
}
template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
for (int i = 0; i < batch_size; ++i, ++current_row_) {
if (current_row_ == hdf_blobs_[0]->shape(0)) {
if (num_files_ > 1) {
++current_file_;
if (current_file_ == num_files_) {
current_file_ = 0;
if (this->layer_param_.hdf5_data_param().shuffle()) {
std::random_shuffle(file_permutation_.begin(),
file_permutation_.end());
}
DLOG(INFO) << "Looping around to first file.";
}
LoadHDF5FileData(
hdf_filenames_[file_permutation_[current_file_]].c_str());
}
current_row_ = 0;
if (this->layer_param_.hdf5_data_param().shuffle())
std::random_shuffle(data_permutation_.begin(), data_permutation_.end());
}
for (int j = 0; j < this->layer_param_.top_size(); ++j) {
int data_dim = top[j]->count() / top[j]->shape(0);
caffe_copy(data_dim,
&hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
* data_dim], &top[j]->mutable_cpu_data()[i * data_dim]);
}
}
}
#ifdef CPU_ONLY
STUB_GPU_FORWARD(HDF5DataLayer, Forward);
#endif
INSTANTIATE_CLASS(HDF5DataLayer);
REGISTER_LAYER_CLASS(HDF5Data);
} // namespace caffe
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/
#include <stdint.h>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/layers/hdf5_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
for (int i = 0; i < batch_size; ++i, ++current_row_) {
if (current_row_ == hdf_blobs_[0]->shape(0)) {
if (num_files_ > 1) {
current_file_ += 1;
if (current_file_ == num_files_) {
current_file_ = 0;
if (this->layer_param_.hdf5_data_param().shuffle()) {
std::random_shuffle(file_permutation_.begin(),
file_permutation_.end());
}
DLOG(INFO) << "Looping around to first file.";
}
LoadHDF5FileData(
hdf_filenames_[file_permutation_[current_file_]].c_str());
}
current_row_ = 0;
if (this->layer_param_.hdf5_data_param().shuffle())
std::random_shuffle(data_permutation_.begin(), data_permutation_.end());
}
for (int j = 0; j < this->layer_param_.top_size(); ++j) {
int data_dim = top[j]->count() / top[j]->shape(0);
caffe_copy(data_dim,
&hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
* data_dim], &top[j]->mutable_gpu_data()[i * data_dim]);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);
} // namespace caffe
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/layers/hdf5_output_layer.hpp"
#include "caffe/util/hdf5.hpp"
namespace caffe {
template <typename Dtype>
void HDF5OutputLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
file_name_ = this->layer_param_.hdf5_output_param().file_name();
file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(file_id_, 0) << "Failed to open HDF5 file" << file_name_;
file_opened_ = true;
}
template <typename Dtype>
HDF5OutputLayer<Dtype>::~HDF5OutputLayer<Dtype>() {
if (file_opened_) {
herr_t status = H5Fclose(file_id_);
CHECK_GE(status, 0) << "Failed to close HDF5 file " << file_name_;
}
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::SaveBlobs() {
// TODO: no limit on the number of blobs
LOG(INFO) << "Saving HDF5 file " << file_name_;
CHECK_EQ(data_blob_.num(), label_blob_.num()) <<
"data blob and label blob must have the same batch size";
hdf5_save_nd_dataset(file_id_, HDF5_DATA_DATASET_NAME, data_blob_);
hdf5_save_nd_dataset(file_id_, HDF5_DATA_LABEL_NAME, label_blob_);
LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows";
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_GE(bottom.size(), 2);
CHECK_EQ(bottom[0]->num(), bottom[1]->num());
data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
bottom[1]->height(), bottom[1]->width());
const int data_datum_dim = bottom[0]->count() / bottom[0]->num();
const int label_datum_dim = bottom[1]->count() / bottom[1]->num();
for (int i = 0; i < bottom[0]->num(); ++i) {
caffe_copy(data_datum_dim, &bottom[0]->cpu_data()[i * data_datum_dim],
&data_blob_.mutable_cpu_data()[i * data_datum_dim]);
caffe_copy(label_datum_dim, &bottom[1]->cpu_data()[i * label_datum_dim],
&label_blob_.mutable_cpu_data()[i * label_datum_dim]);
}
SaveBlobs();
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
}
#ifdef CPU_ONLY
STUB_GPU(HDF5OutputLayer);
#endif
INSTANTIATE_CLASS(HDF5OutputLayer);
REGISTER_LAYER_CLASS(HDF5Output);
} // namespace caffe
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/layers/hdf5_output_layer.hpp"
namespace caffe {
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_GE(bottom.size(), 2);
CHECK_EQ(bottom[0]->num(), bottom[1]->num());
data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
bottom[1]->height(), bottom[1]->width());
const int data_datum_dim = bottom[0]->count() / bottom[0]->num();
const int label_datum_dim = bottom[1]->count() / bottom[1]->num();
for (int i = 0; i < bottom[0]->num(); ++i) {
caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim],
&data_blob_.mutable_cpu_data()[i * data_datum_dim]);
caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim],
&label_blob_.mutable_cpu_data()[i * label_datum_dim]);
}
SaveBlobs();
}
template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
}
INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer);
} // namespace caffe
......@@ -41,7 +41,8 @@ struct CTAReduce {
T shuff;
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
shuff = __shfl_down(x, offset,32);
//shuff = __shfl_down(x, offset,32);
shuff = __shfl_down_sync(0xFFFFFFFF, x, offset,32);
if (tid + offset < count && tid < offset)
x = g(x, shuff);
}
......
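The sync-qualified shuffle is the key change for newer toolchains: on CUDA 9 and later the unsynchronized __shfl_down is deprecated in favor of __shfl_down_sync, which takes an explicit member mask naming the lanes that participate. A minimal sketch of the same warp-reduction pattern in isolation (full-warp mask assumed; the helper name is illustrative):
__inline__ __device__ float warp_reduce_sum(float val) {
  // Each step folds the upper half of the warp's lanes onto the lower half.
  for (int offset = 16; offset > 0; offset /= 2) {
    val += __shfl_down_sync(0xFFFFFFFF, val, offset, 32);
  }
  return val;  // lane 0 ends up holding the warp-wide sum
}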
......@@ -5,14 +5,14 @@
#include <utility>
#include <vector>
#include "hdf5.h"
//#include "hdf5.h"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/hdf5.hpp"
//#include "caffe/util/hdf5.hpp"
#include "caffe/util/insert_splits.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"
......@@ -804,52 +804,7 @@ void Net<Dtype>::CopyTrainedLayersFromBinaryProto(
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFromHDF5(const string trained_filename) {
hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename;
hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT);
CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename;
int num_layers = hdf5_get_num_links(data_hid);
for (int i = 0; i < num_layers; ++i) {
string source_layer_name = hdf5_get_name_by_idx(data_hid, i);
if (!layer_names_index_.count(source_layer_name)) {
LOG(INFO) << "Ignoring source layer " << source_layer_name;
continue;
}
int target_layer_id = layer_names_index_[source_layer_name];
DLOG(INFO) << "Copying source layer " << source_layer_name;
vector<shared_ptr<Blob<Dtype> > >& target_blobs =
layers_[target_layer_id]->blobs();
hid_t layer_hid = H5Gopen2(data_hid, source_layer_name.c_str(),
H5P_DEFAULT);
CHECK_GE(layer_hid, 0)
<< "Error reading weights from " << trained_filename;
// Check that source layer doesn't have more params than target layer
int num_source_params = hdf5_get_num_links(layer_hid);
CHECK_LE(num_source_params, target_blobs.size())
<< "Incompatible number of blobs for layer " << source_layer_name;
for (int j = 0; j < target_blobs.size(); ++j) {
ostringstream oss;
oss << j;
string dataset_name = oss.str();
int target_net_param_id = param_id_vecs_[target_layer_id][j];
if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) {
// Target param doesn't exist in source weights...
if (param_owners_[target_net_param_id] != -1) {
// ...but it's weight-shared in target, so that's fine.
continue;
} else {
LOG(FATAL) << "Incompatible number of blobs for layer "
<< source_layer_name;
}
}
hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes,
target_blobs[j].get());
}
H5Gclose(layer_hid);
}
H5Gclose(data_hid);
H5Fclose(file_hid);
}
template <typename Dtype>
......@@ -866,59 +821,59 @@ void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
template <typename Dtype>
void Net<Dtype>::ToHDF5(const string& filename, bool write_diff) const {
hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(file_hid, 0)
<< "Couldn't open " << filename << " to save weights.";
hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << ".";
hid_t diff_hid = -1;
if (write_diff) {
diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << ".";
}
for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
const LayerParameter& layer_param = layers_[layer_id]->layer_param();
string layer_name = layer_param.name();
hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(),
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK_GE(layer_data_hid, 0)
<< "Error saving weights to " << filename << ".";
hid_t layer_diff_hid = -1;
if (write_diff) {
layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(),
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK_GE(layer_diff_hid, 0)
<< "Error saving weights to " << filename << ".";
}
int num_params = layers_[layer_id]->blobs().size();
for (int param_id = 0; param_id < num_params; ++param_id) {
ostringstream dataset_name;
dataset_name << param_id;
const int net_param_id = param_id_vecs_[layer_id][param_id];
if (param_owners_[net_param_id] == -1) {
// Only save params that own themselves
hdf5_save_nd_dataset<Dtype>(layer_data_hid, dataset_name.str(),
*params_[net_param_id]);
}
if (write_diff) {
// hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
// H5P_DEFAULT);
// CHECK_GE(file_hid, 0)
// << "Couldn't open " << filename << " to save weights.";
// hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT,
// H5P_DEFAULT);
// CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << ".";
// hid_t diff_hid = -1;
// if (write_diff) {
// diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT,
// H5P_DEFAULT);
// CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << ".";
// }
// for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
// const LayerParameter& layer_param = layers_[layer_id]->layer_param();
// string layer_name = layer_param.name();
// hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(),
// H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
// CHECK_GE(layer_data_hid, 0)
// << "Error saving weights to " << filename << ".";
// hid_t layer_diff_hid = -1;
// if (write_diff) {
// layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(),
// H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
// CHECK_GE(layer_diff_hid, 0)
// << "Error saving weights to " << filename << ".";
// }
// int num_params = layers_[layer_id]->blobs().size();
// for (int param_id = 0; param_id < num_params; ++param_id) {
// ostringstream dataset_name;
// dataset_name << param_id;
// const int net_param_id = param_id_vecs_[layer_id][param_id];
// if (param_owners_[net_param_id] == -1) {
// // Only save params that own themselves
// hdf5_save_nd_dataset<Dtype>(layer_data_hid, dataset_name.str(),
// *params_[net_param_id]);
// }
// if (write_diff) {
// Write diffs regardless of weight-sharing
hdf5_save_nd_dataset<Dtype>(layer_diff_hid, dataset_name.str(),
*params_[net_param_id], true);
}
}
H5Gclose(layer_data_hid);
if (write_diff) {
H5Gclose(layer_diff_hid);
}
}
H5Gclose(data_hid);
if (write_diff) {
H5Gclose(diff_hid);
}
H5Fclose(file_hid);
// hdf5_save_nd_dataset<Dtype>(layer_diff_hid, dataset_name.str(),
// *params_[net_param_id], true);
// }
// }
// H5Gclose(layer_data_hid);
// if (write_diff) {
// H5Gclose(layer_diff_hid);
// }
// }
// H5Gclose(data_hid);
//// if (write_diff) {
// H5Gclose(diff_hid);
// }
// H5Fclose(file_hid);
}
template <typename Dtype>
......
......@@ -2,7 +2,7 @@
// source: src/caffe/proto/caffe.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include "src/caffe/proto/caffe.pb.h"
#include "caffe/proto/caffe.pb.h"
#include <algorithm>
This diff is collapsed.
......@@ -5,7 +5,7 @@
#include "caffe/solver.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/hdf5.hpp"
//#include "caffe/util/hdf5.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"
......@@ -420,7 +420,7 @@ void Solver<Dtype>::Snapshot() {
model_filename = SnapshotToBinaryProto();
break;
case caffe::SolverParameter_SnapshotFormat_HDF5:
model_filename = SnapshotToHDF5();
//model_filename = SnapshotToHDF5();
break;
default:
LOG(FATAL) << "Unsupported snapshot format.";
......@@ -465,10 +465,11 @@ string Solver<Dtype>::SnapshotToBinaryProto() {
template <typename Dtype>
string Solver<Dtype>::SnapshotToHDF5() {
string model_filename = SnapshotFilename(".caffemodel.h5");
LOG(INFO) << "Snapshotting to HDF5 file " << model_filename;
net_->ToHDF5(model_filename, param_.snapshot_diff());
return model_filename;
//string model_filename = SnapshotFilename(".caffemodel.h5");
//LOG(INFO) << "Snapshotting to HDF5 file " << model_filename;
//net_->ToHDF5(model_filename, param_.snapshot_diff());
//return model_filename;
return "";
}
template <typename Dtype>
......@@ -477,7 +478,7 @@ void Solver<Dtype>::Restore(const char* state_file) {
string state_filename(state_file);
if (state_filename.size() >= 3 &&
state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) {
RestoreSolverStateFromHDF5(state_filename);
//RestoreSolverStateFromHDF5(state_filename);
} else {
RestoreSolverStateFromBinaryProto(state_filename);
}
......
......@@ -2,7 +2,7 @@
#include <vector>
#include "caffe/sgd_solvers.hpp"
#include "caffe/util/hdf5.hpp"
// #include "caffe/util/hdf5.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"
......@@ -249,7 +249,7 @@ void SGDSolver<Dtype>::SnapshotSolverState(const string& model_filename) {
SnapshotSolverStateToBinaryProto(model_filename);
break;
case caffe::SolverParameter_SnapshotFormat_HDF5:
SnapshotSolverStateToHDF5(model_filename);
//SnapshotSolverStateToHDF5(model_filename);
break;
default:
LOG(FATAL) << "Unsupported snapshot format.";
......@@ -278,27 +278,27 @@ void SGDSolver<Dtype>::SnapshotSolverStateToBinaryProto(
template <typename Dtype>
void SGDSolver<Dtype>::SnapshotSolverStateToHDF5(
const string& model_filename) {
string snapshot_filename =
Solver<Dtype>::SnapshotFilename(".solverstate.h5");
LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename;
hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC,
H5P_DEFAULT, H5P_DEFAULT);
CHECK_GE(file_hid, 0)
<< "Couldn't open " << snapshot_filename << " to save solver state.";
hdf5_save_int(file_hid, "iter", this->iter_);
hdf5_save_string(file_hid, "learned_net", model_filename);
hdf5_save_int(file_hid, "current_step", this->current_step_);
hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(history_hid, 0)
<< "Error saving solver state to " << snapshot_filename << ".";
for (int i = 0; i < history_.size(); ++i) {
ostringstream oss;
oss << i;
hdf5_save_nd_dataset<Dtype>(history_hid, oss.str(), *history_[i]);
}
H5Gclose(history_hid);
H5Fclose(file_hid);
// string snapshot_filename =
// Solver<Dtype>::SnapshotFilename(".solverstate.h5");
// LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename;
// hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC,
// H5P_DEFAULT, H5P_DEFAULT);
// CHECK_GE(file_hid, 0)
// << "Couldn't open " << snapshot_filename << " to save solver state.";
// hdf5_save_int(file_hid, "iter", this->iter_);
// hdf5_save_string(file_hid, "learned_net", model_filename);
// hdf5_save_int(file_hid, "current_step", this->current_step_);
// hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT,
// H5P_DEFAULT);
// CHECK_GE(history_hid, 0)
// << "Error saving solver state to " << snapshot_filename << ".";
// for (int i = 0; i < history_.size(); ++i) {
// ostringstream oss;
// oss << i;
// hdf5_save_nd_dataset<Dtype>(history_hid, oss.str(), *history_[i]);
// }
// H5Gclose(history_hid);
// H5Fclose(file_hid);
}
template <typename Dtype>
......@@ -323,27 +323,27 @@ void SGDSolver<Dtype>::RestoreSolverStateFromBinaryProto(
template <typename Dtype>
void SGDSolver<Dtype>::RestoreSolverStateFromHDF5(const string& state_file) {
hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file;
this->iter_ = hdf5_load_int(file_hid, "iter");
if (H5LTfind_dataset(file_hid, "learned_net")) {
string learned_net = hdf5_load_string(file_hid, "learned_net");
this->net_->CopyTrainedLayersFrom(learned_net);
}
this->current_step_ = hdf5_load_int(file_hid, "current_step");
hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT);
CHECK_GE(history_hid, 0) << "Error reading history from " << state_file;
int state_history_size = hdf5_get_num_links(history_hid);
CHECK_EQ(state_history_size, history_.size())
<< "Incorrect length of history blobs.";
for (int i = 0; i < history_.size(); ++i) {
ostringstream oss;
oss << i;
hdf5_load_nd_dataset<Dtype>(history_hid, oss.str().c_str(), 0,
kMaxBlobAxes, history_[i].get());
}
H5Gclose(history_hid);
H5Fclose(file_hid);
// hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
// CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file;
// this->iter_ = hdf5_load_int(file_hid, "iter");
// if (H5LTfind_dataset(file_hid, "learned_net")) {
// string learned_net = hdf5_load_string(file_hid, "learned_net");
// this->net_->CopyTrainedLayersFrom(learned_net);
// }
// this->current_step_ = hdf5_load_int(file_hid, "current_step");
// hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT);
// CHECK_GE(history_hid, 0) << "Error reading history from " << state_file;
// int state_history_size = hdf5_get_num_links(history_hid);
// CHECK_EQ(state_history_size, history_.size())
// << "Incorrect length of history blobs.";
// for (int i = 0; i < history_.size(); ++i) {
// ostringstream oss;
// oss << i;
// hdf5_load_nd_dataset<Dtype>(history_hid, oss.str().c_str(), 0,
// kMaxBlobAxes, history_[i].get());
// }
// H5Gclose(history_hid);
// H5Fclose(file_hid);
}
INSTANTIATE_CLASS(SGDSolver);
......
#include "caffe/util/db.hpp"
#include "caffe/util/db_leveldb.hpp"
#include "caffe/util/db_lmdb.hpp"
#include <string>
namespace caffe { namespace db {
DB* GetDB(DataParameter::DB backend) {
switch (backend) {
#ifdef USE_LEVELDB
case DataParameter_DB_LEVELDB:
return new LevelDB();
#endif // USE_LEVELDB
#ifdef USE_LMDB
case DataParameter_DB_LMDB:
return new LMDB();
#endif // USE_LMDB
default:
LOG(FATAL) << "Unknown database backend";
return NULL;
}
}
DB* GetDB(const string& backend) {
#ifdef USE_LEVELDB
if (backend == "leveldb") {
return new LevelDB();
}
#endif // USE_LEVELDB
#ifdef USE_LMDB
if (backend == "lmdb") {
return new LMDB();
}
#endif // USE_LMDB
LOG(FATAL) << "Unknown database backend";
return NULL;
}
} // namespace db
} // namespace caffe
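As a reference for the backend selector above, a minimal round-trip sketch against the db interface (assuming the build defines USE_LMDB and that ./example_lmdb is a writable path; names and strings are illustrative only):
#include <iostream>
#include <memory>
#include <string>
#include "caffe/util/db.hpp"
void db_roundtrip_sketch() {
  std::unique_ptr<caffe::db::DB> db(caffe::db::GetDB("lmdb"));
  db->Open("./example_lmdb", caffe::db::NEW);           // NEW mode creates the directory
  std::unique_ptr<caffe::db::Transaction> txn(db->NewTransaction());
  txn->Put("key_0", "value_0");
  txn->Commit();                                        // flushes the buffered puts
  std::unique_ptr<caffe::db::Cursor> cursor(db->NewCursor());
  for (cursor->SeekToFirst(); cursor->valid(); cursor->Next()) {
    std::cout << cursor->key() << " -> " << cursor->value().size() << " bytes\n";
  }
  db->Close();
}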
#ifdef USE_LEVELDB
#include "caffe/util/db_leveldb.hpp"
#include <string>
namespace caffe { namespace db {
void LevelDB::Open(const string& source, Mode mode) {
leveldb::Options options;
options.block_size = 65536;
options.write_buffer_size = 268435456;
options.max_open_files = 100;
options.error_if_exists = mode == NEW;
options.create_if_missing = mode != READ;
leveldb::Status status = leveldb::DB::Open(options, source, &db_);
CHECK(status.ok()) << "Failed to open leveldb " << source
<< std::endl << status.ToString();
LOG(INFO) << "Opened leveldb " << source;
}
} // namespace db
} // namespace caffe
#endif // USE_LEVELDB
#ifdef USE_LMDB
#include "caffe/util/db_lmdb.hpp"
#include <sys/stat.h>
#ifdef _WIN32
#include <direct.h>
#endif
#include <string>
namespace caffe { namespace db {
void LMDB::Open(const string& source, Mode mode) {
MDB_CHECK(mdb_env_create(&mdb_env_));
if (mode == NEW) {
#ifdef _WIN32
CHECK_EQ(_mkdir(source.c_str()), 0) << "mkdir " << source << " failed";
#elif __linux__
CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << " failed";
#endif
}
int flags = 0;
if (mode == READ) {
flags = MDB_RDONLY | MDB_NOTLS;
}
int rc = mdb_env_open(mdb_env_, source.c_str(), flags, 0664);
#ifndef ALLOW_LMDB_NOLOCK
MDB_CHECK(rc);
#else
if (rc == EACCES) {
LOG(WARNING) << "Permission denied. Trying with MDB_NOLOCK ...";
// Close and re-open environment handle
mdb_env_close(mdb_env_);
MDB_CHECK(mdb_env_create(&mdb_env_));
// Try again with MDB_NOLOCK
flags |= MDB_NOLOCK;
MDB_CHECK(mdb_env_open(mdb_env_, source.c_str(), flags, 0664));
} else {
MDB_CHECK(rc);
}
#endif
LOG(INFO) << "Opened lmdb " << source;
}
LMDBCursor* LMDB::NewCursor() {
MDB_txn* mdb_txn;
MDB_cursor* mdb_cursor;
MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn));
MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_));
MDB_CHECK(mdb_cursor_open(mdb_txn, mdb_dbi_, &mdb_cursor));
return new LMDBCursor(mdb_txn, mdb_cursor);
}
LMDBTransaction* LMDB::NewTransaction() {
return new LMDBTransaction(mdb_env_);
}
void LMDBTransaction::Put(const string& key, const string& value) {
keys.push_back(key);
values.push_back(value);
}
void LMDBTransaction::Commit() {
MDB_dbi mdb_dbi;
MDB_val mdb_key, mdb_data;
MDB_txn *mdb_txn;
// Initialize MDB variables
MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn));
MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi));
for (int i = 0; i < keys.size(); i++) {
mdb_key.mv_size = keys[i].size();
mdb_key.mv_data = const_cast<char*>(keys[i].data());
mdb_data.mv_size = values[i].size();
mdb_data.mv_data = const_cast<char*>(values[i].data());
// Add data to the transaction
int put_rc = mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0);
if (put_rc == MDB_MAP_FULL) {
// Out of memory - double the map size and retry
mdb_txn_abort(mdb_txn);
mdb_dbi_close(mdb_env_, mdb_dbi);
DoubleMapSize();
Commit();
return;
}
// May have failed for some other reason
MDB_CHECK(put_rc);
}
// Commit the transaction
int commit_rc = mdb_txn_commit(mdb_txn);
if (commit_rc == MDB_MAP_FULL) {
// Out of memory - double the map size and retry
mdb_dbi_close(mdb_env_, mdb_dbi);
DoubleMapSize();
Commit();
return;
}
// May have failed for some other reason
MDB_CHECK(commit_rc);
// Cleanup after successful commit
mdb_dbi_close(mdb_env_, mdb_dbi);
keys.clear();
values.clear();
}
void LMDBTransaction::DoubleMapSize() {
struct MDB_envinfo current_info;
MDB_CHECK(mdb_env_info(mdb_env_, &current_info));
size_t new_size = current_info.me_mapsize * 2;
DLOG(INFO) << "Doubling LMDB map size to " << (new_size>>20) << "MB ...";
MDB_CHECK(mdb_env_set_mapsize(mdb_env_, new_size));
}
} // namespace db
} // namespace caffe
#endif // USE_LMDB
#include "caffe/util/hdf5.hpp"
#include <string>
#include <vector>
namespace caffe {
// Verifies format of data stored in HDF5 file and reshapes blob accordingly.
template <typename Dtype>
void hdf5_load_nd_dataset_helper(
hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
Blob<Dtype>* blob) {
// Verify that the dataset exists.
CHECK(H5LTfind_dataset(file_id, dataset_name_))
<< "Failed to find HDF5 dataset " << dataset_name_;
// Verify that the number of dimensions is in the accepted range.
herr_t status;
int ndims;
status = H5LTget_dataset_ndims(file_id, dataset_name_, &ndims);
CHECK_GE(status, 0) << "Failed to get dataset ndims for " << dataset_name_;
CHECK_GE(ndims, min_dim);
CHECK_LE(ndims, max_dim);
// Verify that the data format is what we expect: float or double.
std::vector<hsize_t> dims(ndims);
H5T_class_t class_;
status = H5LTget_dataset_info(
file_id, dataset_name_, dims.data(), &class_, NULL);
CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_;
switch (class_) {
case H5T_FLOAT:
{LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_FLOAT"; }
break;
case H5T_INTEGER:
{LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_INTEGER"; }
break;
case H5T_TIME:
{LOG(FATAL) << "Unsupported datatype class: H5T_TIME"; }
case H5T_STRING:
{LOG(FATAL) << "Unsupported datatype class: H5T_STRING"; }
case H5T_BITFIELD:
{LOG(FATAL) << "Unsupported datatype class: H5T_BITFIELD"; }
case H5T_OPAQUE:
{LOG(FATAL) << "Unsupported datatype class: H5T_OPAQUE"; }
case H5T_COMPOUND:
{LOG(FATAL) << "Unsupported datatype class: H5T_COMPOUND"; }
case H5T_REFERENCE:
{LOG(FATAL) << "Unsupported datatype class: H5T_REFERENCE"; }
case H5T_ENUM:
{LOG(FATAL) << "Unsupported datatype class: H5T_ENUM"; }
case H5T_VLEN:
{LOG(FATAL) << "Unsupported datatype class: H5T_VLEN"; }
case H5T_ARRAY:
{LOG(FATAL) << "Unsupported datatype class: H5T_ARRAY"; }
default:
{LOG(FATAL) << "Datatype class unknown"; }
}
vector<int> blob_dims(dims.size());
for (int i = 0; i < dims.size(); ++i) {
blob_dims[i] = dims[i];
}
blob->Reshape(blob_dims);
}
template <>
void hdf5_load_nd_dataset<float>(hid_t file_id, const char* dataset_name_,
int min_dim, int max_dim, Blob<float>* blob) {
hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
herr_t status = H5LTread_dataset_float(
file_id, dataset_name_, blob->mutable_cpu_data());
CHECK_GE(status, 0) << "Failed to read float dataset " << dataset_name_;
}
template <>
void hdf5_load_nd_dataset<double>(hid_t file_id, const char* dataset_name_,
int min_dim, int max_dim, Blob<double>* blob) {
hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
herr_t status = H5LTread_dataset_double(
file_id, dataset_name_, blob->mutable_cpu_data());
CHECK_GE(status, 0) << "Failed to read double dataset " << dataset_name_;
}
template <>
void hdf5_save_nd_dataset<float>(
const hid_t file_id, const string& dataset_name, const Blob<float>& blob,
bool write_diff) {
int num_axes = blob.num_axes();
hsize_t *dims = new hsize_t[num_axes];
for (int i = 0; i < num_axes; ++i) {
dims[i] = blob.shape(i);
}
const float* data;
if (write_diff) {
data = blob.cpu_diff();
} else {
data = blob.cpu_data();
}
herr_t status = H5LTmake_dataset_float(
file_id, dataset_name.c_str(), num_axes, dims, data);
CHECK_GE(status, 0) << "Failed to make float dataset " << dataset_name;
delete[] dims;
}
template <>
void hdf5_save_nd_dataset<double>(
hid_t file_id, const string& dataset_name, const Blob<double>& blob,
bool write_diff) {
int num_axes = blob.num_axes();
hsize_t *dims = new hsize_t[num_axes];
for (int i = 0; i < num_axes; ++i) {
dims[i] = blob.shape(i);
}
const double* data;
if (write_diff) {
data = blob.cpu_diff();
} else {
data = blob.cpu_data();
}
herr_t status = H5LTmake_dataset_double(
file_id, dataset_name.c_str(), num_axes, dims, data);
CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name;
delete[] dims;
}
string hdf5_load_string(hid_t loc_id, const string& dataset_name) {
// Get size of dataset
size_t size;
H5T_class_t class_;
herr_t status = \
H5LTget_dataset_info(loc_id, dataset_name.c_str(), NULL, &class_, &size);
CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name;
char *buf = new char[size];
status = H5LTread_dataset_string(loc_id, dataset_name.c_str(), buf);
CHECK_GE(status, 0)
<< "Failed to load int dataset with name " << dataset_name;
string val(buf);
delete[] buf;
return val;
}
void hdf5_save_string(hid_t loc_id, const string& dataset_name,
const string& s) {
herr_t status = \
H5LTmake_dataset_string(loc_id, dataset_name.c_str(), s.c_str());
CHECK_GE(status, 0)
<< "Failed to save string dataset with name " << dataset_name;
}
int hdf5_load_int(hid_t loc_id, const string& dataset_name) {
int val;
herr_t status = H5LTread_dataset_int(loc_id, dataset_name.c_str(), &val);
CHECK_GE(status, 0)
<< "Failed to load int dataset with name " << dataset_name;
return val;
}
void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i) {
hsize_t one = 1;
herr_t status = \
H5LTmake_dataset_int(loc_id, dataset_name.c_str(), 1, &one, &i);
CHECK_GE(status, 0)
<< "Failed to save int dataset with name " << dataset_name;
}
int hdf5_get_num_links(hid_t loc_id) {
H5G_info_t info;
herr_t status = H5Gget_info(loc_id, &info);
CHECK_GE(status, 0) << "Error while counting HDF5 links.";
return info.nlinks;
}
string hdf5_get_name_by_idx(hid_t loc_id, int idx) {
ssize_t str_size = H5Lget_name_by_idx(
loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, NULL, 0, H5P_DEFAULT);
CHECK_GE(str_size, 0) << "Error retrieving HDF5 dataset at index " << idx;
char *c_str = new char[str_size+1];
ssize_t status = H5Lget_name_by_idx(
loc_id, ".", H5_INDEX_NAME, H5_ITER_NATIVE, idx, c_str, str_size+1,
H5P_DEFAULT);
CHECK_GE(status, 0) << "Error retrieving HDF5 dataset at index " << idx;
string result(c_str);
delete[] c_str;
return result;
}
} // namespace caffe
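For reference, a minimal round-trip through the save/load helpers defined above (assuming the HDF5 C library is linked; the file name and dataset name are illustrative only):
#include <vector>
#include "hdf5.h"
#include "caffe/blob.hpp"
#include "caffe/util/hdf5.hpp"
void hdf5_blob_roundtrip_sketch() {
  std::vector<int> shape(2);
  shape[0] = 2; shape[1] = 3;
  caffe::Blob<float> saved(shape);                      // 2x3 blob
  hid_t file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  caffe::hdf5_save_nd_dataset<float>(file_id, "data", saved);
  H5Fclose(file_id);
  caffe::Blob<float> loaded;
  file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
  // Accept between 1 and 4 axes; the helper reshapes `loaded` to match the dataset.
  caffe::hdf5_load_nd_dataset<float>(file_id, "data", 1, 4, &loaded);
  H5Fclose(file_id);
}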
......@@ -323,6 +323,7 @@ MTCNN::MTCNN(const std::string &proto_model_dir){
#else
Caffe::set_mode(Caffe::GPU);
#endif
FLAGS_minloglevel = google::GLOG_ERROR;
/* Load the network. */
PNet_.reset(new Net<float>((proto_model_dir+"/det1.cfg"), TEST));
PNet_->CopyTrainedLayersFrom(proto_model_dir+"/det1.weights");
......
......@@ -875,7 +875,7 @@ void scanImages(){
for (directory_entry & x : directory_iterator(imageHome)) {
if (is_regular_file(x.path()) && x.path().has_extension()) {
string extName = x.path().extension().string();
if (extName.find("JPG") != -1 || extName.find("jpg") != -1) {
if (extName.find("JPG") != -1 || extName.find("jpg") != -1 || extName.find("jpeg") != -1) {
imgNames.push_back(x.path().filename().string());
}
}
......
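An alternative to the substring test above, sketched as a case-insensitive extension comparison (the helper name is illustrative and not part of this change):
#include <algorithm>
#include <cctype>
#include <string>
// Returns true for ".jpg" or ".jpeg" in any letter case.
bool is_jpeg_extension(std::string ext) {
  std::transform(ext.begin(), ext.end(), ext.begin(),
                 [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
  return ext == ".jpg" || ext == ".jpeg";
}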