diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1abe068f0c6a2a54b4bcdcc675630859ef499c63..989f96e311ef19cd3a8722833884c15538e698ce 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -11,10 +11,10 @@ else()
     set(CMAKE_BUILD_TYPE Release)
 endif ()
 
+
 if(DEBUGING)
     message(STATUS "debuging")
     add_definitions(-DPADDLE_MOBILE_DEBUG)
-
 else()
     message(STATUS "releasing")
     add_definitions(-fvisibility=hidden -fvisibility-inlines-hidden)
@@ -25,7 +25,6 @@ if (USE_EXCEPTION)
     add_definitions(-fexceptions)
 else()
     add_definitions(-fno-exceptions)
-
 endif ()
 
 if(IS_MAC)
@@ -119,7 +118,6 @@ else ()
     add_definitions(-DTRANSPOSE_OP)
 endif()
 
-
 add_library(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
 
 if(DEBUGING)
diff --git a/src/io/io.cpp b/src/io/io.cpp
index 465f28bc8338ff4217b61db8ebdb5151e0210449..b37b3e248aac6839a268121ced5c9ee438cccc05 100644
--- a/src/io/io.cpp
+++ b/src/io/io.cpp
@@ -279,17 +279,14 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 
 template <typename Dtype, Precision P>
 void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
-                                    framework::LoDTensor *tensor,
-                                    const std::string &file_path, char *data) {
+                                    framework::LoDTensor *tensor, char *&data) {
   // 1. version
   uint32_t version = *(uint32_t *)data;
   data += sizeof(uint32_t);
-  DLOG << "version: " << version;
 
   // 2 Lod information
   uint64_t lod_level = *(uint64_t *)data;
   data += sizeof(uint64_t);
-  DLOG << "lod_level: " << lod_level;
 
   auto &lod = *tensor->mutable_lod();
   lod.resize(lod_level);
@@ -297,7 +294,6 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
     uint64_t size = *(uint64_t *)data;
     data += sizeof(uint64_t);
     DLOG << "lod size: " << i << size;
-
     std::vector<size_t> tmp(size / sizeof(size_t));
 
     for (int k = 0; k < tmp.size(); ++k) {
@@ -315,12 +311,10 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
   // 3. tensor version
   uint32_t tensor_version = *(uint32_t *)data;
   data += sizeof(uint32_t);
-  DLOG << "tensor_version: " << tensor_version;
 
   // 4. tensor desc
   int32_t size = *(int32_t *)data;
   data += sizeof(int32_t);
-  DLOG << "tensor desc size: " << size;
 
   std::unique_ptr<char[]> buf(new char[size]);
   for (int m = 0; m < size; ++m) {
@@ -344,7 +338,6 @@ void Executor<Dtype, P>::LoadMemory(const framework::VarDesc var_desc,
       break;
     case framework::VARTYPE_TYPE_FP32:
       type_size = 4;
-      DLOG << " type size: " << type_size;
       memory = tensor->mutable_data<float>();
       break;
     case framework::VARTYPE_TYPE_FP64:
@@ -382,8 +375,8 @@ void Executor<Dtype, P>::InitMemory() {
         char *origin_data =
             Get_binary_data(program_.model_path + "/" + var_desc->Name());
-        LoadMemory(*var_desc, tensor,
-                   program_.model_path + "/" + var_desc->Name(), origin_data);
+        char *data = origin_data;
+        LoadMemory(*var_desc, tensor, data);
 
         delete origin_data;
       } else {
         if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
@@ -399,7 +392,7 @@
 template <typename Dtype, Precision P>
 void Executor<Dtype, P>::InitCombineMemory() {
   char *origin_data = Get_binary_data(program_.para_path);
-
+  char *data = origin_data;
   for (const auto &block : to_predict_program_->Blocks()) {
     for (const auto &var_desc : block->Vars()) {
       auto var = program_.scope->Var(var_desc->Name());
@@ -408,18 +401,15 @@
         if (var_desc->Name() == "feed" || var_desc->Name() == "fetch") {
           continue;
         }
-        LoadMemory(*var_desc, tensor,
-                   program_.model_path + "/" + var_desc->Name(), origin_data);
+        LoadMemory(*var_desc, tensor, data);
       } else {
         if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
           auto tensor = var->template GetMutable<framework::LoDTensor>();
-
           tensor->template mutable_data<Ptype>();
         }
       }
     }
   }
 
-
   delete origin_data;
 }
diff --git a/src/io/io.h b/src/io/io.h
index f89ccc8b2efeb755d3386e224ee61557642e03e9..2eaa94d0a63eadfc1a41761a71b2ea31a0fc7b07 100644
--- a/src/io/io.h
+++ b/src/io/io.h
@@ -63,8 +63,7 @@ class Executor {
   void InitMemory();
   void LoadMemory(const framework::VarDesc var_desc,
-                  framework::LoDTensor *tensor, const std::string &file_path,
-                  char *data);
+                  framework::LoDTensor *tensor, char *&data);
   void InitCombineMemory();
 
   framework::Program<Dtype> program_;
   int batch_size_ = 1;
diff --git a/test/net/test_googlenet.cpp b/test/net/test_googlenet.cpp
index 210f9643a165909df9bbade5b842c7af9840cce3..ab4fd2fe0d1eaaa58fabc38fbf512a0b860c36f0 100644
--- a/test/net/test_googlenet.cpp
+++ b/test/net/test_googlenet.cpp
@@ -20,10 +20,9 @@ int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   bool optimize = false;
   auto time1 = time();
-
-  auto program = loader.Load(g_googlenet, optimize);
-  //  auto program = loader.Load(g_googlenet_combine + "/model",
-  //  g_googlenet_combine + "/params", optimize);
+  //  auto program = loader.Load(g_googlenet, optimize);
+  auto program = loader.Load(g_googlenet_combine + "/model",
+                             g_googlenet_combine + "/params", optimize);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time2) << "ms\n";
   paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, optimize);
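A note on the interface change above: LoadMemory's parameter goes from a const std::string &file_path plus a by-value char *data to a char *&data passed by reference. Because the callee advances the caller's pointer as it parses, consecutive LoadMemory calls in InitCombineMemory can walk through one combined parameter file (program_.para_path) blob by blob, and InitMemory must copy origin_data into a local cursor so the original pointer survives for the delete. The sketch below is a minimal, self-contained illustration of that cursor pattern; LoadOneBlob and the length-prefixed blob layout are invented for the example and are not paddle-mobile APIs.

// Minimal sketch (not paddle-mobile code) of the `char *&data` cursor
// pattern: each call consumes its bytes from a shared buffer and leaves
// the cursor at the start of the next serialized blob.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical stand-in for one serialized tensor: a uint32 length header
// followed by that many floats.
static void LoadOneBlob(char *&data, std::vector<float> *out) {
  uint32_t n;
  std::memcpy(&n, data, sizeof(uint32_t));  // read the length header
  data += sizeof(uint32_t);                 // advance the caller's cursor
  out->resize(n);
  std::memcpy(out->data(), data, n * sizeof(float));
  data += n * sizeof(float);  // cursor now points at the next blob
}

int main() {
  // Build a fake "combined params" buffer holding two blobs back to back.
  std::vector<char> buf;
  for (uint32_t n : {2u, 3u}) {
    buf.insert(buf.end(), (char *)&n, (char *)&n + sizeof(n));
    for (uint32_t i = 0; i < n; ++i) {
      float v = float(i) + 0.5f;
      buf.insert(buf.end(), (char *)&v, (char *)&v + sizeof(v));
    }
  }

  char *cursor = buf.data();  // like `char *data = origin_data;` in the diff
  std::vector<float> a, b;
  LoadOneBlob(cursor, &a);  // consumes blob 1, advances cursor
  LoadOneBlob(cursor, &b);  // continues where blob 1 ended
  std::cout << a.size() << " " << b.size() << "\n";  // prints: 2 3
  return 0;
}

The same reasoning explains the two added `char *data = origin_data;` lines in io.cpp: LoadMemory mutates the cursor it is given, so the callers keep origin_data intact for the final delete.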