diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt
index 8437b2b21942ead544dab8636db1b355b7cf7bd5..02ca8a45a851d262eed6962a9a227b5009ef03a5 100644
--- a/paddle/inference/CMakeLists.txt
+++ b/paddle/inference/CMakeLists.txt
@@ -8,27 +8,6 @@ cc_library(paddle_fluid_api
 # Merge all modules into a simgle static library
 cc_library(paddle_fluid DEPS paddle_fluid_api ${FLUID_CORE_MODULES})
 
-# ptools
-# just for testing, we may need to change the storing format for inference_model
-# and move the dependent of pickle.
-# download from http://www.picklingtools.com/
-# build in the C++ sub-directory, using command
-# make -f Makefile.Linux libptools.so
-set(PTOOLS_LIB)
-set(PTOOLS_ROOT $ENV{PTOOLS_ROOT} CACHE PATH "Folder contains PicklingTools")
-find_path(PTOOLS_INC_DIR chooseser.h PATHS ${PTOOLS_ROOT}/C++)
-find_library(PTOOLS_SHARED_LIB NAMES ptools PATHS ${PTOOLS_ROOT}/C++)
-if(PTOOLS_INC_DIR AND PTOOLS_SHARED_LIB)
-  add_definitions(-DPADDLE_USE_PTOOLS)
-  set(PTOOLS_LIB ptools)
-  message(STATUS "Found PicklingTools: ${PTOOLS_SHARED_LIB}")
-  add_library(${PTOOLS_LIB} SHARED IMPORTED GLOBAL)
-  set_property(TARGET ${PTOOLS_LIB} PROPERTY IMPORTED_LOCATION ${PTOOLS_SHARED_LIB})
-  include_directories(${PTOOLS_ROOT}/C++)
-  include_directories(${PTOOLS_ROOT}/C++/opencontainers_1_8_5/include)
-  add_definitions(-DOC_NEW_STYLE_INCLUDES) # used in ptools
-endif()
-
 add_executable(example example.cc)
 if(APPLE)
   set(OPTIONAL_LINK_FLAGS)
diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc
index 4a8bc21b1cf0694dd27e7343432d37b94721fe1b..49001778808173b82865a4b6632a6b175ef96242 100644
--- a/paddle/inference/inference.cc
+++ b/paddle/inference/inference.cc
@@ -56,15 +56,6 @@ void InferenceEngine::LoadInferenceModel(
     const std::string& dirname,
     const std::vector<std::string>& feed_var_names,
     const std::vector<std::string>& fetch_var_names) {
-#ifdef PADDLE_USE_PTOOLS
-  std::string model_filename = dirname + "/__model__";
-  LOG(INFO) << "Using PicklingTools, loading model from " << model_filename;
-  Val v;
-  LoadValFromFile(model_filename.c_str(), v, SERIALIZE_P0);
-  std::string program_desc_str = v["program_desc_str"];
-  LOG(INFO) << "program_desc_str's size: " << program_desc_str.size();
-// PicklingTools cannot parse the vector of strings correctly.
-#else
   std::string model_filename = dirname + "/__model__.dat";
   LOG(INFO) << "loading model from " << model_filename;
   std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary);
@@ -75,7 +66,7 @@
   LOG(INFO) << "program_desc_str's size: " << program_desc_str.size();
   inputfs.read(&program_desc_str[0], program_desc_str.size());
   inputfs.close();
-#endif
+
   program_ = new framework::ProgramDesc(program_desc_str);
   GenerateLoadProgram(dirname);
diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py
index 1b17afbe3fd305717097171024bf3c08c73662e3..57f051985ca0b91a071ad2ef27c48a00b0b615ef 100644
--- a/python/paddle/v2/fluid/io.py
+++ b/python/paddle/v2/fluid/io.py
@@ -198,11 +198,10 @@ def prepend_feed_ops(inference_program, feeded_var_names):
         name='feed',
         type=core.VarDesc.VarType.FEED_MINIBATCH,
         persistable=True)
     for i, name in enumerate(feeded_var_names):
-        out = global_block.var(name)
         global_block.prepend_op(
             type='feed',
             inputs={'X': [feed_var]},
-            outputs={'Out': [out]},
+            outputs={'Out': [name]},
             attrs={'col': i})
@@ -269,11 +268,11 @@ def save_inference_model(dirname,
             "fetch_var_names": fetch_var_names
         }, f, -1)
 
-    # Save only programDesc of inference_program in binary format
-    # in another file: __model__.dat
     prepend_feed_ops(inference_program, feeded_var_names)
     append_fetch_ops(inference_program, fetch_var_names)
 
+    # Save only programDesc of inference_program in binary format
+    # in another file: __model__.dat
     with open(model_file_name + ".dat", "wb") as fp:
         fp.write(inference_program.desc.serialize_to_string())
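
For reference, the load path that survives this diff boils down to reading the whole __model__.dat file into a string and handing it to framework::ProgramDesc. Below is a minimal standalone sketch of that read, assuming only that save_inference_model wrote the file with serialize_to_string() as above; the helper name ReadProgramDescString and the main driver are hypothetical (not Paddle API), and the seekg/tellg buffer sizing is my assumption for the lines elided between the two inference.cc hunks.

#include <fstream>
#include <iostream>
#include <string>

// Hypothetical helper: read the serialized ProgramDesc bytes written by
// save_inference_model into a std::string, as the engine does above.
std::string ReadProgramDescString(const std::string& dirname) {
  std::string model_filename = dirname + "/__model__.dat";
  std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary);
  std::string program_desc_str;
  inputfs.seekg(0, std::ios::end);           // find the file size
  program_desc_str.resize(inputfs.tellg());  // allocate a buffer of that size
  inputfs.seekg(0, std::ios::beg);
  inputfs.read(&program_desc_str[0], program_desc_str.size());
  inputfs.close();
  return program_desc_str;
}

int main() {
  std::string program_desc_str = ReadProgramDescString(".");
  std::cout << "program_desc_str's size: " << program_desc_str.size() << "\n";
  // In InferenceEngine::LoadInferenceModel this string is then passed to
  // new framework::ProgramDesc(program_desc_str).
  return 0;
}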