diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc
index 48a51efcd25afeb499ab9389c2f27c8bd2515cd8..49e39358e81bbee64a618be88ee0fca6aa438b93 100644
--- a/paddle/inference/inference.cc
+++ b/paddle/inference/inference.cc
@@ -38,23 +38,16 @@ void InferenceEngine::LoadInferenceModel(
   LOG(INFO) << "program_desc_str's size: " << program_desc_str.size();
 // PicklingTools cannot parse the vector of strings correctly.
 #else
-  // program_desc_str
-  // the inference.model is stored by following python codes:
-  //     inference_program = fluid.io.get_inference_program(predict)
-  //     model_filename = "recognize_digits_mlp.inference.model/inference.model"
-  //     with open(model_filename, "w") as f:
-  //         program_str = inference_program.desc.serialize_to_string()
-  //         f.write(struct.pack('q', len(program_str)))
-  //         f.write(program_str)
-  std::string model_filename = dirname + "/inference.model";
+  std::string model_filename = dirname + "/__model__.dat";
   LOG(INFO) << "loading model from " << model_filename;
-  std::ifstream fs(model_filename, std::ios_base::binary);
-  int64_t size = 0;
-  fs.read(reinterpret_cast<char*>(&size), sizeof(int64_t));
-  LOG(INFO) << "program_desc_str's size: " << size;
+  std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary);
   std::string program_desc_str;
-  program_desc_str.resize(size);
-  fs.read(&program_desc_str[0], size);
+  inputfs.seekg(0, std::ios::end);
+  program_desc_str.resize(inputfs.tellg());
+  inputfs.seekg(0, std::ios::beg);
+  LOG(INFO) << "program_desc_str's size: " << program_desc_str.size();
+  inputfs.read(&program_desc_str[0], program_desc_str.size());
+  inputfs.close();
 #endif
   program_ = new framework::ProgramDesc(program_desc_str);
   GenerateLoadProgram(dirname);
diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py
index 926327b70c70d250766c2640d808bb3e3516d37b..c63567601accd8c072368351f2838857bb61c818 100644
--- a/python/paddle/v2/fluid/io.py
+++ b/python/paddle/v2/fluid/io.py
@@ -212,6 +212,11 @@ def save_inference_model(dirname,
             "fetch_var_names": fetch_var_names
         }, f, -1)
 
+    # Save only programDesc of inference_program in binary format
+    # in another file: __model__.dat
+    with open(model_file_name + ".dat", "wb") as fp:
+        fp.write(inference_program.desc.serialize_to_string())
+
     save_params(executor, dirname, main_program)
 
 
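
For context on the C++ half of this change: the patched `LoadInferenceModel` no longer reads an 8-byte length prefix written by `struct.pack('q', ...)`; it simply sizes the buffer from the file itself and reads the whole `__model__.dat` into a string. Below is a minimal standalone sketch of that seek-to-end / resize / rewind / read pattern; `ReadFileToString` and the example path are hypothetical names used for illustration only, not part of the Paddle API.

```cpp
// Minimal sketch (not the Paddle implementation) of reading an entire
// binary file into a std::string, as the patched loading code does.
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>

std::string ReadFileToString(const std::string& filename) {
  std::ifstream inputfs(filename, std::ios::in | std::ios::binary);
  if (!inputfs) {
    throw std::runtime_error("failed to open " + filename);
  }
  std::string buffer;
  inputfs.seekg(0, std::ios::end);          // jump to the end to learn the file size
  buffer.resize(inputfs.tellg());           // allocate exactly that many bytes
  inputfs.seekg(0, std::ios::beg);          // rewind to the beginning
  inputfs.read(&buffer[0], buffer.size());  // read the whole file in one call
  return buffer;
}

int main() {
  // Example path only; the real file is written by fluid.io.save_inference_model
  // in the Python half of this patch.
  std::string program_desc_str =
      ReadFileToString("recognize_digits_mlp.inference.model/__model__.dat");
  std::cout << "program_desc_str's size: " << program_desc_str.size() << std::endl;
  return 0;
}
```

Because the buffer is sized from the file itself, the old length prefix is no longer needed, which is why `save_inference_model` can now write the raw `serialize_to_string()` bytes directly to `__model__.dat`.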